From 693f4509bb8bbafa881fe2155c5a905d9f046e17 Mon Sep 17 00:00:00 2001 From: Harry Zhang Date: Mon, 28 Aug 2017 12:51:12 -0700 Subject: [PATCH] Issue #45: get aws partition based on region --- common/python/ax/cloud/aws/__init__.py | 4 +- common/python/ax/cloud/aws/consts.py | 9 +++ common/python/ax/cloud/aws/util.py | 16 +++++ platform/cluster/aws/util.sh | 4 +- .../lib/ax/platform/ax_master_manager.py | 3 +- .../lib/ax/platform/ax_minion_upgrade.py | 5 +- .../source/lib/ax/platform/cluster_buckets.py | 49 +++---------- .../ax/platform/cluster_instance_profile.py | 72 +++++++++++++------ platform/source/tools/ax-upgrade-misc.py | 7 +- 9 files changed, 100 insertions(+), 69 deletions(-) diff --git a/common/python/ax/cloud/aws/__init__.py b/common/python/ax/cloud/aws/__init__.py index 1cc55888f777..bd7ae34afae2 100644 --- a/common/python/ax/cloud/aws/__init__.py +++ b/common/python/ax/cloud/aws/__init__.py @@ -8,9 +8,9 @@ from .instance_profile import InstanceProfile from .aws_s3 import AXS3Bucket, BUCKET_CLEAN_KEYWORD from .security import SecurityToken -from .util import default_aws_retry +from .util import default_aws_retry, get_aws_partition_from_region from .ec2 import EC2InstanceState, EC2, EC2IPPermission from .autoscaling import ASGInstanceLifeCycle, ASG from .launch_config import LaunchConfig from .ebs import RawEBSVolume -from .consts import AWS_DEFAULT_PROFILE +from .consts import AWS_DEFAULT_PROFILE, AWS_ALL_RESOURCES diff --git a/common/python/ax/cloud/aws/consts.py b/common/python/ax/cloud/aws/consts.py index b95650c9a6a1..6d04d5733f88 100644 --- a/common/python/ax/cloud/aws/consts.py +++ b/common/python/ax/cloud/aws/consts.py @@ -6,3 +6,12 @@ AWS_DEFAULT_PROFILE = "default" + + +AWS_ALL_RESOURCES = "*" + + +class AWSPartitions: + PARTITION_AWS = "aws" + PARTITION_US_GOV = "aws-us-gov" + PARTITION_CHINA = "aws-cn" diff --git a/common/python/ax/cloud/aws/util.py b/common/python/ax/cloud/aws/util.py index 03ed41e6aacd..85e1589a7dd5 100644 --- 
a/common/python/ax/cloud/aws/util.py +++ b/common/python/ax/cloud/aws/util.py @@ -8,6 +8,7 @@ import boto3 from botocore.exceptions import ClientError, EndpointConnectionError, ConnectionClosedError, BotoCoreError +from .consts import AWSPartitions def default_aws_retry(exception): # Boto has 3 types of errors (see botocore/exceptions.py) @@ -69,3 +70,18 @@ def tag_dict_to_aws_filter(tags): ) return filters + +def get_aws_partition_from_region(region_name): + """ + Given region, return the partition name. Partition is used to form Amazon Resource Names (ARNs) + See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + :param region_name: AWS region name, e.g. "us-east-1", "cn-north-1", "us-gov-west-1" + :return: AWS partition name for the given region + """ + region_string = region_name.lower() + if region_string.startswith("cn-"): + return AWSPartitions.PARTITION_CHINA + elif region_string.startswith("us-gov"): + return AWSPartitions.PARTITION_US_GOV + else: + return AWSPartitions.PARTITION_AWS diff --git a/platform/cluster/aws/util.sh b/platform/cluster/aws/util.sh index 16c9d7b21a68..8243e88617d2 100755 --- a/platform/cluster/aws/util.sh +++ b/platform/cluster/aws/util.sh @@ -757,11 +757,11 @@ function delete-tag { # Creates the IAM roles (if they do not already exist) function create-iam-profiles { - /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE + /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION} } function delete-iam-profiles { - /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE + /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION} } # Wait for instance to be in specified state diff --git a/platform/source/lib/ax/platform/ax_master_manager.py b/platform/source/lib/ax/platform/ax_master_manager.py index d0c55826d4e5..2897f4b95278 100755 --- 
a/platform/source/lib/ax/platform/ax_master_manager.py +++ b/platform/source/lib/ax/platform/ax_master_manager.py @@ -626,7 +626,8 @@ def upgrade(self): self.master_instance = self.ec2.Instance(instance_id) logger.info("Running master %s.", instance_id) self.aws_image = ami_id - self.instance_profile = AXClusterInstanceProfile(self.cluster_name_id, aws_profile=self.profile).get_master_arn() + self.instance_profile = AXClusterInstanceProfile(self.cluster_name_id, region_name=self.region, + aws_profile=self.profile).get_master_arn() self.populate_attributes() master_tag_updated = self.ensure_master_tags() # TODO: Possible race here. diff --git a/platform/source/lib/ax/platform/ax_minion_upgrade.py b/platform/source/lib/ax/platform/ax_minion_upgrade.py index 5d4fb9257b52..0c2e5d2cc9e1 100644 --- a/platform/source/lib/ax/platform/ax_minion_upgrade.py +++ b/platform/source/lib/ax/platform/ax_minion_upgrade.py @@ -94,8 +94,9 @@ def _update_launch_config(self, old_name, new_name, retain_spot_price=False): # Replace ImageId and everything listed in default_kube_up_env. 
config["ImageId"] = ami_id - config["IamInstanceProfile"] = AXClusterInstanceProfile(self._cluster_name_id, - aws_profile=self._profile).get_minion_instance_profile_name() + config["IamInstanceProfile"] = AXClusterInstanceProfile( + self._cluster_name_id, region_name=self._region, aws_profile=self._profile + ).get_minion_instance_profile_name() user_data = zlib.decompressobj(32 + zlib.MAX_WBITS).decompress(user_data) user_data = kube_env_update(user_data, updates) comp = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16) diff --git a/platform/source/lib/ax/platform/cluster_buckets.py b/platform/source/lib/ax/platform/cluster_buckets.py index 6984fb49c3a1..9d9b46784f5c 100644 --- a/platform/source/lib/ax/platform/cluster_buckets.py +++ b/platform/source/lib/ax/platform/cluster_buckets.py @@ -9,6 +9,7 @@ from ax.aws.profiles import AWSAccountInfo from ax.cloud import Cloud +from ax.cloud.aws import get_aws_partition_from_region from ax.meta import AXClusterId, AXClusterConfigPath, AXClusterDataPath, AXSupportConfigPath, AXUpgradeConfigPath from ax.platform.exceptions import AXPlatformException @@ -29,43 +30,14 @@ } } -upgrade_bucket_policy_template = """ +SUPPORT_BUCKET_POLICY_TEMPLATE = """ { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { - "AWS": "arn:aws:iam::{id}:root" - }, - "Action": [ - "s3:ListBucket", - "s3:GetBucketLocation", - "s3:ListBucketMultipartUploads", - "s3:ListBucketVersions" - ], - "Resource": "arn:aws:s3:::{s3}" - }, - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{id}:root" - }, - "Action": "s3:*", - "Resource": "arn:aws:s3:::{s3}/*" - } - ] -} -""" - -support_bucket_policy_template = """ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{id}:root" + "AWS": "arn:{partition}:iam::{account}:root" }, "Action": [ "s3:ListBucket", @@ -74,12 +46,12 @@ "s3:ListBucketVersions", "s3:GetBucketAcl" ], - "Resource": "arn:aws:s3:::{s3}" + 
"Resource": "arn:{partition}:s3:::{bucket_name}" }, { "Effect": "Allow", "Principal": { - "AWS": "arn:aws:iam::{id}:root" + "AWS": "arn:{partition}:iam::{account}:root" }, "Action": [ "s3:GetObject", @@ -88,7 +60,7 @@ "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts" ], - "Resource": "arn:aws:s3:::{s3}/*" + "Resource": "arn:{partition}:s3:::{bucket_name}/*" } ] } @@ -185,6 +157,7 @@ def __init__(self, name_id, aws_profile, aws_region): self._name_id = name_id self._aws_profile = aws_profile self._aws_region = aws_region + self._aws_partition = get_aws_partition_from_region(self._aws_region) def update(self, iam): """ @@ -204,7 +177,7 @@ def update(self, iam): if not support_bucket.get_policy(): logger.info("Argo support bucket policy does not exist, creating new one...") if not support_bucket.put_policy( - policy=self._generate_bucket_policy_string(template=support_bucket_policy_template, + policy=self._generate_bucket_policy_string(template=SUPPORT_BUCKET_POLICY_TEMPLATE, bucket_name=support_bucket.get_bucket_name(), iam=iam) ): @@ -217,7 +190,7 @@ def update(self, iam): if not upgrade_bucket.get_policy(): logger.info("Argo upgrade bucket policy does not exist, creating new one...") if not upgrade_bucket.put_policy( - policy=self._generate_bucket_policy_string(template=support_bucket_policy_template, + policy=self._generate_bucket_policy_string(template=SUPPORT_BUCKET_POLICY_TEMPLATE, bucket_name=upgrade_bucket.get_bucket_name(), iam=iam) ): @@ -241,6 +214,6 @@ def _generate_bucket_policy_string(self, template, bucket_name, iam): aws_cid = AWSAccountInfo(aws_profile=self._aws_profile).get_account_id_from_iam(iam) policy = json.loads(template) for s in policy["Statement"]: - s["Principal"]["AWS"] = s["Principal"]["AWS"].format(id=aws_cid) - s["Resource"] = s["Resource"].format(s3=bucket_name) + s["Principal"]["AWS"] = s["Principal"]["AWS"].format(partition=self._aws_partition, account=aws_cid) + s["Resource"] = 
s["Resource"].format(partition=self._aws_partition, bucket_name=bucket_name) return json.dumps(policy) diff --git a/platform/source/lib/ax/platform/cluster_instance_profile.py b/platform/source/lib/ax/platform/cluster_instance_profile.py index 6400def322c3..0eb5199ca22d 100644 --- a/platform/source/lib/ax/platform/cluster_instance_profile.py +++ b/platform/source/lib/ax/platform/cluster_instance_profile.py @@ -5,16 +5,18 @@ # # Module to manage instance profiles for AX cluster. +import copy import logging from ax.aws.profiles import AWSAccountInfo +from ax.cloud.aws import get_aws_partition_from_region, AWS_ALL_RESOURCES from ax.cloud.aws.instance_profile import InstanceProfile logger = logging.getLogger(__name__) # Default instance profile statement for master. -master_policy = { +MASTER_POLICY_TEMPLATE = { "Version": "2012-10-17", "Statement": [ { @@ -35,7 +37,7 @@ { "Effect": "Allow", "Action": "s3:*", - "Resource": "arn:aws:s3:::applatix-*" + "Resource": "arn:{partition}:s3:::applatix-*" }, { "Action": [ @@ -51,20 +53,28 @@ "Resource": "*", "Effect": "Allow" }, + { + "Action": "iam:PassRole", + "Resource": [ + "arn:{partition}:iam::{account}:role/{master_name}", + "arn:{partition}:iam::{account}:role/{minion_name}" + ], + "Effect": "Allow" + } ] } # Default instance profile statement for minions. 
-minion_policy = { +MINION_POLICY_TEMPLATE = { "Version": "2012-10-17", "Statement": [ { "Action": "s3:*", "Resource": [ - "arn:aws:s3:::applatix-*", - "arn:aws:s3:::*axawss3-test*", - "arn:aws:s3:::ax-public", - "arn:aws:s3:::ax-public/*" + "arn:{partition}:s3:::applatix-*", + "arn:{partition}:s3:::*axawss3-test*", + "arn:{partition}:s3:::ax-public", + "arn:{partition}:s3:::ax-public/*" ], "Effect": "Allow" }, @@ -145,33 +155,33 @@ ], "Resource": "*", "Effect": "Allow" + }, + { + "Action": "iam:PassRole", + "Resource": [ + "arn:{partition}:iam::{account}:role/{master_name}", + "arn:{partition}:iam::{account}:role/{minion_name}" + ], + "Effect": "Allow" } ] } class AXClusterInstanceProfile(object): - def __init__(self, name_id, aws_profile=None): + def __init__(self, name_id, region_name, aws_profile=None): self._name_id = name_id self._master_name = name_id + "-master" self._minion_name = name_id + "-minion" + self._aws_partition = get_aws_partition_from_region(region_name) self._master_profile = InstanceProfile(self._master_name, aws_profile=aws_profile) self._minion_profile = InstanceProfile(self._minion_name, aws_profile=aws_profile) # Create pass_role statement specific to this cluster. self._account = AWSAccountInfo(aws_profile=aws_profile).get_account_id() - pass_role = { - "Action": "iam:PassRole", - "Resource": [ - "arn:aws:iam::{}:role/{}".format(self._account, self._master_name), - "arn:aws:iam::{}:role/{}".format(self._account, self._minion_name), - ], - "Effect": "Allow" - } - self._master_policy = master_policy - self._master_policy["Statement"] += [pass_role] - self._minion_policy = minion_policy - self._minion_policy["Statement"] += [pass_role] + + self._master_policy = self._format_policy_resources(MASTER_POLICY_TEMPLATE) + self._minion_policy = self._format_policy_resources(MINION_POLICY_TEMPLATE) def update(self): """ @@ -202,3 +212,25 @@ def get_minion_instance_profile_name(self): No need to lookup from AWS as name is sufficient. 
""" return self._minion_name + + def _format_policy_resources(self, policy): + """ + Dynamically insert critical information into policy: aws partition, aws account id, + master name, minion name + :param policy: + :return: + """ + p = copy.deepcopy(policy) + for statement in p.get("Statement", []): + resource = statement.get("Resource", AWS_ALL_RESOURCES) + if resource != AWS_ALL_RESOURCES: + new_resource = [] + if isinstance(resource, list): + for r in resource: + new_resource.append(r.format(partition=self._aws_partition, account=self._account, + master_name=self._master_name, minion_name=self._minion_name)) + else: + new_resource.append(resource.format(partition=self._aws_partition, account=self._account, + master_name=self._master_name, minion_name=self._minion_name)) + statement["Resource"] = new_resource + return p diff --git a/platform/source/tools/ax-upgrade-misc.py b/platform/source/tools/ax-upgrade-misc.py index 94e6a71d7a37..4e2999130709 100755 --- a/platform/source/tools/ax-upgrade-misc.py +++ b/platform/source/tools/ax-upgrade-misc.py @@ -46,13 +46,12 @@ args = ax_parser.parse_args() if args.ensure_aws_iam or args.delete_aws_iam: from ax.platform.cluster_instance_profile import AXClusterInstanceProfile - aws_profile = args.aws_profile - assert aws_profile, "Missing AWS profile to ensure aws iam" assert args.cluster_name_id, "Missing cluster name id to ensure aws iam" + assert args.aws_region, "Missing AWS region to ensure aws iam" if args.ensure_aws_iam: - AXClusterInstanceProfile(args.cluster_name_id, aws_profile=aws_profile).update() + AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).update() elif args.delete_aws_iam: - AXClusterInstanceProfile(args.cluster_name_id, aws_profile=aws_profile).delete() + AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).delete() if args.ensure_aws_s3: from ax.platform.cluster_buckets import AXClusterBuckets