From 207d92c873647546e90c2a0da0c764fe7b964c97 Mon Sep 17 00:00:00 2001
From: Gareth Oakley
Date: Sat, 6 Jan 2018 12:10:55 +0700
Subject: [PATCH 1/2] r/aws_dax_cluster: Add new resource

---
 aws/config.go                            |   5 +-
 aws/import_aws_dax_cluster_test.go       |  30 ++
 aws/provider.go                          |   1 +
 aws/resource_aws_dax_cluster.go          | 588 +++++++++++++++++++++++
 aws/resource_aws_dax_cluster_test.go     | 224 +++++++++
 aws/structure.go                         |  11 +
 aws/tagsDAX.go                           | 115 +++++
 aws/tagsDAX_test.go                      | 103 ++++
 website/aws.erb                          |  13 +
 website/docs/r/dax_cluster.html.markdown | 101 ++++
 10 files changed, 1190 insertions(+), 1 deletion(-)
 create mode 100644 aws/import_aws_dax_cluster_test.go
 create mode 100644 aws/resource_aws_dax_cluster.go
 create mode 100644 aws/resource_aws_dax_cluster_test.go
 create mode 100644 aws/tagsDAX.go
 create mode 100644 aws/tagsDAX_test.go
 create mode 100644 website/docs/r/dax_cluster.html.markdown

diff --git a/aws/config.go b/aws/config.go
index 86eb60f4f9b..9dbf533872b 100644
--- a/aws/config.go
+++ b/aws/config.go
@@ -34,6 +34,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
 	"github.com/aws/aws-sdk-go/service/configservice"
 	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
+	"github.com/aws/aws-sdk-go/service/dax"
 	"github.com/aws/aws-sdk-go/service/devicefarm"
 	"github.com/aws/aws-sdk-go/service/directconnect"
 	"github.com/aws/aws-sdk-go/service/directoryservice"
@@ -142,6 +143,7 @@ type AWSClient struct {
 	cognitoconn          *cognitoidentity.CognitoIdentity
 	cognitoidpconn       *cognitoidentityprovider.CognitoIdentityProvider
 	configconn           *configservice.ConfigService
+	daxconn              *dax.DAX
 	devicefarmconn       *devicefarm.DeviceFarm
 	dmsconn              *databasemigrationservice.DatabaseMigrationService
 	dsconn               *directoryservice.DirectoryService
@@ -403,8 +405,9 @@ func (c *Config) Client() (interface{}, error) {
 	client.configconn = configservice.New(sess)
 	client.cognitoconn = cognitoidentity.New(sess)
 	client.cognitoidpconn = cognitoidentityprovider.New(sess)
-	client.dmsconn = databasemigrationservice.New(sess)
 	client.codepipelineconn = codepipeline.New(sess)
+	client.daxconn = dax.New(awsDynamoSess)
+	client.dmsconn = databasemigrationservice.New(sess)
 	client.dsconn = directoryservice.New(sess)
 	client.dynamodbconn = dynamodb.New(awsDynamoSess)
 	client.ecrconn = ecr.New(awsEcrSess)
diff --git a/aws/import_aws_dax_cluster_test.go b/aws/import_aws_dax_cluster_test.go
new file mode 100644
index 00000000000..aae8e95a386
--- /dev/null
+++ b/aws/import_aws_dax_cluster_test.go
@@ -0,0 +1,30 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAWSDAXCluster_importBasic(t *testing.T) {
+	resourceName := "aws_dax_cluster.test"
+	rString := acctest.RandString(10)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSDAXClusterDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSDAXClusterConfig(rString),
+			},
+
+			resource.TestStep{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
diff --git a/aws/provider.go b/aws/provider.go
index b3887547b51..f80e87df2b4 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -304,6 +304,7 @@ func Provider() terraform.ResourceProvider {
 			"aws_codebuild_project":   resourceAwsCodeBuildProject(),
 			"aws_codepipeline":        resourceAwsCodePipeline(),
 			"aws_customer_gateway":    resourceAwsCustomerGateway(),
+			
"aws_dax_cluster": resourceAwsDaxCluster(), "aws_db_event_subscription": resourceAwsDbEventSubscription(), "aws_db_instance": resourceAwsDbInstance(), "aws_db_option_group": resourceAwsDbOptionGroup(), diff --git a/aws/resource_aws_dax_cluster.go b/aws/resource_aws_dax_cluster.go new file mode 100644 index 00000000000..31e2989a8c6 --- /dev/null +++ b/aws/resource_aws_dax_cluster.go @@ -0,0 +1,588 @@ +package aws + +import ( + "fmt" + "log" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/service/dax" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsDaxCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsDaxClusterCreate, + Read: resourceAwsDaxClusterRead, + Update: resourceAwsDaxClusterUpdate, + Delete: resourceAwsDaxClusterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + // DAX follows the same naming convention as ElastiCache clusters + ValidateFunc: validateElastiCacheClusterId, + }, + "iam_role_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "node_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "replication_factor": { + Type: schema.TypeInt, + Required: true, + }, + "availability_zones": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "notification_topic_arn": { + Type: schema.TypeString, + Optional: true, + }, + "parameter_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "maintenance_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, + ValidateFunc: validateOnceAWeekWindowFormat, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "subnet_group_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tags": tagsSchema(), + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "configuration_endpoint": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_address": { + Type: schema.TypeString, + Computed: true, + }, + "nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "address": { + Type: schema.TypeString, + Computed: true, + }, + "port": { + Type: schema.TypeInt, + Computed: true, + }, + "availability_zone": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsDaxClusterCreate(d *schema.ResourceData, meta interface{}) error { + conn := 
meta.(*AWSClient).daxconn + + clusterName := d.Get("cluster_name").(string) + iamRoleArn := d.Get("iam_role_arn").(string) + nodeType := d.Get("node_type").(string) + numNodes := int64(d.Get("replication_factor").(int)) + subnetGroupName := d.Get("subnet_group_name").(string) + securityIdSet := d.Get("security_group_ids").(*schema.Set) + + securityIds := expandStringList(securityIdSet.List()) + tags := tagsFromMapDax(d.Get("tags").(map[string]interface{})) + + req := &dax.CreateClusterInput{ + ClusterName: aws.String(clusterName), + IamRoleArn: aws.String(iamRoleArn), + NodeType: aws.String(nodeType), + ReplicationFactor: aws.Int64(numNodes), + SecurityGroupIds: securityIds, + SubnetGroupName: aws.String(subnetGroupName), + Tags: tags, + } + + // optionals can be defaulted by AWS + if v, ok := d.GetOk("description"); ok { + req.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("parameter_group_name"); ok { + req.ParameterGroupName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("maintenance_window"); ok { + req.PreferredMaintenanceWindow = aws.String(v.(string)) + } + + if v, ok := d.GetOk("notification_topic_arn"); ok { + req.NotificationTopicArn = aws.String(v.(string)) + } + + preferred_azs := d.Get("availability_zones").(*schema.Set).List() + if len(preferred_azs) > 0 { + azs := expandStringList(preferred_azs) + req.AvailabilityZones = azs + } + + // IAM roles take some time to propagate + var resp *dax.CreateClusterOutput + err := resource.Retry(30*time.Second, func() *resource.RetryError { + var err error + resp, err = conn.CreateCluster(req) + if err != nil { + if isAWSErr(err, dax.ErrCodeInvalidParameterValueException, "No permission to assume role") { + log.Print("[DEBUG] Retrying create of DAX cluster") + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error creating DAX cluster: %s", err) + } + + // Assign the cluster id as the resource ID + // DAX always retains the id in lower case, so we have to + // mimic that or else we won't be able to refresh a resource whose + // name contained uppercase characters. 
+ d.SetId(strings.ToLower(*resp.Cluster.ClusterName)) + + pending := []string{"creating", "modifying"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: daxClusterStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: d.Timeout(schema.TimeoutCreate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + log.Printf("[DEBUG] Waiting for state to become available: %v", d.Id()) + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for DAX cluster (%s) to be created: %s", d.Id(), sterr) + } + + return resourceAwsDaxClusterRead(d, meta) +} + +func resourceAwsDaxClusterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).daxconn + req := &dax.DescribeClustersInput{ + ClusterNames: []*string{aws.String(d.Id())}, + } + + res, err := conn.DescribeClusters(req) + if err != nil { + if isAWSErr(err, dax.ErrCodeClusterNotFoundFault, "") { + log.Printf("[WARN] DAX cluster (%s) not found", d.Id()) + d.SetId("") + return nil + } + + return err + } + + if len(res.Clusters) == 0 { + log.Printf("[WARN] DAX cluster (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + c := res.Clusters[0] + d.Set("arn", c.ClusterArn) + d.Set("cluster_name", c.ClusterName) + d.Set("description", c.Description) + d.Set("iam_role_arn", c.IamRoleArn) + d.Set("node_type", c.NodeType) + d.Set("replication_factor", c.TotalNodes) + + if c.ClusterDiscoveryEndpoint != nil { + d.Set("port", c.ClusterDiscoveryEndpoint.Port) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *c.ClusterDiscoveryEndpoint.Address, *c.ClusterDiscoveryEndpoint.Port))) + d.Set("cluster_address", aws.String(fmt.Sprintf("%s", *c.ClusterDiscoveryEndpoint.Address))) + } + + d.Set("subnet_group_name", c.SubnetGroup) + d.Set("security_group_ids", flattenDaxSecurityGroupIds(c.SecurityGroups)) + + if c.ParameterGroup != nil { + d.Set("parameter_group_name", c.ParameterGroup.ParameterGroupName) + } + + d.Set("maintenance_window", c.PreferredMaintenanceWindow) + + if c.NotificationConfiguration != nil { + if *c.NotificationConfiguration.TopicStatus == "active" { + d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) + } + } + + if err := setDaxClusterNodeData(d, c); err != nil { + return err + } + + // list tags for resource + // set tags + arn, err := buildDaxArn(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for DAX Cluster, not setting Tags for cluster %s", *c.ClusterName) + } else { + resp, err := conn.ListTags(&dax.ListTagsInput{ + ResourceName: aws.String(arn), + }) + + if err != nil { + log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn) + } + + var dt []*dax.Tag + if len(resp.Tags) > 0 { + dt = resp.Tags + } + d.Set("tags", tagsToMapDax(dt)) + } + + return nil +} + +func resourceAwsDaxClusterUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).daxconn + arn, err := buildDaxArn(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for DAX Cluster, not updating Tags for cluster %s", d.Id()) + } else { + if err := setTagsDax(conn, d, arn); err != nil { + return err + } + } + + req := &dax.UpdateClusterInput{ + ClusterName: aws.String(d.Id()), + } + + requestUpdate := false + awaitUpdate := false + if 
d.HasChange("description") { + req.Description = aws.String(d.Get("description").(string)) + requestUpdate = true + } + + if d.HasChange("security_group_ids") { + if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { + req.SecurityGroupIds = expandStringList(attr.List()) + requestUpdate = true + } + } + + if d.HasChange("parameter_group_name") { + req.ParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) + requestUpdate = true + } + + if d.HasChange("maintenance_window") { + req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string)) + requestUpdate = true + } + + if d.HasChange("notification_topic_arn") { + v := d.Get("notification_topic_arn").(string) + req.NotificationTopicArn = aws.String(v) + if v == "" { + inactive := "inactive" + req.NotificationTopicStatus = &inactive + } + requestUpdate = true + } + + if requestUpdate { + log.Printf("[DEBUG] Modifying DAX Cluster (%s), opts:\n%s", d.Id(), req) + _, err := conn.UpdateCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error updating DAX cluster (%s), error: %s", d.Id(), err) + } + awaitUpdate = true + } + + if d.HasChange("replication_factor") { + oraw, nraw := d.GetChange("replication_factor") + o := oraw.(int) + n := nraw.(int) + if n < o { + log.Printf("[INFO] Decreasing nodes in DAX cluster %s from %d to %d", d.Id(), o, n) + _, err := conn.DecreaseReplicationFactor(&dax.DecreaseReplicationFactorInput{ + ClusterName: aws.String(d.Id()), + NewReplicationFactor: aws.Int64(int64(nraw.(int))), + }) + if err != nil { + return fmt.Errorf("[WARN] Error increasing nodes in DAX cluster %s, error: %s", d.Id(), err) + } + awaitUpdate = true + } + if n > o { + log.Printf("[INFO] Increasing nodes in DAX cluster %s from %d to %d", d.Id(), o, n) + _, err := conn.IncreaseReplicationFactor(&dax.IncreaseReplicationFactorInput{ + ClusterName: aws.String(d.Id()), + NewReplicationFactor: aws.Int64(int64(nraw.(int))), + }) + if err != nil { + return fmt.Errorf("[WARN] Error increasing nodes in DAX cluster %s, error: %s", d.Id(), err) + } + awaitUpdate = true + } + } + + if awaitUpdate { + log.Printf("[DEBUG] Waiting for update: %s", d.Id()) + pending := []string{"modifying"} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{"available"}, + Refresh: daxClusterStateRefreshFunc(conn, d.Id(), "available", pending), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for DAX (%s) to update: %s", d.Id(), sterr) + } + } + + return resourceAwsDaxClusterRead(d, meta) +} + +func setDaxClusterNodeData(d *schema.ResourceData, c *dax.Cluster) error { + sortedNodes := make([]*dax.Node, len(c.Nodes)) + copy(sortedNodes, c.Nodes) + sort.Sort(byNodeId(sortedNodes)) + + nodeDate := make([]map[string]interface{}, 0, len(sortedNodes)) + + for _, node := range sortedNodes { + if node.NodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.AvailabilityZone == nil { + return fmt.Errorf("Unexpected nil pointer in: %s", node) + } + nodeDate = append(nodeDate, map[string]interface{}{ + "id": *node.NodeId, + "address": *node.Endpoint.Address, + "port": int(*node.Endpoint.Port), + "availability_zone": *node.AvailabilityZone, + }) + } + + return d.Set("nodes", nodeDate) +} + +type byNodeId []*dax.Node + +func (b byNodeId) Len() int { return len(b) } +func (b byNodeId) Swap(i, j int) { 
b[i], b[j] = b[j], b[i] } +func (b byNodeId) Less(i, j int) bool { + return b[i].NodeId != nil && b[j].NodeId != nil && + *b[i].NodeId < *b[j].NodeId +} + +func resourceAwsDaxClusterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).daxconn + + req := &dax.DeleteClusterInput{ + ClusterName: aws.String(d.Id()), + } + err := resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.DeleteCluster(req) + if err != nil { + if isAWSErr(err, dax.ErrCodeInvalidClusterStateFault, "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } + + log.Printf("[DEBUG] Waiting for deletion: %v", d.Id()) + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "available", "deleting", "incompatible-parameters", "incompatible-network"}, + Target: []string{}, + Refresh: daxClusterStateRefreshFunc(conn, d.Id(), "", []string{}), + Timeout: d.Timeout(schema.TimeoutDelete), + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + _, sterr := stateConf.WaitForState() + if sterr != nil { + return fmt.Errorf("Error waiting for DAX (%s) to delete: %s", d.Id(), sterr) + } + + d.SetId("") + + return nil +} + +func daxClusterStateRefreshFunc(conn *dax.DAX, clusterID, givenState string, pending []string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeClusters(&dax.DescribeClustersInput{ + ClusterNames: []*string{aws.String(clusterID)}, + }) + if err != nil { + if isAWSErr(err, dax.ErrCodeClusterNotFoundFault, "") { + log.Printf("[DEBUG] Detect deletion") + return nil, "", nil + } + + log.Printf("[ERROR] daxClusterStateRefreshFunc: %s", err) + return nil, "", err + } + + if len(resp.Clusters) == 0 { + return nil, "", fmt.Errorf("[WARN] Error: no DAX clusters found for id (%s)", clusterID) + } + + var c *dax.Cluster + for _, cluster := range resp.Clusters { + if *cluster.ClusterName == clusterID { + log.Printf("[DEBUG] Found matching DAX cluster: %s", *cluster.ClusterName) + c = cluster + } + } + + if c == nil { + return nil, "", fmt.Errorf("[WARN] Error: no matching DAX cluster for id (%s)", clusterID) + } + + // DescribeCluster returns a response without status late on in the + // deletion process - assume cluster is still deleting until we + // get ClusterNotFoundFault + if c.Status == nil { + log.Printf("[DEBUG] DAX Cluster %s has no status attribute set - assume status is deleting", clusterID) + return c, "deleting", nil + } + + log.Printf("[DEBUG] DAX Cluster (%s) status: %v", clusterID, *c.Status) + + // return the current state if it's in the pending array + for _, p := range pending { + log.Printf("[DEBUG] DAX: checking pending state (%s) for cluster (%s), cluster status: %s", pending, clusterID, *c.Status) + s := *c.Status + if p == s { + log.Printf("[DEBUG] Return with status: %v", *c.Status) + return c, p, nil + } + } + + // return given state if it's not in pending + if givenState != "" { + log.Printf("[DEBUG] DAX: checking given state (%s) of cluster (%s) against cluster status (%s)", givenState, clusterID, *c.Status) + // check to make sure we have the node count we're expecting + if int64(len(c.Nodes)) != *c.TotalNodes { + log.Printf("[DEBUG] Node count is not what is expected: %d found, %d expected", len(c.Nodes), *c.TotalNodes) + return nil, "creating", nil + } + + log.Printf("[DEBUG] Node count matched (%d)", len(c.Nodes)) + // loop the nodes and check their status as well + for _, n 
:= range c.Nodes { + log.Printf("[DEBUG] Checking cache node for status: %s", n) + if n.NodeStatus != nil && *n.NodeStatus != "available" { + log.Printf("[DEBUG] Node (%s) is not yet available, status: %s", *n.NodeId, *n.NodeStatus) + return nil, "creating", nil + } + log.Printf("[DEBUG] Cache node not in expected state") + } + log.Printf("[DEBUG] DAX returning given state (%s), cluster: %s", givenState, c) + return c, givenState, nil + } + log.Printf("[DEBUG] current status: %v", *c.Status) + return c, *c.Status, nil + } +} + +func buildDaxArn(identifier, partition, accountid, region string) (string, error) { + if partition == "" { + return "", fmt.Errorf("Unable to construct DAX ARN because of missing AWS partition") + } + if accountid == "" { + return "", fmt.Errorf("Unable to construct DAX ARN because of missing AWS Account ID") + } + + arn := arn.ARN{ + Partition: partition, + Service: "dax", + Region: region, + AccountID: accountid, + Resource: fmt.Sprintf("cache/%s", identifier), + } + + return arn.String(), nil +} diff --git a/aws/resource_aws_dax_cluster_test.go b/aws/resource_aws_dax_cluster_test.go new file mode 100644 index 00000000000..d1fcc2c5334 --- /dev/null +++ b/aws/resource_aws_dax_cluster_test.go @@ -0,0 +1,224 @@ +package aws + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dax" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSDAXCluster_basic(t *testing.T) { + var dc dax.Cluster + rString := acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDAXClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSDAXClusterConfig(rString), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "arn", regexp.MustCompile("^arn:aws:dax:[\\w-]+:\\d+:cache/")), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "cluster_name", regexp.MustCompile("^tf-\\w+$")), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "iam_role_arn", regexp.MustCompile("^arn:aws:iam::\\d+:role/")), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "node_type", "dax.r3.large"), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "replication_factor", "1"), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "description", "test cluster"), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "parameter_group_name", regexp.MustCompile("^default.dax")), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "maintenance_window", regexp.MustCompile("^\\w{3}:\\d{2}:\\d{2}-\\w{3}:\\d{2}:\\d{2}$")), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "subnet_group_name", "default"), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "nodes.0.id", regexp.MustCompile("^tf-[\\w-]+$")), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "configuration_endpoint", regexp.MustCompile(":\\d+$")), + resource.TestCheckResourceAttrSet( + "aws_dax_cluster.test", "cluster_address"), + resource.TestMatchResourceAttr( + "aws_dax_cluster.test", "port", regexp.MustCompile("^\\d+$")), + ), + }, + }, + }) +} + +func TestAccAWSDAXCluster_resize(t *testing.T) { + var dc dax.Cluster + rString := acctest.RandString(10) + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDAXClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSDAXClusterConfigResize_singleNode(rString), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "replication_factor", "1"), + ), + }, + { + Config: testAccAWSDAXClusterConfigResize_multiNode(rString), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "replication_factor", "2"), + ), + }, + { + Config: testAccAWSDAXClusterConfigResize_singleNode(rString), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + resource.TestCheckResourceAttr( + "aws_dax_cluster.test", "replication_factor", "1"), + ), + }, + }, + }) +} + +func testAccCheckAWSDAXClusterDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).daxconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_dax_cluster" { + continue + } + res, err := conn.DescribeClusters(&dax.DescribeClustersInput{ + ClusterNames: []*string{aws.String(rs.Primary.ID)}, + }) + if err != nil { + // Verify the error is what we want + if isAWSErr(err, dax.ErrCodeClusterNotFoundFault, "") { + continue + } + return err + } + if len(res.Clusters) > 0 { + return fmt.Errorf("still exist.") + } + } + return nil +} + +func testAccCheckAWSDAXClusterExists(n string, v *dax.Cluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No DAX cluster ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).daxconn + resp, err := conn.DescribeClusters(&dax.DescribeClustersInput{ + ClusterNames: []*string{aws.String(rs.Primary.ID)}, + }) + if err != nil { + return fmt.Errorf("DAX error: %v", err) + } + + for _, c := range resp.Clusters { + if *c.ClusterName == rs.Primary.ID { + *v = *c + } + } + + return nil + } +} + +var baseConfig = ` +provider "aws" { + region = "us-west-2" +} + +resource "aws_iam_role" "test" { + assume_role_policy = < 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, len(remove), len(remove)) + for i, t := range remove { + k[i] = t.Key + } + + _, err := conn.UntagResource(&dax.UntagResourceInput{ + ResourceName: aws.String(arn), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&dax.TagResourceInput{ + ResourceName: aws.String(arn), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsDax(oldTags, newTags []*dax.Tag) ([]*dax.Tag, []*dax.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*dax.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! 
+ remove = append(remove, t) + } + } + + return tagsFromMapDax(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapDax(m map[string]interface{}) []*dax.Tag { + result := make([]*dax.Tag, 0, len(m)) + for k, v := range m { + t := &dax.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredDax(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapDax(ts []*dax.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredDax(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredDax(t *dax.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + if r, _ := regexp.MatchString(v, *t.Key); r == true { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/aws/tagsDAX_test.go b/aws/tagsDAX_test.go new file mode 100644 index 00000000000..2fc65c9cc2b --- /dev/null +++ b/aws/tagsDAX_test.go @@ -0,0 +1,103 @@ +package aws + +import ( + "fmt" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dax" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestDaxTagsDiff(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create, Remove map[string]string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + } + + for i, tc := range cases { + c, r := diffTagsDax(tagsFromMapDax(tc.Old), tagsFromMapDax(tc.New)) + cm := tagsToMapDax(c) + rm := tagsToMapDax(r) + if !reflect.DeepEqual(cm, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, cm) + } + if !reflect.DeepEqual(rm, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, rm) + } + } +} + +func TestTagsDaxIgnore(t *testing.T) { + var ignoredTags []*dax.Tag + ignoredTags = append(ignoredTags, &dax.Tag{ + Key: aws.String("aws:cloudformation:logical-id"), + Value: aws.String("foo"), + }) + ignoredTags = append(ignoredTags, &dax.Tag{ + Key: aws.String("aws:foo:bar"), + Value: aws.String("baz"), + }) + for _, tag := range ignoredTags { + if !tagIgnoredDax(tag) { + t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) + } + } +} + +// testAccCheckTags can be used to check the tags on a resource. 
+func testAccCheckDaxTags( + ts []*dax.Tag, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + m := tagsToMapDax(ts) + v, ok := m[key] + if value != "" && !ok { + return fmt.Errorf("Missing tag: %s", key) + } else if value == "" && ok { + return fmt.Errorf("Extra tag: %s", key) + } + if value == "" { + return nil + } + + if v != value { + return fmt.Errorf("%s: bad value: %s", key, v) + } + + return nil + } +} diff --git a/website/aws.erb b/website/aws.erb index 81fba1358e4..1a50311c78c 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -589,6 +589,19 @@ + + > + DynamoDB Accelerator Resources + + + + > EC2 Resources