diff --git a/.changelog/20812.txt b/.changelog/20812.txt new file mode 100644 index 00000000000..f5aefb10a9e --- /dev/null +++ b/.changelog/20812.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_redshift_cluster: Add `availability_zone_relocation_enabled` attribute and allow `availability_zone` to be changed in-place. +``` + +```release-note:enhancement +data_source/aws_redshift_cluster: Add `availability_zone_relocation_enabled` attribute. +``` diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 0833af09224..e850f15ade4 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -1,6 +1,8 @@ package redshift import ( + "context" + "errors" "fmt" "log" "regexp" @@ -11,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/redshift" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -56,9 +59,12 @@ func ResourceCluster() *schema.Resource { "availability_zone": { Type: schema.TypeString, Optional: true, - ForceNew: true, Computed: true, }, + "availability_zone_relocation_enabled": { + Type: schema.TypeBool, + Optional: true, + }, "cluster_identifier": { Type: schema.TypeString, Required: true, @@ -313,7 +319,28 @@ func ResourceCluster() *schema.Resource { "tags_all": tftags.TagsSchemaComputed(), }, - CustomizeDiff: verify.SetTagsDiff, + CustomizeDiff: customdiff.All( + verify.SetTagsDiff, + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + if diff.Get("availability_zone_relocation_enabled").(bool) && diff.Get("publicly_accessible").(bool) { + return errors.New("availability_zone_relocation_enabled can not be true when 
publicly_accessible is true") + } + return nil + }, + func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { + if diff.Id() == "" { + return nil + } + if diff.Get("availability_zone_relocation_enabled").(bool) { + return nil + } + o, n := diff.GetChange("availability_zone") + if o.(string) != n.(string) { + return fmt.Errorf("cannot change availability_zone if availability_zone_relocation_enabled is not true") + } + return nil + }, + ), } } @@ -354,6 +381,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { restoreOpts.AvailabilityZone = aws.String(v.(string)) } + if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { + restoreOpts.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("cluster_subnet_group_name"); ok { restoreOpts.ClusterSubnetGroupName = aws.String(v.(string)) } @@ -394,8 +425,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { resp, err := conn.RestoreFromClusterSnapshot(restoreOpts) if err != nil { - log.Printf("[ERROR] Error Restoring Redshift Cluster from Snapshot: %s", err) - return err + return fmt.Errorf("error restoring Redshift Cluster from snapshot: %w", err) } d.SetId(aws.StringValue(resp.Cluster.ClusterIdentifier)) @@ -446,6 +476,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { createOpts.AvailabilityZone = aws.String(v.(string)) } + if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { + createOpts.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + } + if v, ok := d.GetOk("preferred_maintenance_window"); ok { createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) } @@ -477,8 +511,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Redshift Cluster create options: %s", createOpts) resp, err := conn.CreateCluster(createOpts) if err != nil { - log.Printf("[ERROR] Error creating Redshift Cluster: %s", err) - return err + return 
fmt.Errorf("error creating Redshift Cluster: %w", err) } log.Printf("[DEBUG]: Cluster create response: %s", resp) @@ -492,10 +525,14 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 10 * time.Second, } - _, err := stateConf.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for Redshift Cluster state to be \"available\": %s", err) + return fmt.Errorf("Error waiting for Redshift Cluster state to be \"available\": %w", err) + } + + _, err = waitClusterRelocationStatusResolved(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for Redshift Cluster Availability Zone Relocation Status to resolve: %w", err) } if v, ok := d.GetOk("snapshot_copy"); ok { @@ -507,7 +544,7 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { if _, ok := d.GetOk("logging.0.enable"); ok { if err := enableRedshiftClusterLogging(d, conn); err != nil { - return fmt.Errorf("error enabling Redshift Cluster (%s) logging: %s", d.Id(), err) + return fmt.Errorf("error enabling Redshift Cluster (%s) logging: %w", d.Id(), err) } } @@ -550,6 +587,11 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", arn) d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) d.Set("availability_zone", rsc.AvailabilityZone) + azr, err := clusterAvailabilityZoneRelocationStatus(rsc) + if err != nil { + return fmt.Errorf("error reading Redshift Cluster (%s): %w", d.Id(), err) + } + d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) if err := d.Set("cluster_nodes", flattenRedshiftClusterNodes(rsc.ClusterNodes)); err != nil { return fmt.Errorf("error setting cluster_nodes: %w", err) @@ -661,6 +703,11 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { requestUpdate = true } + if d.HasChange("availability_zone_relocation_enabled") { + 
req.AvailabilityZoneRelocation = aws.Bool(d.Get("availability_zone_relocation_enabled").(bool)) + requestUpdate = true + } + if d.HasChange("cluster_security_groups") { req.ClusterSecurityGroups = flex.ExpandStringSet(d.Get("cluster_security_groups").(*schema.Set)) requestUpdate = true @@ -722,11 +769,10 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { } if requestUpdate { - log.Printf("[INFO] Modifying Redshift Cluster: %s", d.Id()) - log.Printf("[DEBUG] Redshift Cluster Modify options: %s", req) + log.Printf("[DEBUG] Modifying Redshift Cluster: %s", d.Id()) _, err := conn.ModifyCluster(req) if err != nil { - return fmt.Errorf("Error modifying Redshift Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("Error modifying Redshift Cluster (%s): %w", d.Id(), err) } } @@ -745,23 +791,20 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { removeIams := os.Difference(ns) addIams := ns.Difference(os) - log.Printf("[INFO] Building Redshift Modify Cluster IAM Role Options") req := &redshift.ModifyClusterIamRolesInput{ ClusterIdentifier: aws.String(d.Id()), AddIamRoles: flex.ExpandStringSet(addIams), RemoveIamRoles: flex.ExpandStringSet(removeIams), } - log.Printf("[INFO] Modifying Redshift Cluster IAM Roles: %s", d.Id()) - log.Printf("[DEBUG] Redshift Cluster Modify IAM Role options: %s", req) + log.Printf("[DEBUG] Modifying Redshift Cluster IAM Roles: %s", d.Id()) _, err := conn.ModifyClusterIamRoles(req) if err != nil { - return fmt.Errorf("Error modifying Redshift Cluster IAM Roles (%s): %s", d.Id(), err) + return fmt.Errorf("Error modifying Redshift Cluster IAM Roles (%s): %w", d.Id(), err) } } if requestUpdate || d.HasChange("iam_roles") { - stateConf := &resource.StateChangeConf{ Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying", "available, prep-for-resize"}, Target: []string{"available"}, @@ -769,11 +812,39 @@ func resourceClusterUpdate(d *schema.ResourceData, meta 
interface{}) error { Timeout: d.Timeout(schema.TimeoutUpdate), MinTimeout: 10 * time.Second, } - - // Wait, catching any errors _, err := stateConf.WaitForState() if err != nil { - return fmt.Errorf("Error Modifying Redshift Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("Error waiting for Redshift Cluster modification (%s): %w", d.Id(), err) + } + + _, err = waitClusterRelocationStatusResolved(conn, d.Id()) + if err != nil { + return fmt.Errorf("error waiting for Redshift Cluster Availability Zone Relocation Status to resolve: %w", err) + } + } + + // Availability Zone cannot be changed at the same time as other settings + if d.HasChange("availability_zone") { + req := &redshift.ModifyClusterInput{ + ClusterIdentifier: aws.String(d.Id()), + AvailabilityZone: aws.String(d.Get("availability_zone").(string)), + } + log.Printf("[DEBUG] Relocating Redshift Cluster: %s", d.Id()) + _, err := conn.ModifyCluster(req) + if err != nil { + return fmt.Errorf("Error relocating Redshift Cluster (%s): %w", d.Id(), err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "deleting", "rebooting", "resizing", "renaming", "modifying", "available, prep-for-resize", "recovering"}, + Target: []string{"available"}, + Refresh: resourceClusterStateRefreshFunc(d.Id(), conn), + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 10 * time.Second, + } + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Redshift Cluster relocation (%s): %w", d.Id(), err) } } @@ -788,7 +859,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { ClusterIdentifier: aws.String(d.Id()), }) if err != nil { - return fmt.Errorf("Failed to disable snapshot copy: %s", err) + return fmt.Errorf("Failed to disable snapshot copy: %w", err) } } } @@ -868,7 +939,7 @@ func enableRedshiftSnapshotCopy(id string, scList []interface{}, conn *redshift. 
_, err := conn.EnableSnapshotCopy(&input) if err != nil { - return fmt.Errorf("Failed to enable snapshot copy: %s", err) + return fmt.Errorf("Failed to enable snapshot copy: %w", err) } return nil } @@ -990,3 +1061,15 @@ func flattenRedshiftClusterNodes(apiObjects []*redshift.ClusterNode) []interface return tfList } + +func clusterAvailabilityZoneRelocationStatus(cluster *redshift.Cluster) (bool, error) { + // AvailabilityZoneRelocation is not returned by the API, and AvailabilityZoneRelocationStatus is not implemented as Const at this time. + switch availabilityZoneRelocationStatus := aws.StringValue(cluster.AvailabilityZoneRelocationStatus); availabilityZoneRelocationStatus { + case "enabled": + return true, nil + case "disabled": + return false, nil + default: + return false, fmt.Errorf("unexpected AvailabilityZoneRelocationStatus value %q returned by API", availabilityZoneRelocationStatus) + } +} diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index d61589d277e..15db32922bf 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -37,6 +37,11 @@ func DataSourceCluster() *schema.Resource { Computed: true, }, + "availability_zone_relocation_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "bucket_name": { Type: schema.TypeString, Computed: true, @@ -196,6 +201,11 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) d.Set("availability_zone", rsc.AvailabilityZone) + azr, err := clusterAvailabilityZoneRelocationStatus(rsc) + if err != nil { + return fmt.Errorf("error reading Redshift Cluster (%s): %w", d.Id(), err) + } + d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) if len(rsc.ClusterParameterGroups) > 0 { 
diff --git a/internal/service/redshift/cluster_data_source_test.go b/internal/service/redshift/cluster_data_source_test.go index abcf1174b02..ae16d732ac6 100644 --- a/internal/service/redshift/cluster_data_source_test.go +++ b/internal/service/redshift/cluster_data_source_test.go @@ -12,6 +12,7 @@ import ( func TestAccRedshiftClusterDataSource_basic(t *testing.T) { dataSourceName := "data.aws_redshift_cluster.test" + resourceName := "aws_redshift_cluster.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -40,6 +41,7 @@ func TestAccRedshiftClusterDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrSet(dataSourceName, "port"), resource.TestCheckResourceAttrSet(dataSourceName, "preferred_maintenance_window"), resource.TestCheckResourceAttrSet(dataSourceName, "publicly_accessible"), + resource.TestCheckResourceAttrPair(dataSourceName, "availability_zone_relocation_enabled", resourceName, "availability_zone_relocation_enabled"), ), }, }, @@ -91,6 +93,26 @@ func TestAccRedshiftClusterDataSource_logging(t *testing.T) { }) } +func TestAccRedshiftClusterDataSource_availabilityZoneRelocationEnabled(t *testing.T) { + dataSourceName := "data.aws_redshift_cluster.test" + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccClusterDataSourceConfig_availabilityZoneRelocationEnabled(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "availability_zone_relocation_enabled", resourceName, "availability_zone_relocation_enabled"), + ), + }, + }, + }) +} + func testAccClusterDataSourceConfig(rName string) string { return fmt.Sprintf(` resource "aws_redshift_cluster" "test" 
{ @@ -234,3 +256,25 @@ data "aws_redshift_cluster" "test" { } `, rName) } + +func testAccClusterDataSourceConfig_availabilityZoneRelocationEnabled(rName string) string { + return fmt.Sprintf(` +resource "aws_redshift_cluster" "test" { + cluster_identifier = %[1]q + + database_name = "testdb" + master_username = "foo" + master_password = "Password1" + node_type = "ra3.xlplus" + cluster_type = "single-node" + skip_final_snapshot = true + publicly_accessible = false + + availability_zone_relocation_enabled = true +} + +data "aws_redshift_cluster" "test" { + cluster_identifier = aws_redshift_cluster.test.cluster_identifier +} +`, rName) +} diff --git a/internal/service/redshift/cluster_test.go b/internal/service/redshift/cluster_test.go index bccc7f5855e..1586e975246 100644 --- a/internal/service/redshift/cluster_test.go +++ b/internal/service/redshift/cluster_test.go @@ -33,11 +33,13 @@ func TestAccRedshiftCluster_basic(t *testing.T) { Config: testAccClusterConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "availability_zone", "data.aws_availability_zones.available", "names.0"), resource.TestCheckResourceAttr(resourceName, "cluster_nodes.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "cluster_nodes.0.public_ip_address"), resource.TestCheckResourceAttr(resourceName, "cluster_type", "single-node"), resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "true"), resource.TestMatchResourceAttr(resourceName, "dns_name", regexp.MustCompile(fmt.Sprintf("^%s.*\\.redshift\\..*", rName))), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), ), }, { @@ -427,7 +429,7 @@ func TestAccRedshiftCluster_tags(t *testing.T) { } func TestAccRedshiftCluster_forceNewUsername(t *testing.T) { - var v redshift.Cluster + var v1, v2 redshift.Cluster resourceName := "aws_redshift_cluster.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -440,16 +442,17 @@ func TestAccRedshiftCluster_forceNewUsername(t *testing.T) { { Config: testAccClusterConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), - testAccCheckClusterMasterUsername(&v, "foo_test"), + testAccCheckClusterExists(resourceName, &v1), + testAccCheckClusterMasterUsername(&v1, "foo_test"), resource.TestCheckResourceAttr(resourceName, "master_username", "foo_test"), ), }, { Config: testAccClusterConfig_updatedUsername(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), - testAccCheckClusterMasterUsername(&v, "new_username"), + testAccCheckClusterExists(resourceName, &v2), + testAccCheckClusterRecreated(&v1, &v2), + testAccCheckClusterMasterUsername(&v2, "new_username"), resource.TestCheckResourceAttr(resourceName, "master_username", "new_username"), ), }, @@ -458,7 +461,7 @@ func TestAccRedshiftCluster_forceNewUsername(t *testing.T) { } func TestAccRedshiftCluster_changeAvailabilityZone(t *testing.T) { - var v redshift.Cluster + var v1, v2 redshift.Cluster resourceName := "aws_redshift_cluster.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -469,16 +472,19 @@ func TestAccRedshiftCluster_changeAvailabilityZone(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), + Config: testAccClusterConfig_updateAvailabilityZone(rName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(resourceName, &v1), + resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "false"), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "true"), resource.TestCheckResourceAttrPair(resourceName, "availability_zone", "data.aws_availability_zones.available", "names.0"), 
), }, { - Config: testAccClusterConfig_updatedAvailabilityZone(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), + Config: testAccClusterConfig_updateAvailabilityZone(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(resourceName, &v2), + testAccCheckClusterNotRecreated(&v1, &v2), resource.TestCheckResourceAttrPair(resourceName, "availability_zone", "data.aws_availability_zones.available", "names.1"), ), }, @@ -486,6 +492,67 @@ func TestAccRedshiftCluster_changeAvailabilityZone(t *testing.T) { }) } +func TestAccRedshiftCluster_changeAvailabilityZoneAndSetAvailabilityZoneRelocation(t *testing.T) { + var v1, v2 redshift.Cluster + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_updateAvailabilityZone_availabilityZoneRelocationNotSet(rName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(resourceName, &v1), + resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "false"), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), + resource.TestCheckResourceAttrPair(resourceName, "availability_zone", "data.aws_availability_zones.available", "names.0"), + ), + }, + { + Config: testAccClusterConfig_updateAvailabilityZone(rName, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(resourceName, &v2), + testAccCheckClusterNotRecreated(&v1, &v2), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "true"), + resource.TestCheckResourceAttrPair(resourceName, "availability_zone", 
"data.aws_availability_zones.available", "names.1"), + ), + }, + }, + }) +} + +func TestAccRedshiftCluster_changeAvailabilityZone_availabilityZoneRelocationNotSet(t *testing.T) { + var v redshift.Cluster + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_updateAvailabilityZone_availabilityZoneRelocationNotSet(rName, 0), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "publicly_accessible", "false"), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), + resource.TestCheckResourceAttrPair(resourceName, "availability_zone", "data.aws_availability_zones.available", "names.0"), + ), + }, + { + Config: testAccClusterConfig_updateAvailabilityZone_availabilityZoneRelocationNotSet(rName, 1), + ExpectError: regexp.MustCompile(`cannot change availability_zone if availability_zone_relocation_enabled is not true`), + }, + }, + }) +} + func TestAccRedshiftCluster_changeEncryption1(t *testing.T) { var cluster1, cluster2 redshift.Cluster resourceName := "aws_redshift_cluster.test" @@ -546,6 +613,62 @@ func TestAccRedshiftCluster_changeEncryption2(t *testing.T) { }) } +func TestAccRedshiftCluster_availabilityZoneRelocation(t *testing.T) { + var v redshift.Cluster + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccClusterConfig_availabilityZoneRelocation(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "final_snapshot_identifier", + "master_password", + "skip_final_snapshot", + }, + }, + { + Config: testAccClusterConfig_availabilityZoneRelocation(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "availability_zone_relocation_enabled", "false"), + ), + }, + }, + }) +} + +func TestAccRedshiftCluster_availabilityZoneRelocation_publiclyAccessible(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_availabilityZoneRelocation_publiclyAccessible(rName), + ExpectError: regexp.MustCompile(`availability_zone_relocation_enabled can not be true when publicly_accessible is true`), + }, + }, + }) +} + func testAccCheckClusterDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).RedshiftConn @@ -655,6 +778,21 @@ func testAccCheckClusterNotRecreated(i, j *redshift.Cluster) resource.TestCheckF } } +func testAccCheckClusterRecreated(i, j *redshift.Cluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + // In lieu of some other uniquely identifying attribute from the API that always changes + // when a cluster is destroyed and recreated with the same identifier, we use the SSH key + // as it will get regenerated 
when a cluster is destroyed. + // Certain update operations (e.g KMS encrypting a cluster) will change ClusterCreateTime. + // Clusters with the same identifier can/will have an overlapping Endpoint.Address. + if aws.StringValue(i.ClusterPublicKey) == aws.StringValue(j.ClusterPublicKey) { + return errors.New("Redshift Cluster was not recreated") + } + + return nil + } +} + func testAccClusterConfig_updateNodeCount(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), fmt.Sprintf(` resource "aws_redshift_cluster" "test" { @@ -1269,18 +1407,84 @@ resource "aws_redshift_cluster" "test" { `, rName)) } -func testAccClusterConfig_updatedAvailabilityZone(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), fmt.Sprintf(` +func testAccClusterConfig_updateAvailabilityZone(rName string, regionIndex int) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), + fmt.Sprintf(` resource "aws_redshift_cluster" "test" { cluster_identifier = %[1]q - availability_zone = data.aws_availability_zones.available.names[1] database_name = "mydb" master_username = "foo_test" master_password = "Mustbe8characters" - node_type = "dc2.large" - automated_snapshot_retention_period = 0 + node_type = "ra3.xlplus" + automated_snapshot_retention_period = 1 allow_version_upgrade = false skip_final_snapshot = true + + publicly_accessible = false + availability_zone_relocation_enabled = true + availability_zone = data.aws_availability_zones.available.names[%[2]d] +} +`, rName, regionIndex)) +} + +func testAccClusterConfig_updateAvailabilityZone_availabilityZoneRelocationNotSet(rName string, regionIndex int) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), + fmt.Sprintf(` +resource "aws_redshift_cluster" "test" { + cluster_identifier = %[1]q + database_name = "mydb" + master_username = "foo_test" + 
master_password = "Mustbe8characters" + node_type = "ra3.xlplus" + automated_snapshot_retention_period = 1 + allow_version_upgrade = false + skip_final_snapshot = true + + publicly_accessible = false + availability_zone_relocation_enabled = false + availability_zone = data.aws_availability_zones.available.names[%[2]d] +} +`, rName, regionIndex)) +} + +func testAccClusterConfig_availabilityZoneRelocation(rName string, enabled bool) string { + return acctest.ConfigCompose( + fmt.Sprintf(` +resource "aws_redshift_cluster" "test" { + cluster_identifier = %[1]q + database_name = "mydb" + master_username = "foo_test" + master_password = "Mustbe8characters" + node_type = "ra3.xlplus" + number_of_nodes = 2 + cluster_type = "multi-node" + automated_snapshot_retention_period = 1 + allow_version_upgrade = false + skip_final_snapshot = true + + publicly_accessible = false + availability_zone_relocation_enabled = %[2]t +} +`, rName, enabled)) +} + +func testAccClusterConfig_availabilityZoneRelocation_publiclyAccessible(rName string) string { + return acctest.ConfigCompose( + fmt.Sprintf(` +resource "aws_redshift_cluster" "test" { + cluster_identifier = %[1]q + database_name = "mydb" + master_username = "foo_test" + master_password = "Mustbe8characters" + node_type = "ra3.xlplus" + automated_snapshot_retention_period = 1 + allow_version_upgrade = false + skip_final_snapshot = true + + publicly_accessible = true + availability_zone_relocation_enabled = true } `, rName)) } diff --git a/internal/service/redshift/enum.go b/internal/service/redshift/enum.go index 9d2a5e24007..4b49fa8c3d1 100644 --- a/internal/service/redshift/enum.go +++ b/internal/service/redshift/enum.go @@ -37,3 +37,24 @@ func clusterType_Values() []string { clusterTypeSingleNode, } } + +const ( + clusterAvailabilityZoneRelocationStatusEnabled = "enabled" + clusterAvailabilityZoneRelocationStatusDisabled = "disabled" + clusterAvailabilityZoneRelocationStatusPendingEnabling = "pending_enabling" + 
clusterAvailabilityZoneRelocationStatusPendingDisabling = "pending_disabling" +) + +func clusterAvailabilityZoneRelocationStatus_TerminalValues() []string { + return []string{ + clusterAvailabilityZoneRelocationStatusEnabled, + clusterAvailabilityZoneRelocationStatusDisabled, + } +} +func clusterAvailabilityZoneRelocationStatus_PendingValues() []string { + return []string{ + clusterAvailabilityZoneRelocationStatusPendingEnabling, + clusterAvailabilityZoneRelocationStatusPendingDisabling, + } + +} diff --git a/internal/service/redshift/status.go b/internal/service/redshift/status.go index 9709bad6982..63898f2afa6 100644 --- a/internal/service/redshift/status.go +++ b/internal/service/redshift/status.go @@ -22,3 +22,19 @@ func statusCluster(conn *redshift.Redshift, id string) resource.StateRefreshFunc return output, aws.StringValue(output.ClusterStatus), nil } } + +func statusClusterAvailabilityZoneRelocation(conn *redshift.Redshift, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindClusterByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.AvailabilityZoneRelocationStatus), nil + } +} diff --git a/internal/service/redshift/wait.go b/internal/service/redshift/wait.go index 032fd14965a..4ad501f94e2 100644 --- a/internal/service/redshift/wait.go +++ b/internal/service/redshift/wait.go @@ -9,6 +9,8 @@ import ( const ( clusterInvalidClusterStateFaultTimeout = 15 * time.Minute + + clusterRelocationStatusResolvedTimeout = 1 * time.Minute ) func waitClusterDeleted(conn *redshift.Redshift, id string, timeout time.Duration) (*redshift.Cluster, error) { @@ -35,3 +37,20 @@ func waitClusterDeleted(conn *redshift.Redshift, id string, timeout time.Duratio return nil, err } + +func waitClusterRelocationStatusResolved(conn *redshift.Redshift, id string) (*redshift.Cluster, error) { + stateConf := 
&resource.StateChangeConf{ + Pending: clusterAvailabilityZoneRelocationStatus_PendingValues(), + Target: clusterAvailabilityZoneRelocationStatus_TerminalValues(), + Refresh: statusClusterAvailabilityZoneRelocation(conn, id), + Timeout: clusterRelocationStatusResolvedTimeout, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*redshift.Cluster); ok { + return output, err + } + + return nil, err +} diff --git a/website/docs/d/redshift_cluster.html.markdown b/website/docs/d/redshift_cluster.html.markdown index f4be77cf712..ffd98b3de54 100644 --- a/website/docs/d/redshift_cluster.html.markdown +++ b/website/docs/d/redshift_cluster.html.markdown @@ -13,12 +13,12 @@ Provides details about a specific redshift cluster. ## Example Usage ```terraform -data "aws_redshift_cluster" "test_cluster" { - cluster_identifier = "test-cluster" +data "aws_redshift_cluster" "example" { + cluster_identifier = "example-cluster" } -resource "aws_kinesis_firehose_delivery_stream" "test_stream" { - name = "terraform-kinesis-firehose-test-stream" +resource "aws_kinesis_firehose_delivery_stream" "example_stream" { + name = "terraform-kinesis-firehose-example-stream" destination = "redshift" s3_configuration { @@ -31,12 +31,12 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { redshift_configuration { role_arn = aws_iam_role.firehose_role.arn - cluster_jdbcurl = "jdbc:redshift://${data.aws_redshift_cluster.test_cluster.endpoint}/${data.aws_redshift_cluster.test_cluster.database_name}" - username = "testuser" - password = "T3stPass" - data_table_name = "test-table" + cluster_jdbcurl = "jdbc:redshift://${data.aws_redshift_cluster.example.endpoint}/${data.aws_redshift_cluster.example.database_name}" + username = "exampleuser" + password = "Exampl3Pass" + data_table_name = "example-table" copy_options = "delimiter '|'" # the default delimiter - data_table_columns = "test-col" + data_table_columns = "example-col" } } ``` @@ -54,6 +54,7 @@ In addition to 
all arguments above, the following attributes are exported: * `allow_version_upgrade` - Whether major version upgrades can be applied during maintenance period * `automated_snapshot_retention_period` - The backup retention period * `availability_zone` - The availability zone of the cluster +* `availability_zone_relocation_enabled` - Indicates whether the cluster is able to be relocated to another availability zone. * `bucket_name` - The name of the S3 bucket where the log files are to be stored * `cluster_identifier` - The cluster identifier * `cluster_parameter_group_name` - The name of the parameter group to be associated with this cluster diff --git a/website/docs/r/redshift_cluster.html.markdown b/website/docs/r/redshift_cluster.html.markdown index 455898a8445..c0a1cabebc6 100644 --- a/website/docs/r/redshift_cluster.html.markdown +++ b/website/docs/r/redshift_cluster.html.markdown @@ -16,10 +16,10 @@ Provides a Redshift Cluster Resource. ## Example Usage ```terraform -resource "aws_redshift_cluster" "default" { +resource "aws_redshift_cluster" "example" { cluster_identifier = "tf-redshift-cluster" database_name = "mydb" - master_username = "foo" + master_username = "exampleuser" master_password = "Mustbe8characters" node_type = "dc1.large" cluster_type = "single-node" @@ -33,29 +33,30 @@ the [AWS official documentation](http://docs.aws.amazon.com/cli/latest/reference The following arguments are supported: -* `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case -string. +* `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case string. * `database_name` - (Optional) The name of the first database to be created when the cluster is created. - If you do not provide a name, Amazon Redshift will create a default database called `dev`. + If you do not provide a name, Amazon Redshift will create a default database called `dev`. * `node_type` - (Required) The node type to be provisioned for the cluster. 
* `cluster_type` - (Optional) The cluster type to use. Either `single-node` or `multi-node`. * `master_password` - (Required unless a `snapshot_identifier` is provided) Password for the master DB user. - Note that this may show up in logs, and it will be stored in the state file. Password must contain at least 8 chars and - contain at least one uppercase letter, one lowercase letter, and one number. + Note that this may show up in logs, and it will be stored in the state file. Password must contain at least 8 chars and + contain at least one uppercase letter, one lowercase letter, and one number. * `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user. - * `cluster_security_groups` - (Optional) A list of security groups to be associated with this cluster. * `vpc_security_group_ids` - (Optional) A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster. * `cluster_subnet_group_name` - (Optional) The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC). -* `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. +* `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if `availability_zone_relocation_enabled` is `true`. 
+* `availability_zone_relocation_enabled` - (Optional) If true, the cluster can be relocated to another availability zone, either automatically by AWS or when requested. Default is `false`. Available for use on clusters from the RA3 instance family. * `preferred_maintenance_window` - (Optional) The weekly time range (in UTC) during which automated cluster maintenance can occur. - Format: ddd:hh24:mi-ddd:hh24:mi + Format: ddd:hh24:mi-ddd:hh24:mi * `cluster_parameter_group_name` - (Optional) The name of the parameter group to be associated with this cluster. * `automated_snapshot_retention_period` - (Optional) The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1. * `port` - (Optional) The port number on which the cluster accepts incoming connections. - The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default port is 5439. + The cluster is accessible only via the JDBC and ODBC connection strings. + Part of the connection string requires the port on which the cluster will listen for incoming connections. + Default port is 5439. * `cluster_version` - (Optional) The version of the Amazon Redshift engine software that you want to deploy on the cluster. - The version selected runs on all the nodes in the cluster. + The version selected runs on all the nodes in the cluster. * `allow_version_upgrade` - (Optional) If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true * `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. 
* `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`.