Merge pull request #34932 from sasidhar-aws/f-aws_fsx_lustre_file_system_per_unit_storage_throughput

r/aws_fsx_lustre_file_system - support modification of per_unit_storage_throughput parameter
ewbankkit authored Jan 4, 2024
2 parents 6e919e2 + a54abc3 commit 2e9d2e5
Showing 3 changed files with 87 additions and 3 deletions.
3 changes: 3 additions & 0 deletions .changelog/34932.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_fsx_lustre_file_system: Allow `per_unit_storage_throughput` to be updated in-place
+```
9 changes: 6 additions & 3 deletions internal/service/fsx/lustre_file_system.go
@@ -196,7 +196,6 @@ func ResourceLustreFileSystem() *schema.Resource {
"per_unit_storage_throughput": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntInSlice([]int{
12,
40,
@@ -510,14 +509,18 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData,
     input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string))
   }
 
-  if v, ok := d.GetOk("data_compression_type"); ok {
-    input.LustreConfiguration.DataCompressionType = aws.String(v.(string))
+  if d.HasChange("data_compression_type") {
+    input.LustreConfiguration.DataCompressionType = aws.String(d.Get("data_compression_type").(string))
   }
 
   if d.HasChange("log_configuration") {
     input.LustreConfiguration.LogConfiguration = expandLustreLogCreateConfiguration(d.Get("log_configuration").([]interface{}))
   }
 
+  if d.HasChange("per_unit_storage_throughput") {
+    input.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(d.Get("per_unit_storage_throughput").(int)))
+  }
+
   if d.HasChange("root_squash_configuration") {
     input.LustreConfiguration.RootSquashConfiguration = expandLustreRootSquashConfiguration(d.Get("root_squash_configuration").([]interface{}))
   }
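For context, here is a minimal standalone sketch (not part of this commit) of the AWS SDK for Go v1 request that the new `HasChange("per_unit_storage_throughput")` branch feeds into once the attribute no longer forces replacement. The file system ID and throughput value are placeholders, and the real update path builds a fuller `UpdateFileSystemInput` than this.

```go
package main

import (
  "context"
  "log"

  "github.com/aws/aws-sdk-go/aws"
  "github.com/aws/aws-sdk-go/aws/session"
  "github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
  client := fsx.New(session.Must(session.NewSession()))

  // Ask FSx to change per-unit storage throughput in place; the service
  // validates the requested value against the file system's deployment type.
  _, err := client.UpdateFileSystemWithContext(context.Background(), &fsx.UpdateFileSystemInput{
    FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder ID
    LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{
      PerUnitStorageThroughput: aws.Int64(100),
    },
  })
  if err != nil {
    log.Fatal(err)
  }
}
```

Because this is an update rather than a replacement, the file system ID is preserved, which is exactly what the new acceptance tests below assert.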
78 changes: 78 additions & 0 deletions internal/service/fsx/lustre_file_system_test.go
@@ -614,6 +614,45 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) {
   })
 }
 
+func TestAccFSxLustreFileSystem_deploymentTypePersistent1_perUnitStorageThroughput(t *testing.T) {
+  ctx := acctest.Context(t)
+  var filesystem1, filesystem2 fsx.FileSystem
+  resourceName := "aws_fsx_lustre_file_system.test"
+  rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+  resource.ParallelTest(t, resource.TestCase{
+    PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+    ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID),
+    ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+    CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx),
+    Steps: []resource.TestStep{
+      {
+        Config: testAccLustreFileSystemConfig_persistent1DeploymentType(rName, 50),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1),
+          // per_unit_storage_throughput=50 is only available with deployment_type=PERSISTENT_1, so we test both here.
+          resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"),
+          resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1),
+        ),
+      },
+      {
+        ResourceName: resourceName,
+        ImportState: true,
+        ImportStateVerify: true,
+        ImportStateVerifyIgnore: []string{"security_group_ids"},
+      },
+      {
+        Config: testAccLustreFileSystemConfig_persistent1DeploymentType(rName, 100),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2),
+          testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2),
+          resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "100"),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) {
   ctx := acctest.Context(t)
   var filesystem fsx.FileSystem
@@ -649,6 +688,45 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) {
   })
 }
 
+func TestAccFSxLustreFileSystem_deploymentTypePersistent2_perUnitStorageThroughput(t *testing.T) {
+  ctx := acctest.Context(t)
+  var filesystem1, filesystem2 fsx.FileSystem
+  resourceName := "aws_fsx_lustre_file_system.test"
+  rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+  resource.ParallelTest(t, resource.TestCase{
+    PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+    ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID),
+    ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+    CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx),
+    Steps: []resource.TestStep{
+      {
+        Config: testAccLustreFileSystemConfig_persistent2DeploymentType(rName, 125),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1),
+          // per_unit_storage_throughput=125 is only available with deployment_type=PERSISTENT_2, so we test both here.
+          resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "125"),
+          resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent2),
+        ),
+      },
+      {
+        ResourceName: resourceName,
+        ImportState: true,
+        ImportStateVerify: true,
+        ImportStateVerifyIgnore: []string{"security_group_ids"},
+      },
+      {
+        Config: testAccLustreFileSystemConfig_persistent2DeploymentType(rName, 250),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2),
+          testAccCheckLustreFileSystemNotRecreated(&filesystem1, &filesystem2),
+          resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "250"),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccFSxLustreFileSystem_logConfig(t *testing.T) {
   ctx := acctest.Context(t)
   var filesystem fsx.FileSystem
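The `testAccLustreFileSystemConfig_persistent1DeploymentType` and `_persistent2DeploymentType` config helpers referenced by the new tests are defined elsewhere in this test file and are not shown in the diff. Below is a purely hypothetical sketch of their shape; the storage capacity, subnet reference, and surrounding networking setup are assumptions, not the repository's actual helper, and it relies on the file's existing `fmt` import.

```go
// Hypothetical sketch only; the real helper lives elsewhere in
// lustre_file_system_test.go and likely composes shared VPC/subnet config.
func testAccLustreFileSystemConfig_persistent2DeploymentType(rName string, perUnitStorageThroughput int) string {
  return fmt.Sprintf(`
resource "aws_fsx_lustre_file_system" "test" {
  storage_capacity            = 1200
  subnet_ids                  = [aws_subnet.test.id] # assumed subnet from shared test config
  deployment_type             = "PERSISTENT_2"
  per_unit_storage_throughput = %[2]d

  tags = {
    Name = %[1]q
  }
}
`, rName, perUnitStorageThroughput)
}
```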
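Likewise, `testAccCheckLustreFileSystemNotRecreated` (also pre-existing and not shown here) is what turns these tests into a regression check for in-place updates. A plausible sketch, assuming it simply compares the file system IDs captured by the two `testAccCheckLustreFileSystemExists` calls:

```go
// Plausible sketch; the actual check is defined elsewhere in this test file.
func testAccCheckLustreFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc {
  return func(s *terraform.State) error {
    // Different IDs mean Terraform destroyed and recreated the file system
    // instead of updating per_unit_storage_throughput in place.
    if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) {
      return fmt.Errorf("FSx for Lustre file system (%s) recreated", aws.StringValue(i.FileSystemId))
    }
    return nil
  }
}
```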
