From f6ca055660ae7c0b51789f6044878803df72971e Mon Sep 17 00:00:00 2001 From: hezijie Date: Mon, 25 Oct 2021 17:22:39 +0800 Subject: [PATCH 1/9] add new resource `azurerm_storage_disks_pool_managed_disk_attachment` --- ...rage_disks_pool_managed_disk_attachment.go | 50 ++++ ...disks_pool_managed_disk_attachment_test.go | 17 ++ internal/services/storage/registration.go | 12 + ...s_pool_managed_disk_attachment_resource.go | 214 ++++++++++++++ ...l_managed_disk_attachment_resource_test.go | 272 ++++++++++++++++++ ...rage_disks_pool_managed_disk_attachment.go | 19 ++ ...disks_pool_managed_disk_attachment_test.go | 128 +++++++++ .../docs/r/storage_disks_pool.html.markdown | 8 + ...pool_managed_disk_attachment.html.markdown | 112 ++++++++ 9 files changed, 832 insertions(+) create mode 100644 internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go create mode 100644 internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go create mode 100644 internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go create mode 100644 internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go create mode 100644 internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go create mode 100644 internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go create mode 100644 website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown diff --git a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go new file mode 100644 index 000000000000..df538f8ccdf0 --- /dev/null +++ b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go @@ -0,0 +1,50 @@ +package parse + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-provider-azurerm/internal/resourceid" + computeParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" +) + +const storageDiskPoolManagedDiskAttachmentIdSeparator = "/managedDisks|" + +var _ resourceid.Formatter = StorageDisksPoolManagedDiskAttachmentId{} + +type StorageDisksPoolManagedDiskAttachmentId struct { + DisksPoolId StorageDisksPoolId + ManagedDiskId computeParse.ManagedDiskId +} + +func NewStorageDisksPoolManagedDiskAttachmentId(diskPoolId StorageDisksPoolId, managedDiskId computeParse.ManagedDiskId) StorageDisksPoolManagedDiskAttachmentId { + return StorageDisksPoolManagedDiskAttachmentId{ + DisksPoolId: diskPoolId, + ManagedDiskId: managedDiskId, + } +} + +func (d StorageDisksPoolManagedDiskAttachmentId) ID() string { + return fmt.Sprintf("%s%s%s", d.DisksPoolId.ID(), storageDiskPoolManagedDiskAttachmentIdSeparator, d.ManagedDiskId.ID()) +} + +func StorageDisksPoolManagedDiskAttachmentID(input string) (*StorageDisksPoolManagedDiskAttachmentId, error) { + if !strings.Contains(input, storageDiskPoolManagedDiskAttachmentIdSeparator) { + return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) + } + parts := strings.Split(input, storageDiskPoolManagedDiskAttachmentIdSeparator) + if len(parts) != 2 { + return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) + } + + poolId, err := StorageDisksPoolID(parts[0]) + if err != nil { + return nil, fmt.Errorf("malformed disks pool id: %q, %v", poolId.ID(), err) + } + diskId, err := computeParse.ManagedDiskID(parts[1]) + if err != nil { + return nil, fmt.Errorf("malformed disk id: %q, 
%v", diskId.ID(), err) + } + id := NewStorageDisksPoolManagedDiskAttachmentId(*poolId, *diskId) + return &id, nil +} diff --git a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go new file mode 100644 index 000000000000..db2c94400e36 --- /dev/null +++ b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go @@ -0,0 +1,17 @@ +package parse + +import ( + "testing" + + computeparse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" +) + +func TestStorageDisksPoolManagedDiskAttachmentIDFormatter(t *testing.T) { + diskPoolId := NewStorageDisksPoolID("12345678-1234-9876-4563-123456789012", "resGroup1", "storagePool1") + managedDiskId := computeparse.NewManagedDiskID("12345678-1234-9876-4563-123456789012", "resGroup1", "diks1") + actual := NewStorageDisksPoolManagedDiskAttachmentId(diskPoolId, managedDiskId).ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/diks1" + if actual != expected { + t.Fatalf("Expected %q but got %q", expected, actual) + } +} diff --git a/internal/services/storage/registration.go b/internal/services/storage/registration.go index 773e0a7ada60..dd9957019e48 100644 --- a/internal/services/storage/registration.go +++ b/internal/services/storage/registration.go @@ -1,6 +1,7 @@ package storage import ( + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" ) @@ -60,3 +61,14 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_storage_sync_group": resourceStorageSyncGroup(), } } + +func (r Registration) DataSources() []sdk.DataSource { + return []sdk.DataSource{} +} + +func (r Registration) Resources() []sdk.Resource { + return []sdk.Resource{ + DisksPoolResource{}, + DisksPoolManagedDiskAttachmentResource{}, + } +} diff --git a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go b/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go new file mode 100644 index 000000000000..e76eeec48869 --- /dev/null +++ b/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go @@ -0,0 +1,214 @@ +package storage + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-azurerm/internal/locks" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + computeParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" + computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type DisksPoolManagedDiskAttachmentResource struct{} + +var _ sdk.Resource = DisksPoolManagedDiskAttachmentResource{} + +type 
DisksPoolManagedDiskAttachmentModel struct { + DisksPoolId string `tfschema:"disks_pool_id"` + DiskId string `tfschema:"managed_disk_id"` +} + +func (d DisksPoolManagedDiskAttachmentResource) Arguments() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "disks_pool_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.StorageDisksPoolID, + }, + "managed_disk_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: computeValidate.ManagedDiskID, + }, + } +} + +func (d DisksPoolManagedDiskAttachmentResource) Attributes() map[string]*schema.Schema { + return map[string]*schema.Schema{} +} + +func (d DisksPoolManagedDiskAttachmentResource) ModelObject() interface{} { + return &DisksPoolManagedDiskAttachmentModel{} +} + +func (d DisksPoolManagedDiskAttachmentResource) ResourceType() string { + return "azurerm_storage_disks_pool_managed_disk_attachment" +} + +func (d DisksPoolManagedDiskAttachmentResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + attachment := DisksPoolManagedDiskAttachmentModel{} + err := metadata.Decode(&attachment) + if err != nil { + return err + } + subscriptionId := metadata.Client.Account.SubscriptionId + poolId, err := parse.StorageDisksPoolID(attachment.DisksPoolId) + if err != nil { + return err + } + if poolId.SubscriptionId != subscriptionId { + return fmt.Errorf("Disks Pool subscription id %q is different from provider's subscription", poolId.SubscriptionId) + } + diskId, err := computeParse.ManagedDiskID(attachment.DiskId) + if err != nil { + return err + } + locks.ByID(attachment.DisksPoolId) + defer locks.UnlockByID(attachment.DisksPoolId) + id := parse.NewStorageDisksPoolManagedDiskAttachmentId(*poolId, *diskId) + + client := metadata.Client.Storage.DisksPoolsClient + poolResp, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) + if err != nil { + return fmt.Errorf("retrieving %q: %+v", *poolId, err) + } + + if poolResp.Disks == nil { + poolResp.Disks = &[]storagepool.Disk{} + } + for _, disk := range *poolResp.Disks { + if disk.ID == nil { + continue + } + existedDiskId, err := computeParse.ManagedDiskID(*disk.ID) + if err != nil { + return fmt.Errorf("error on parsing existing attached disk id %q %+v", *disk.ID, err) + } + if *existedDiskId == *diskId { + return metadata.ResourceRequiresImport(d.ResourceType(), id) + } + } + + disks := append(*poolResp.Disks, storagepool.Disk{ + ID: utils.String(diskId.ID()), + }) + + future, err := client.Update(ctx, poolId.ResourceGroup, poolId.DiskPoolName, storagepool.DiskPoolUpdate{ + DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ + Disks: &disks, + }, + }) + if err != nil { + return fmt.Errorf("creation of %q: %+v", id, err) + } + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for creation of %q: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (d DisksPoolManagedDiskAttachmentResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + id, err := parse.StorageDisksPoolManagedDiskAttachmentID(metadata.ResourceData.Id()) + if err != nil { + return err + } + poolId := id.DisksPoolId + client := metadata.Client.Storage.DisksPoolsClient + + poolResp, err := client.Get(ctx, poolId.ResourceGroup, 
poolId.DiskPoolName) + if err != nil { + if utils.ResponseWasNotFound(poolResp.Response) { + return metadata.MarkAsGone(id) + } + return fmt.Errorf("retrieving disks pool %q error: %+v", id.DisksPoolId, err) + } + if poolResp.DiskPoolProperties == nil || poolResp.DiskPoolProperties.Disks == nil { + return metadata.MarkAsGone(id) + } + + for _, disk := range *poolResp.Disks { + if disk.ID != nil && *disk.ID == id.ManagedDiskId.ID() { + m := DisksPoolManagedDiskAttachmentModel{ + DisksPoolId: id.DisksPoolId.ID(), + DiskId: id.ManagedDiskId.ID(), + } + return metadata.Encode(&m) + } + } + + return metadata.MarkAsGone(id) + }, + } +} + +func (d DisksPoolManagedDiskAttachmentResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + diskToDetach := &DisksPoolManagedDiskAttachmentModel{} + err := metadata.Decode(diskToDetach) + if err != nil { + return err + } + poolId, err := parse.StorageDisksPoolID(diskToDetach.DisksPoolId) + if err != nil { + return err + } + locks.ByID(diskToDetach.DisksPoolId) + defer locks.UnlockByID(diskToDetach.DisksPoolId) + + client := metadata.Client.Storage.DisksPoolsClient + pool, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) + if err != nil { + return fmt.Errorf("retrieving disks pool %q error %v", diskToDetach.DisksPoolId, err) + } + if pool.Disks == nil { + return nil + } + attachedDisks := *pool.Disks + remainingDisks := make([]storagepool.Disk, 0) + for _, attachedDisk := range attachedDisks { + if utils.NormalizeNilableString(attachedDisk.ID) != diskToDetach.DiskId { + remainingDisks = append(remainingDisks, attachedDisk) + } + } + + future, err := client.Update(ctx, poolId.ResourceGroup, poolId.DiskPoolName, storagepool.DiskPoolUpdate{ + DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ + Disks: &remainingDisks, + }, + }) + if err != nil { + return fmt.Errorf("error on deletion of disks pool managed disk attachment %q: %+v", metadata.ResourceData.Id(), err) + } + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("waiting for deletion of disks pool managed disk attatchment %q: %+v", metadata.ResourceData.Id(), err) + } + return nil + }, + } +} + +func (d DisksPoolManagedDiskAttachmentResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return validate.StorageDisksPoolManagedDiskAttachment +} diff --git a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go b/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go new file mode 100644 index 000000000000..99ff7c145804 --- /dev/null +++ b/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go @@ -0,0 +1,272 @@ +package storage_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type DisksPoolManagedDiskAttachmentResource struct{} + +func TestAccStorageDisksPoolDiskAttachment_basic(t 
*testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + a := DisksPoolManagedDiskAttachmentResource{} + data.ResourceTest(t, a, []acceptance.TestStep{ + { + Config: a.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(a), + ), + }, + data.ImportStep(), + }) +} + +func TestAccStorageDisksPoolDiskAttachment_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + a := DisksPoolManagedDiskAttachmentResource{} + data.ResourceTest(t, a, []acceptance.TestStep{ + { + Config: a.basic(data), + Check: check.That(data.ResourceName).ExistsInAzure(a), + }, + { + Config: a.requiresImport(data), + ExpectError: acceptance.RequiresImportError("azurerm_storage_disks_pool_managed_disk_attachment"), + }, + }) +} + +func TestAccStorageDisksPoolDiskAttachment_multipleDisks(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + a := DisksPoolManagedDiskAttachmentResource{} + secondResourceName := "azurerm_storage_disks_pool_managed_disk_attachment.second" + data.ResourceTest(t, a, []acceptance.TestStep{ + { + Config: a.multipleDisks(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(a), + check.That(secondResourceName).ExistsInAzure(a), + ), + }, + data.ImportStep(), + { + Config: a.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(a), + ), + }, + data.ImportStep(), + // { + // Config: a.template(data), + // }, + // data.ImportStep(), + }) +} + +func TestAccStorageDisksPoolDiskAttachment_destroy(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + a := DisksPoolManagedDiskAttachmentResource{} + data.ResourceTest(t, a, []acceptance.TestStep{ + data.DisappearsStep(acceptance.DisappearsStepData{ + Config: a.basic, + TestResource: a, + }), + }) +} + +func (a DisksPoolManagedDiskAttachmentResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.StorageDisksPoolManagedDiskAttachmentID(state.ID) + if err != nil { + return nil, err + } + poolId := id.DisksPoolId + diskId := id.ManagedDiskId + client := clients.Storage.DisksPoolsClient + resp, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving %q: %+v", id, err) + } + targetDiskId := diskId.ID() + if resp.DiskPoolProperties == nil || resp.DiskPoolProperties.Disks == nil { + return utils.Bool(false), nil + } + for _, disk := range *resp.DiskPoolProperties.Disks { + if disk.ID != nil && *disk.ID == targetDiskId { + return utils.Bool(true), nil + } + } + return utils.Bool(false), nil +} + +func (a DisksPoolManagedDiskAttachmentResource) Destroy(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := parse.StorageDisksPoolManagedDiskAttachmentID(state.ID) + if err != nil { + return nil, err + } + + client := clients.Storage.DisksPoolsClient + pool, err := client.Get(ctx, id.DisksPoolId.ResourceGroup, id.DisksPoolId.DiskPoolName) + if err != nil { + return nil, err + } + if pool.Disks == nil { + return nil, err + } + attachedDisks := *pool.Disks + remainingDisks := make([]storagepool.Disk, 0) + for 
_, attachedDisk := range attachedDisks { + if utils.NormalizeNilableString(attachedDisk.ID) != id.ManagedDiskId.ID() { + remainingDisks = append(remainingDisks, attachedDisk) + } + } + + future, err := client.Update(ctx, id.DisksPoolId.ResourceGroup, id.DisksPoolId.DiskPoolName, storagepool.DiskPoolUpdate{ + DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ + Disks: &remainingDisks, + }, + }) + if err != nil { + return nil, err + } + if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { + return nil, err + } + return utils.Bool(true), nil +} + +func (a DisksPoolManagedDiskAttachmentResource) basic(data acceptance.TestData) string { + return fmt.Sprintf(` +%s +resource "azurerm_storage_disks_pool_managed_disk_attachment" "test" { + depends_on = [azurerm_role_assignment.test] + disks_pool_id = azurerm_storage_disks_pool.test.id + managed_disk_id = azurerm_managed_disk.test.id +} +`, a.template(data)) +} + +func (a DisksPoolManagedDiskAttachmentResource) template(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} +provider "azuread" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-diskspool-%[2]d" + location = "%[1]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctest-vnet-%[2]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "test" { + name = "acctest-subnet-%[2]d" + resource_group_name = azurerm_resource_group.test.name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.0.0/24"] + delegation { + name = "diskspool" + service_delegation { + actions = ["Microsoft.Network/virtualNetworks/read"] + name = "Microsoft.StoragePool/diskPools" + } + } +} + +resource "azurerm_managed_disk" "test" { + name = "acctest-diskspool-%[2]d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = 4 + max_shares = 2 + zones = ["1"] +} + +data "azuread_service_principal" "test" { + display_name = "StoragePool Resource Provider" +} + +locals { + roles = ["Disk Pool Operator", "Virtual Machine Contributor"] +} + +resource "azurerm_role_assignment" "test" { + count = length(local.roles) + principal_id = data.azuread_service_principal.test.id + role_definition_name = local.roles[count.index] + scope = azurerm_managed_disk.test.id +} + +resource "azurerm_storage_disks_pool" "test" { + name = "acctest-diskspool-%[3]s" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + availability_zones = ["1"] + sku_name = "Basic_B1" + subnet_id = azurerm_subnet.test.id + tags = { + "env" = "qa" + } +} +`, data.Locations.Primary, data.RandomInteger, data.RandomString) +} + +func (a DisksPoolManagedDiskAttachmentResource) requiresImport(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_storage_disks_pool_managed_disk_attachment" "import" { + disks_pool_id = azurerm_storage_disks_pool.test.id + managed_disk_id = azurerm_managed_disk.test.id +} +`, a.basic(data)) +} + +func (a DisksPoolManagedDiskAttachmentResource) multipleDisks(data acceptance.TestData) string { + return fmt.Sprintf(` +%s + +resource "azurerm_managed_disk" "second" { + name = "acctest-diskspool-%d-2" + resource_group_name = azurerm_resource_group.test.name + location = 
azurerm_resource_group.test.location + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = 4 + max_shares = 2 + zones = ["1"] +} + +resource "azurerm_role_assignment" "second" { + count = length(local.roles) + principal_id = data.azuread_service_principal.test.id + role_definition_name = local.roles[count.index] + scope = azurerm_managed_disk.second.id +} + +resource "azurerm_storage_disks_pool_managed_disk_attachment" "second" { + depends_on = [azurerm_role_assignment.second] + disks_pool_id = azurerm_storage_disks_pool.test.id + managed_disk_id = azurerm_managed_disk.second.id +} +`, a.basic(data), data.RandomInteger) +} diff --git a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go new file mode 100644 index 000000000000..dd0259bd44e1 --- /dev/null +++ b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go @@ -0,0 +1,19 @@ +package validate + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" +) + +func StorageDisksPoolManagedDiskAttachment(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + if _, err := parse.StorageDisksPoolManagedDiskAttachmentID(v); err != nil { + errors = append(errors, err) + } + return +} diff --git a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go new file mode 100644 index 000000000000..93b13de0536c --- /dev/null +++ b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go @@ -0,0 +1,128 @@ +package validate + +import "testing" + +func TestStorageDisksPoolManagedDiskAttachmentID(t *testing.T) { + cases := []struct { + Input string + Valid bool + }{ + + { + // empty + Input: "", + Valid: false, + }, + + { + // missing SubscriptionId + Input: "/", + Valid: false, + }, + + { + // missing value for SubscriptionId + Input: "/subscriptions/", + Valid: false, + }, + + { + // missing ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/", + Valid: false, + }, + + { + // missing value for ResourceGroup + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + // missing DiskPoolName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/", + Valid: false, + }, + + { + // missing value for DiskPoolName + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/", + Valid: false, + }, + + { + // missing valud for Managed Disk + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1", + Valid: false, + }, + + { + // upper-cased + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.STORAGEPOOL/DISKPOOLS/STORAGEPOOL1", + Valid: false, + }, + + { + // wrong separator + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/disks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1", + Valid: false, + }, + + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|", + Valid: false, + }, + + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions", + Valid: false, + }, + + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/", + Valid: false, + }, + + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/", + Valid: false, + }, + + { + Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUP/RESGROUP1/PROVIDERS/MICROSOFT.STORAGEPOOL/DISKSPOOL/storagePool1/managedDisks|/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/providers/MICROSOFT.COMPUTE/DISKS/disk1", + Valid: false, + }, + + { + // missed pool part + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/", + Valid: false, + }, + + { + // missed pool part + Input: "/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/", + Valid: false, + }, + + { + // wrong order + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1", + Valid: false, + }, + + { + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1", + Valid: true, + }, + } + for _, tc := range cases { + t.Logf("[DEBUG] Testing Value %s", tc.Input) + _, errors := StorageDisksPoolManagedDiskAttachment(tc.Input, "test") + valid := len(errors) == 0 + + if tc.Valid != valid { + t.Fatalf("Case %q, Expected %t but got %t", tc.Input, tc.Valid, valid) + } + } +} diff --git a/website/docs/r/storage_disks_pool.html.markdown b/website/docs/r/storage_disks_pool.html.markdown index 02296a2e2726..0d1604a28ecf 100644 --- a/website/docs/r/storage_disks_pool.html.markdown +++ b/website/docs/r/storage_disks_pool.html.markdown @@ -12,6 +12,14 @@ Manages a Disk Pool. !> **Note:** This resource has been deprecated in favour of `azurerm_disk_pool` and will be removed in version 3.0 of the Azure Provider. +~> **Note:** Must be either a premium SSD, standard SSD, or an ultra disk in the same region and availability zone as the disk pool. + +~> **Note:** Ultra disks must have a disk sector size of 512 bytes. + +~> **Note:** Must be a shared disk, with a maxShares value of two or greater. 
+ +~> **Note:** You must provide the StoragePool resource provider RBAC permissions to the disks that will be added to the disk pool. + ## Example Usage ```hcl diff --git a/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown b/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown new file mode 100644 index 000000000000..fcd5f99727b0 --- /dev/null +++ b/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "Storage" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_storage_disks_pool_managed_disk_attachment" +description: |- + Manages a Disks Pool Managed Disk Attachment. +--- + +# azurerm_storage_disks_pool_managed_disk_attachment + +Manages a Disks Pool Managed Disk Attachment. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example" + location = "West Europe" +} + +resource "azurerm_virtual_network" "example" { + name = "example-network" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "example" { + name = "example-subnet" + resource_group_name = azurerm_resource_group.example.name + virtual_network_name = azurerm_virtual_network.example.name + address_prefixes = ["10.0.0.0/24"] + delegation { + name = "diskspool" + service_delegation { + actions = ["Microsoft.Network/virtualNetworks/read"] + name = "Microsoft.StoragePool/diskPools" + } + } +} + +resource "azurerm_storage_disks_pool" "example" { + name = "example-pool" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + subnet_id = azurerm_subnet.example.id + availability_zones = ["1"] + sku_name = "Basic_B1" +} + +resource "azurerm_managed_disk" "example" { + name = "example-disk" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + create_option = "Empty" + storage_account_type = "Premium_LRS" + disk_size_gb = 4 + max_shares = 2 + zones = ["1"] +} + +data "azuread_service_principal" "example" { + display_name = "StoragePool Resource Provider" +} + +locals { + roles = ["Disk Pool Operator", "Virtual Machine Contributor"] +} + +resource "azurerm_role_assignment" "example" { + count = length(local.roles) + principal_id = data.azuread_service_principal.example.id + role_definition_name = local.roles[count.index] + scope = azurerm_managed_disk.example.id +} + +resource "azurerm_storage_disks_pool_managed_disk_attachment" "example" { + depends_on = [azurerm_role_assignment.example] + disks_pool_id = azurerm_storage_disks_pool.example.id + managed_disk_id = azurerm_managed_disk.example.id +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `disks_pool_id` - (Required) The ID of the Disks Pool. Changing this forces a new Disks Pool Managed Disk Attachment to be created. + +* `managed_disk_id` - (Required) The ID of the Managed Disk. Changing this forces a new Disks Pool Managed Disk Attachment to be created. + +## Attributes Reference + +In addition to the Arguments listed above - the following Attributes are exported: + +* `id` - The ID of the Disks Pool Managed Disk Attachment. 
+ +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the Disks Pool Managed Disk Attachment. +* `read` - (Defaults to 5 minutes) Used when retrieving the Disks Pool Managed Disk Attachment. +* `delete` - (Defaults to 30 minutes) Used when deleting the Disks Pool Managed Disk Attachment. + +## Import + +Disks Pool Managed Disk Attachments can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_storage_disks_pool_managed_disk_attachment.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1 +``` From 33544d02f2fdc20f4ce568cd3643c2b5bad6f96e Mon Sep 17 00:00:00 2001 From: zjhe Date: Fri, 19 Nov 2021 20:48:05 +0800 Subject: [PATCH 2/9] add nil check to avoid potential panic --- .../parse/storage_disks_pool_managed_disk_attachment.go | 6 ++++++ .../storage_disks_pool_managed_disk_attachment_test.go | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go index df538f8ccdf0..f3b91a26f0ac 100644 --- a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go +++ b/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go @@ -38,10 +38,16 @@ func StorageDisksPoolManagedDiskAttachmentID(input string) (*StorageDisksPoolMan } poolId, err := StorageDisksPoolID(parts[0]) + if poolId == nil { + return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) + } if err != nil { return nil, fmt.Errorf("malformed disks pool id: %q, %v", poolId.ID(), err) } diskId, err := computeParse.ManagedDiskID(parts[1]) + if diskId == nil { + return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) + } if err != nil { return nil, fmt.Errorf("malformed disk id: %q, %v", diskId.ID(), err) } diff --git a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go index 93b13de0536c..9e97fb23283b 100644 --- a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go +++ b/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go @@ -73,6 +73,11 @@ func TestStorageDisksPoolManagedDiskAttachmentID(t *testing.T) { Valid: false, }, + { + Input: "/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1", + Valid: false, + }, + { Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions", Valid: false, From 6062d160b4fcee8eac7002620392b6fcd49fbc14 Mon Sep 17 00:00:00 2001 From: zjhe Date: Fri, 19 Nov 2021 20:49:10 +0800 Subject: [PATCH 3/9] move note about disk to attach to attachment resource doc --- website/docs/r/storage_disks_pool.html.markdown | 8 -------- ...orage_disks_pool_managed_disk_attachment.html.markdown | 8 ++++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git 
a/website/docs/r/storage_disks_pool.html.markdown b/website/docs/r/storage_disks_pool.html.markdown index 0d1604a28ecf..02296a2e2726 100644 --- a/website/docs/r/storage_disks_pool.html.markdown +++ b/website/docs/r/storage_disks_pool.html.markdown @@ -12,14 +12,6 @@ Manages a Disk Pool. !> **Note:** This resource has been deprecated in favour of `azurerm_disk_pool` and will be removed in version 3.0 of the Azure Provider. -~> **Note:** Must be either a premium SSD, standard SSD, or an ultra disk in the same region and availability zone as the disk pool. - -~> **Note:** Ultra disks must have a disk sector size of 512 bytes. - -~> **Note:** Must be a shared disk, with a maxShares value of two or greater. - -~> **Note:** You must provide the StoragePool resource provider RBAC permissions to the disks that will be added to the disk pool. - ## Example Usage ```hcl diff --git a/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown b/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown index fcd5f99727b0..f7122d3f7298 100644 --- a/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown +++ b/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown @@ -10,6 +10,14 @@ description: |- Manages a Disks Pool Managed Disk Attachment. +~> **Note:** Must be either a premium SSD, standard SSD, or an ultra disk in the same region and availability zone as the disk pool. + +~> **Note:** Ultra disks must have a disk sector size of 512 bytes. + +~> **Note:** Must be a shared disk, with a maxShares value of two or greater. + +~> **Note:** You must provide the StoragePool resource provider RBAC permissions to the disks that will be added to the disk pool. + ## Example Usage ```hcl From 634f7629f73b1ee73d5650b08312d00ce7be776a Mon Sep 17 00:00:00 2001 From: zjhe Date: Fri, 7 Jan 2022 16:37:50 +0800 Subject: [PATCH 4/9] move managed disk attachment from storage to disks. 
--- ...k_pool_managed_disk_attachment_resource.go | 205 +++++++++++++++++ ..._managed_disk_attachment_resource_test.go} | 85 ++++--- internal/services/disks/registration.go | 1 + .../id_disk_pool_managed_disk_attachment.go} | 24 +- ...disk_pool_managed_disk_attachment_test.go} | 8 +- .../disk_pool_managed_disk_attachment.go} | 7 +- ...disk_pool_managed_disk_attachment_test.go} | 4 +- internal/services/storage/registration.go | 12 - ...s_pool_managed_disk_attachment_resource.go | 214 ------------------ website/docs/r/disk_pool.html.markdown | 4 +- ...ool_managed_disk_attachment.html.markdown} | 25 +- 11 files changed, 281 insertions(+), 308 deletions(-) create mode 100644 internal/services/disks/disk_pool_managed_disk_attachment_resource.go rename internal/services/{storage/storage_disks_pool_managed_disk_attachment_resource_test.go => disks/disk_pool_managed_disk_attachment_resource_test.go} (70%) rename internal/services/{storage/parse/storage_disks_pool_managed_disk_attachment.go => disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment.go} (59%) rename internal/services/{storage/parse/storage_disks_pool_managed_disk_attachment_test.go => disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment_test.go} (67%) rename internal/services/{storage/validate/storage_disks_pool_managed_disk_attachment.go => disks/validate/disk_pool_managed_disk_attachment.go} (55%) rename internal/services/{storage/validate/storage_disks_pool_managed_disk_attachment_test.go => disks/validate/disk_pool_managed_disk_attachment_test.go} (96%) delete mode 100644 internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go rename website/docs/r/{storage_disks_pool_managed_disk_attachment.html.markdown => disk_pool_managed_disk_attachment.html.markdown} (76%) diff --git a/internal/services/disks/disk_pool_managed_disk_attachment_resource.go b/internal/services/disks/disk_pool_managed_disk_attachment_resource.go new file mode 100644 index 000000000000..d1dd84769804 --- /dev/null +++ b/internal/services/disks/disk_pool_managed_disk_attachment_resource.go @@ -0,0 +1,205 @@ +package disks + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/validate" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-azurerm/internal/locks" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + computeParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" + computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/sdk/2021-08-01/diskpools" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" +) + +type DiskPoolManagedDiskAttachmentResource struct{} + +var _ sdk.Resource = DiskPoolManagedDiskAttachmentResource{} + +type DiskPoolManagedDiskAttachmentModel struct { + DiskPoolId string `tfschema:"disk_pool_id"` + DiskId string `tfschema:"managed_disk_id"` +} + +func (d DiskPoolManagedDiskAttachmentResource) Arguments() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "disk_pool_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: diskpools.ValidateDiskPoolID, + }, + "managed_disk_id": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: 
computeValidate.ManagedDiskID, + }, + } +} + +func (d DiskPoolManagedDiskAttachmentResource) Attributes() map[string]*schema.Schema { + return map[string]*schema.Schema{} +} + +func (d DiskPoolManagedDiskAttachmentResource) ModelObject() interface{} { + return &DiskPoolManagedDiskAttachmentModel{} +} + +func (d DiskPoolManagedDiskAttachmentResource) ResourceType() string { + return "azurerm_disk_pool_managed_disk_attachment" +} + +func (d DiskPoolManagedDiskAttachmentResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + attachment := DiskPoolManagedDiskAttachmentModel{} + err := metadata.Decode(&attachment) + if err != nil { + return err + } + subscriptionId := metadata.Client.Account.SubscriptionId + poolId, err := diskpools.ParseDiskPoolID(attachment.DiskPoolId) + if err != nil { + return err + } + if poolId.SubscriptionId != subscriptionId { + return fmt.Errorf("Disk Pool subscription id %q is different from provider's subscription", poolId.SubscriptionId) + } + diskId, err := computeParse.ManagedDiskID(attachment.DiskId) + if err != nil { + return err + } + locks.ByID(attachment.DiskPoolId) + defer locks.UnlockByID(attachment.DiskPoolId) + id := diskpools.NewDiskPoolManagedDiskAttachmentId(*poolId, *diskId) + + client := metadata.Client.Disks.DiskPoolsClient + poolResp, err := client.Get(ctx, *poolId) + if err != nil { + return fmt.Errorf("retrieving %q: %+v", *poolId, err) + } + + disks := make([]diskpools.Disk, 0) + if poolResp.Model != nil && poolResp.Model.Properties.Disks != nil { + disks = *poolResp.Model.Properties.Disks + } + for _, disk := range disks { + existedDiskId, err := computeParse.ManagedDiskID(disk.Id) + if err != nil { + return fmt.Errorf("error on parsing existing attached disk id %q %+v", disk.Id, err) + } + if *existedDiskId == *diskId { + return metadata.ResourceRequiresImport(d.ResourceType(), id) + } + } + + disks = append(disks, diskpools.Disk{ + Id: diskId.ID(), + }) + + err = client.UpdateThenPoll(ctx, *poolId, diskpools.DiskPoolUpdate{ + Properties: diskpools.DiskPoolUpdateProperties{ + Disks: &disks, + }, + }) + if err != nil { + return fmt.Errorf("creation of %q: %+v", id, err) + } + + metadata.SetID(id) + return nil + }, + } +} + +func (d DiskPoolManagedDiskAttachmentResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + id, err := diskpools.DiskPoolManagedDiskAttachmentID(metadata.ResourceData.Id()) + if err != nil { + return err + } + poolId := id.DiskPoolId + client := metadata.Client.Disks.DiskPoolsClient + + poolResp, err := client.Get(ctx, poolId) + if err != nil { + if response.WasNotFound(poolResp.HttpResponse) { + return metadata.MarkAsGone(id) + } + return fmt.Errorf("retrieving disk pool %q error: %+v", id.DiskPoolId, err) + } + if poolResp.Model == nil || poolResp.Model.Properties.Disks == nil { + return metadata.MarkAsGone(id) + } + + for _, disk := range *poolResp.Model.Properties.Disks { + if disk.Id == id.ManagedDiskId.ID() { + m := DiskPoolManagedDiskAttachmentModel{ + DiskPoolId: id.DiskPoolId.ID(), + DiskId: id.ManagedDiskId.ID(), + } + return metadata.Encode(&m) + } + } + + return metadata.MarkAsGone(id) + }, + } +} + +func (d DiskPoolManagedDiskAttachmentResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) 
error { + diskToDetach := &DiskPoolManagedDiskAttachmentModel{} + err := metadata.Decode(diskToDetach) + if err != nil { + return err + } + poolId, err := diskpools.ParseDiskPoolID(diskToDetach.DiskPoolId) + if err != nil { + return err + } + locks.ByID(diskToDetach.DiskPoolId) + defer locks.UnlockByID(diskToDetach.DiskPoolId) + + client := metadata.Client.Disks.DiskPoolsClient + pool, err := client.Get(ctx, *poolId) + if err != nil { + return fmt.Errorf("retrieving disk pool %q error %v", diskToDetach.DiskPoolId, err) + } + if pool.Model == nil || pool.Model.Properties.Disks == nil { + return nil + } + attachedDisks := *pool.Model.Properties.Disks + remainingDisks := make([]diskpools.Disk, 0) + for _, attachedDisk := range attachedDisks { + if attachedDisk.Id != diskToDetach.DiskId { + remainingDisks = append(remainingDisks, attachedDisk) + } + } + + err = client.UpdateThenPoll(ctx, *poolId, diskpools.DiskPoolUpdate{ + Properties: diskpools.DiskPoolUpdateProperties{ + Disks: &remainingDisks, + }, + }) + if err != nil { + return fmt.Errorf("error on deletion of disk pool managed disk attachment %q: %+v", metadata.ResourceData.Id(), err) + } + return nil + }, + } +} + +func (d DiskPoolManagedDiskAttachmentResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return validate.DiskPoolManagedDiskAttachment +} diff --git a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go b/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go similarity index 70% rename from internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go rename to internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go index 99ff7c145804..661634f1b5ed 100644 --- a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource_test.go +++ b/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go @@ -1,15 +1,15 @@ -package storage_test +package disks_test import ( "context" "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool" + "github.com/hashicorp/go-azure-helpers/lang/response" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" "github.com/hashicorp/terraform-provider-azurerm/internal/clients" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/sdk/2021-08-01/diskpools" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" "github.com/hashicorp/terraform-provider-azurerm/utils" ) @@ -17,7 +17,7 @@ import ( type DisksPoolManagedDiskAttachmentResource struct{} func TestAccStorageDisksPoolDiskAttachment_basic(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} data.ResourceTest(t, a, []acceptance.TestStep{ { @@ -30,8 +30,8 @@ func TestAccStorageDisksPoolDiskAttachment_basic(t *testing.T) { }) } -func TestAccStorageDisksPoolDiskAttachment_requiresImport(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") +func TestAccDiskPoolDiskAttachment_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, 
"azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} data.ResourceTest(t, a, []acceptance.TestStep{ { @@ -40,15 +40,15 @@ func TestAccStorageDisksPoolDiskAttachment_requiresImport(t *testing.T) { }, { Config: a.requiresImport(data), - ExpectError: acceptance.RequiresImportError("azurerm_storage_disks_pool_managed_disk_attachment"), + ExpectError: acceptance.RequiresImportError("azurerm_disk_pool_managed_disk_attachment"), }, }) } func TestAccStorageDisksPoolDiskAttachment_multipleDisks(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} - secondResourceName := "azurerm_storage_disks_pool_managed_disk_attachment.second" + secondResourceName := "azurerm_disk_pool_managed_disk_attachment.second" data.ResourceTest(t, a, []acceptance.TestStep{ { Config: a.multipleDisks(data), @@ -65,15 +65,11 @@ func TestAccStorageDisksPoolDiskAttachment_multipleDisks(t *testing.T) { ), }, data.ImportStep(), - // { - // Config: a.template(data), - // }, - // data.ImportStep(), }) } func TestAccStorageDisksPoolDiskAttachment_destroy(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_storage_disks_pool_managed_disk_attachment", "test") + data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} data.ResourceTest(t, a, []acceptance.TestStep{ data.DisappearsStep(acceptance.DisappearsStepData{ @@ -84,26 +80,26 @@ func TestAccStorageDisksPoolDiskAttachment_destroy(t *testing.T) { } func (a DisksPoolManagedDiskAttachmentResource) Exists(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageDisksPoolManagedDiskAttachmentID(state.ID) + id, err := diskpools.DiskPoolManagedDiskAttachmentID(state.ID) if err != nil { return nil, err } - poolId := id.DisksPoolId + poolId := id.DiskPoolId diskId := id.ManagedDiskId - client := clients.Storage.DisksPoolsClient - resp, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) + client := clients.Disks.DiskPoolsClient + resp, err := client.Get(ctx, poolId) if err != nil { - if utils.ResponseWasNotFound(resp.Response) { + if response.WasNotFound(resp.HttpResponse) { return utils.Bool(false), nil } return nil, fmt.Errorf("retrieving %q: %+v", id, err) } targetDiskId := diskId.ID() - if resp.DiskPoolProperties == nil || resp.DiskPoolProperties.Disks == nil { + if resp.Model == nil || resp.Model.Properties.Disks == nil { return utils.Bool(false), nil } - for _, disk := range *resp.DiskPoolProperties.Disks { - if disk.ID != nil && *disk.ID == targetDiskId { + for _, disk := range *resp.Model.Properties.Disks { + if disk.Id == targetDiskId { return utils.Bool(true), nil } } @@ -111,47 +107,44 @@ func (a DisksPoolManagedDiskAttachmentResource) Exists(ctx context.Context, clie } func (a DisksPoolManagedDiskAttachmentResource) Destroy(ctx context.Context, clients *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { - id, err := parse.StorageDisksPoolManagedDiskAttachmentID(state.ID) + id, err := diskpools.DiskPoolManagedDiskAttachmentID(state.ID) if err != nil { return nil, err } - client := clients.Storage.DisksPoolsClient - pool, err := client.Get(ctx, id.DisksPoolId.ResourceGroup, id.DisksPoolId.DiskPoolName) + client := clients.Disks.DiskPoolsClient + pool, err := 
client.Get(ctx, id.DiskPoolId) if err != nil { return nil, err } - if pool.Disks == nil { + if pool.Model == nil || pool.Model.Properties.Disks == nil { return nil, err } - attachedDisks := *pool.Disks - remainingDisks := make([]storagepool.Disk, 0) - for _, attachedDisk := range attachedDisks { - if utils.NormalizeNilableString(attachedDisk.ID) != id.ManagedDiskId.ID() { + attachedDisks := pool.Model.Properties.Disks + remainingDisks := make([]diskpools.Disk, 0) + for _, attachedDisk := range *attachedDisks { + if attachedDisk.Id != id.ManagedDiskId.ID() { remainingDisks = append(remainingDisks, attachedDisk) } } - future, err := client.Update(ctx, id.DisksPoolId.ResourceGroup, id.DisksPoolId.DiskPoolName, storagepool.DiskPoolUpdate{ - DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ + err = client.UpdateThenPoll(ctx, id.DiskPoolId, diskpools.DiskPoolUpdate{ + Properties: diskpools.DiskPoolUpdateProperties{ Disks: &remainingDisks, }, }) if err != nil { return nil, err } - if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return nil, err - } return utils.Bool(true), nil } func (a DisksPoolManagedDiskAttachmentResource) basic(data acceptance.TestData) string { return fmt.Sprintf(` %s -resource "azurerm_storage_disks_pool_managed_disk_attachment" "test" { +resource "azurerm_disk_pool_managed_disk_attachment" "test" { depends_on = [azurerm_role_assignment.test] - disks_pool_id = azurerm_storage_disks_pool.test.id + disk_pool_id = azurerm_disk_pool.test.id managed_disk_id = azurerm_managed_disk.test.id } `, a.template(data)) @@ -165,7 +158,7 @@ provider "azurerm" { provider "azuread" {} resource "azurerm_resource_group" "test" { - name = "acctestRG-diskspool-%[2]d" + name = "acctestRG-diskpool-%[2]d" location = "%[1]s" } @@ -191,7 +184,7 @@ resource "azurerm_subnet" "test" { } resource "azurerm_managed_disk" "test" { - name = "acctest-diskspool-%[2]d" + name = "acctest-diskpool-%[2]d" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location create_option = "Empty" @@ -216,11 +209,11 @@ resource "azurerm_role_assignment" "test" { scope = azurerm_managed_disk.test.id } -resource "azurerm_storage_disks_pool" "test" { - name = "acctest-diskspool-%[3]s" +resource "azurerm_disk_pool" "test" { + name = "acctest-diskpool-%[3]s" resource_group_name = azurerm_resource_group.test.name location = azurerm_resource_group.test.location - availability_zones = ["1"] + zones = ["1"] sku_name = "Basic_B1" subnet_id = azurerm_subnet.test.id tags = { @@ -234,8 +227,8 @@ func (a DisksPoolManagedDiskAttachmentResource) requiresImport(data acceptance.T return fmt.Sprintf(` %s -resource "azurerm_storage_disks_pool_managed_disk_attachment" "import" { - disks_pool_id = azurerm_storage_disks_pool.test.id +resource "azurerm_disk_pool_managed_disk_attachment" "import" { + disk_pool_id = azurerm_disk_pool.test.id managed_disk_id = azurerm_managed_disk.test.id } `, a.basic(data)) @@ -263,9 +256,9 @@ resource "azurerm_role_assignment" "second" { scope = azurerm_managed_disk.second.id } -resource "azurerm_storage_disks_pool_managed_disk_attachment" "second" { +resource "azurerm_disk_pool_managed_disk_attachment" "second" { depends_on = [azurerm_role_assignment.second] - disks_pool_id = azurerm_storage_disks_pool.test.id + disk_pool_id = azurerm_disk_pool.test.id managed_disk_id = azurerm_managed_disk.second.id } `, a.basic(data), data.RandomInteger) diff --git a/internal/services/disks/registration.go b/internal/services/disks/registration.go 
index d5dbcde1bc72..ae69a780533f 100644 --- a/internal/services/disks/registration.go +++ b/internal/services/disks/registration.go @@ -20,6 +20,7 @@ func (r Registration) DataSources() []sdk.DataSource { func (r Registration) Resources() []sdk.Resource { resources := []sdk.Resource{ DiskPoolResource{}, + DiskPoolManagedDiskAttachmentResource{}, } if !features.ThreePointOh() { diff --git a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go b/internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment.go similarity index 59% rename from internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go rename to internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment.go index f3b91a26f0ac..aa21306a1e6e 100644 --- a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment.go +++ b/internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment.go @@ -1,4 +1,4 @@ -package parse +package diskpools import ( "fmt" @@ -10,25 +10,25 @@ import ( const storageDiskPoolManagedDiskAttachmentIdSeparator = "/managedDisks|" -var _ resourceid.Formatter = StorageDisksPoolManagedDiskAttachmentId{} +var _ resourceid.Formatter = DiskPoolManagedDiskAttachmentId{} -type StorageDisksPoolManagedDiskAttachmentId struct { - DisksPoolId StorageDisksPoolId +type DiskPoolManagedDiskAttachmentId struct { + DiskPoolId DiskPoolId ManagedDiskId computeParse.ManagedDiskId } -func NewStorageDisksPoolManagedDiskAttachmentId(diskPoolId StorageDisksPoolId, managedDiskId computeParse.ManagedDiskId) StorageDisksPoolManagedDiskAttachmentId { - return StorageDisksPoolManagedDiskAttachmentId{ - DisksPoolId: diskPoolId, +func NewDiskPoolManagedDiskAttachmentId(diskPoolId DiskPoolId, managedDiskId computeParse.ManagedDiskId) DiskPoolManagedDiskAttachmentId { + return DiskPoolManagedDiskAttachmentId{ + DiskPoolId: diskPoolId, ManagedDiskId: managedDiskId, } } -func (d StorageDisksPoolManagedDiskAttachmentId) ID() string { - return fmt.Sprintf("%s%s%s", d.DisksPoolId.ID(), storageDiskPoolManagedDiskAttachmentIdSeparator, d.ManagedDiskId.ID()) +func (d DiskPoolManagedDiskAttachmentId) ID() string { + return fmt.Sprintf("%s%s%s", d.DiskPoolId.ID(), storageDiskPoolManagedDiskAttachmentIdSeparator, d.ManagedDiskId.ID()) } -func StorageDisksPoolManagedDiskAttachmentID(input string) (*StorageDisksPoolManagedDiskAttachmentId, error) { +func DiskPoolManagedDiskAttachmentID(input string) (*DiskPoolManagedDiskAttachmentId, error) { if !strings.Contains(input, storageDiskPoolManagedDiskAttachmentIdSeparator) { return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) } @@ -37,7 +37,7 @@ func StorageDisksPoolManagedDiskAttachmentID(input string) (*StorageDisksPoolMan return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) } - poolId, err := StorageDisksPoolID(parts[0]) + poolId, err := ParseDiskPoolID(parts[0]) if poolId == nil { return nil, fmt.Errorf("malformed disks pool managed disk attachment id:%q", input) } @@ -51,6 +51,6 @@ func StorageDisksPoolManagedDiskAttachmentID(input string) (*StorageDisksPoolMan if err != nil { return nil, fmt.Errorf("malformed disk id: %q, %v", diskId.ID(), err) } - id := NewStorageDisksPoolManagedDiskAttachmentId(*poolId, *diskId) + id := NewDiskPoolManagedDiskAttachmentId(*poolId, *diskId) return &id, nil } diff --git a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go 
b/internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment_test.go similarity index 67% rename from internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go rename to internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment_test.go index db2c94400e36..b34707c6c6fd 100644 --- a/internal/services/storage/parse/storage_disks_pool_managed_disk_attachment_test.go +++ b/internal/services/disks/sdk/2021-08-01/diskpools/id_disk_pool_managed_disk_attachment_test.go @@ -1,4 +1,4 @@ -package parse +package diskpools import ( "testing" @@ -6,10 +6,10 @@ import ( computeparse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" ) -func TestStorageDisksPoolManagedDiskAttachmentIDFormatter(t *testing.T) { - diskPoolId := NewStorageDisksPoolID("12345678-1234-9876-4563-123456789012", "resGroup1", "storagePool1") +func TestDiskPoolManagedDiskAttachmentIDFormatter(t *testing.T) { + diskPoolId := NewDiskPoolID("12345678-1234-9876-4563-123456789012", "resGroup1", "storagePool1") managedDiskId := computeparse.NewManagedDiskID("12345678-1234-9876-4563-123456789012", "resGroup1", "diks1") - actual := NewStorageDisksPoolManagedDiskAttachmentId(diskPoolId, managedDiskId).ID() + actual := NewDiskPoolManagedDiskAttachmentId(diskPoolId, managedDiskId).ID() expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/diks1" if actual != expected { t.Fatalf("Expected %q but got %q", expected, actual) diff --git a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go b/internal/services/disks/validate/disk_pool_managed_disk_attachment.go similarity index 55% rename from internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go rename to internal/services/disks/validate/disk_pool_managed_disk_attachment.go index dd0259bd44e1..9a9b1f96ce92 100644 --- a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment.go +++ b/internal/services/disks/validate/disk_pool_managed_disk_attachment.go @@ -2,17 +2,16 @@ package validate import ( "fmt" - - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" + "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/sdk/2021-08-01/diskpools" ) -func StorageDisksPoolManagedDiskAttachment(input interface{}, key string) (warnings []string, errors []error) { +func DiskPoolManagedDiskAttachment(input interface{}, key string) (warnings []string, errors []error) { v, ok := input.(string) if !ok { errors = append(errors, fmt.Errorf("expected %q to be a string", key)) return } - if _, err := parse.StorageDisksPoolManagedDiskAttachmentID(v); err != nil { + if _, err := diskpools.DiskPoolManagedDiskAttachmentID(v); err != nil { errors = append(errors, err) } return diff --git a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go b/internal/services/disks/validate/disk_pool_managed_disk_attachment_test.go similarity index 96% rename from internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go rename to internal/services/disks/validate/disk_pool_managed_disk_attachment_test.go index 9e97fb23283b..fc8845d97f6f 100644 --- 
a/internal/services/storage/validate/storage_disks_pool_managed_disk_attachment_test.go +++ b/internal/services/disks/validate/disk_pool_managed_disk_attachment_test.go @@ -2,7 +2,7 @@ package validate import "testing" -func TestStorageDisksPoolManagedDiskAttachmentID(t *testing.T) { +func TestDiskPoolManagedDiskAttachmentID(t *testing.T) { cases := []struct { Input string Valid bool @@ -123,7 +123,7 @@ func TestStorageDisksPoolManagedDiskAttachmentID(t *testing.T) { } for _, tc := range cases { t.Logf("[DEBUG] Testing Value %s", tc.Input) - _, errors := StorageDisksPoolManagedDiskAttachment(tc.Input, "test") + _, errors := DiskPoolManagedDiskAttachment(tc.Input, "test") valid := len(errors) == 0 if tc.Valid != valid { diff --git a/internal/services/storage/registration.go b/internal/services/storage/registration.go index dd9957019e48..773e0a7ada60 100644 --- a/internal/services/storage/registration.go +++ b/internal/services/storage/registration.go @@ -1,7 +1,6 @@ package storage import ( - "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" ) @@ -61,14 +60,3 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_storage_sync_group": resourceStorageSyncGroup(), } } - -func (r Registration) DataSources() []sdk.DataSource { - return []sdk.DataSource{} -} - -func (r Registration) Resources() []sdk.Resource { - return []sdk.Resource{ - DisksPoolResource{}, - DisksPoolManagedDiskAttachmentResource{}, - } -} diff --git a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go b/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go deleted file mode 100644 index e76eeec48869..000000000000 --- a/internal/services/storage/storage_disks_pool_managed_disk_attachment_resource.go +++ /dev/null @@ -1,214 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "time" - - "github.com/Azure/azure-sdk-for-go/services/storagepool/mgmt/2021-08-01/storagepool" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-azurerm/internal/locks" - "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" - computeParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse" - computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/parse" - "github.com/hashicorp/terraform-provider-azurerm/internal/services/storage/validate" - "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" - "github.com/hashicorp/terraform-provider-azurerm/utils" -) - -type DisksPoolManagedDiskAttachmentResource struct{} - -var _ sdk.Resource = DisksPoolManagedDiskAttachmentResource{} - -type DisksPoolManagedDiskAttachmentModel struct { - DisksPoolId string `tfschema:"disks_pool_id"` - DiskId string `tfschema:"managed_disk_id"` -} - -func (d DisksPoolManagedDiskAttachmentResource) Arguments() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "disks_pool_id": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validate.StorageDisksPoolID, - }, - "managed_disk_id": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: computeValidate.ManagedDiskID, - }, - } -} - -func (d DisksPoolManagedDiskAttachmentResource) Attributes() map[string]*schema.Schema { 
- return map[string]*schema.Schema{} -} - -func (d DisksPoolManagedDiskAttachmentResource) ModelObject() interface{} { - return &DisksPoolManagedDiskAttachmentModel{} -} - -func (d DisksPoolManagedDiskAttachmentResource) ResourceType() string { - return "azurerm_storage_disks_pool_managed_disk_attachment" -} - -func (d DisksPoolManagedDiskAttachmentResource) Create() sdk.ResourceFunc { - return sdk.ResourceFunc{ - Timeout: 30 * time.Minute, - Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - attachment := DisksPoolManagedDiskAttachmentModel{} - err := metadata.Decode(&attachment) - if err != nil { - return err - } - subscriptionId := metadata.Client.Account.SubscriptionId - poolId, err := parse.StorageDisksPoolID(attachment.DisksPoolId) - if err != nil { - return err - } - if poolId.SubscriptionId != subscriptionId { - return fmt.Errorf("Disks Pool subscription id %q is different from provider's subscription", poolId.SubscriptionId) - } - diskId, err := computeParse.ManagedDiskID(attachment.DiskId) - if err != nil { - return err - } - locks.ByID(attachment.DisksPoolId) - defer locks.UnlockByID(attachment.DisksPoolId) - id := parse.NewStorageDisksPoolManagedDiskAttachmentId(*poolId, *diskId) - - client := metadata.Client.Storage.DisksPoolsClient - poolResp, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) - if err != nil { - return fmt.Errorf("retrieving %q: %+v", *poolId, err) - } - - if poolResp.Disks == nil { - poolResp.Disks = &[]storagepool.Disk{} - } - for _, disk := range *poolResp.Disks { - if disk.ID == nil { - continue - } - existedDiskId, err := computeParse.ManagedDiskID(*disk.ID) - if err != nil { - return fmt.Errorf("error on parsing existing attached disk id %q %+v", *disk.ID, err) - } - if *existedDiskId == *diskId { - return metadata.ResourceRequiresImport(d.ResourceType(), id) - } - } - - disks := append(*poolResp.Disks, storagepool.Disk{ - ID: utils.String(diskId.ID()), - }) - - future, err := client.Update(ctx, poolId.ResourceGroup, poolId.DiskPoolName, storagepool.DiskPoolUpdate{ - DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ - Disks: &disks, - }, - }) - if err != nil { - return fmt.Errorf("creation of %q: %+v", id, err) - } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for creation of %q: %+v", id, err) - } - - metadata.SetID(id) - return nil - }, - } -} - -func (d DisksPoolManagedDiskAttachmentResource) Read() sdk.ResourceFunc { - return sdk.ResourceFunc{ - Timeout: 5 * time.Minute, - Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - id, err := parse.StorageDisksPoolManagedDiskAttachmentID(metadata.ResourceData.Id()) - if err != nil { - return err - } - poolId := id.DisksPoolId - client := metadata.Client.Storage.DisksPoolsClient - - poolResp, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) - if err != nil { - if utils.ResponseWasNotFound(poolResp.Response) { - return metadata.MarkAsGone(id) - } - return fmt.Errorf("retrieving disks pool %q error: %+v", id.DisksPoolId, err) - } - if poolResp.DiskPoolProperties == nil || poolResp.DiskPoolProperties.Disks == nil { - return metadata.MarkAsGone(id) - } - - for _, disk := range *poolResp.Disks { - if disk.ID != nil && *disk.ID == id.ManagedDiskId.ID() { - m := DisksPoolManagedDiskAttachmentModel{ - DisksPoolId: id.DisksPoolId.ID(), - DiskId: id.ManagedDiskId.ID(), - } - return metadata.Encode(&m) - } - } - - return metadata.MarkAsGone(id) - }, - } -} - -func (d 
DisksPoolManagedDiskAttachmentResource) Delete() sdk.ResourceFunc { - return sdk.ResourceFunc{ - Timeout: 30 * time.Minute, - Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { - diskToDetach := &DisksPoolManagedDiskAttachmentModel{} - err := metadata.Decode(diskToDetach) - if err != nil { - return err - } - poolId, err := parse.StorageDisksPoolID(diskToDetach.DisksPoolId) - if err != nil { - return err - } - locks.ByID(diskToDetach.DisksPoolId) - defer locks.UnlockByID(diskToDetach.DisksPoolId) - - client := metadata.Client.Storage.DisksPoolsClient - pool, err := client.Get(ctx, poolId.ResourceGroup, poolId.DiskPoolName) - if err != nil { - return fmt.Errorf("retrieving disks pool %q error %v", diskToDetach.DisksPoolId, err) - } - if pool.Disks == nil { - return nil - } - attachedDisks := *pool.Disks - remainingDisks := make([]storagepool.Disk, 0) - for _, attachedDisk := range attachedDisks { - if utils.NormalizeNilableString(attachedDisk.ID) != diskToDetach.DiskId { - remainingDisks = append(remainingDisks, attachedDisk) - } - } - - future, err := client.Update(ctx, poolId.ResourceGroup, poolId.DiskPoolName, storagepool.DiskPoolUpdate{ - DiskPoolUpdateProperties: &storagepool.DiskPoolUpdateProperties{ - Disks: &remainingDisks, - }, - }) - if err != nil { - return fmt.Errorf("error on deletion of disks pool managed disk attachment %q: %+v", metadata.ResourceData.Id(), err) - } - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("waiting for deletion of disks pool managed disk attatchment %q: %+v", metadata.ResourceData.Id(), err) - } - return nil - }, - } -} - -func (d DisksPoolManagedDiskAttachmentResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { - return validate.StorageDisksPoolManagedDiskAttachment -} diff --git a/website/docs/r/disk_pool.html.markdown b/website/docs/r/disk_pool.html.markdown index 6a4f590ec407..8bbd4d1e1bb8 100644 --- a/website/docs/r/disk_pool.html.markdown +++ b/website/docs/r/disk_pool.html.markdown @@ -41,7 +41,7 @@ resource "azurerm_subnet" "example" { } } -resource "azurerm_storage_disks_pool" "example" { +resource "azurerm_disk_pool" "example" { name = "example-disk-pool" resource_group_name = azurerm_resource_group.example.name location = azurerm_resource_group.example.location @@ -91,5 +91,5 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/d Disk Pools can be imported using the `resource id`, e.g. 
```shell -terraform import azurerm_storage_disks_pool.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/diskPool1 +terraform import azurerm_disk_pool.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/diskPool1 ``` diff --git a/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown b/website/docs/r/disk_pool_managed_disk_attachment.html.markdown similarity index 76% rename from website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown rename to website/docs/r/disk_pool_managed_disk_attachment.html.markdown index f7122d3f7298..97b3a07d9e9f 100644 --- a/website/docs/r/storage_disks_pool_managed_disk_attachment.html.markdown +++ b/website/docs/r/disk_pool_managed_disk_attachment.html.markdown @@ -1,14 +1,14 @@ --- -subcategory: "Storage" +subcategory: "Disks" layout: "azurerm" -page_title: "Azure Resource Manager: azurerm_storage_disks_pool_managed_disk_attachment" +page_title: "Azure Resource Manager: azurerm_disk_pool_managed_disk_attachment" description: |- - Manages a Disks Pool Managed Disk Attachment. + Manages a Disk Pool Managed Disk Attachment. --- -# azurerm_storage_disks_pool_managed_disk_attachment +# azurerm_disk_pool_managed_disk_attachment -Manages a Disks Pool Managed Disk Attachment. +Manages a Disk Pool Managed Disk Attachment. ~> **Note:** Must be either a premium SSD, standard SSD, or an ultra disk in the same region and availability zone as the disk pool. @@ -40,6 +40,7 @@ resource "azurerm_subnet" "example" { address_prefixes = ["10.0.0.0/24"] delegation { name = "diskspool" + service_delegation { actions = ["Microsoft.Network/virtualNetworks/read"] name = "Microsoft.StoragePool/diskPools" @@ -47,12 +48,12 @@ resource "azurerm_subnet" "example" { } } -resource "azurerm_storage_disks_pool" "example" { +resource "azurerm_disk_pool" "example" { name = "example-pool" resource_group_name = azurerm_resource_group.example.name location = azurerm_resource_group.example.location subnet_id = azurerm_subnet.example.id - availability_zones = ["1"] + zones = ["1"] sku_name = "Basic_B1" } @@ -82,9 +83,9 @@ resource "azurerm_role_assignment" "example" { scope = azurerm_managed_disk.example.id } -resource "azurerm_storage_disks_pool_managed_disk_attachment" "example" { +resource "azurerm_disk_pool_managed_disk_attachment" "example" { depends_on = [azurerm_role_assignment.example] - disks_pool_id = azurerm_storage_disks_pool.example.id + disk_pool_id = azurerm_disk_pool.example.id managed_disk_id = azurerm_managed_disk.example.id } ``` @@ -93,7 +94,7 @@ resource "azurerm_storage_disks_pool_managed_disk_attachment" "example" { The following arguments are supported: -* `disks_pool_id` - (Required) The ID of the Disks Pool. Changing this forces a new Disks Pool Managed Disk Attachment to be created. +* `disk_pool_id` - (Required) The ID of the Disk Pool. Changing this forces a new Disk Pool Managed Disk Attachment to be created. * `managed_disk_id` - (Required) The ID of the Managed Disk. Changing this forces a new Disks Pool Managed Disk Attachment to be created. @@ -101,7 +102,7 @@ The following arguments are supported: In addition to the Arguments listed above - the following Attributes are exported: -* `id` - The ID of the Disks Pool Managed Disk Attachment. +* `id` - The ID of the Disk Pool Managed Disk Attachment. 
 ## Timeouts
 
@@ -116,5 +117,5 @@ The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/d
 Disks Pool Managed Disk Attachments can be imported using the `resource id`, e.g.
 
 ```shell
-terraform import azurerm_storage_disks_pool_managed_disk_attachment.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1
+terraform import azurerm_disk_pool_managed_disk_attachment.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.StoragePool/diskPools/storagePool1/managedDisks|/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/resGroup1/providers/Microsoft.Compute/disks/disk1
 ```

From 019a3fbcd64749b1cdc7da28b698ee751f1e472d Mon Sep 17 00:00:00 2001
From: zjhe
Date: Fri, 7 Jan 2022 18:11:39 +0800
Subject: [PATCH 5/9] trigger GitHub actions

From 6c2c851986fc7dc2fcab4c651c90724ce7827fd4 Mon Sep 17 00:00:00 2001
From: zjhe
Date: Fri, 7 Jan 2022 18:54:37 +0800
Subject: [PATCH 6/9] fix lint issue

---
 .../disks/disk_pool_managed_disk_attachment_resource.go | 2 +-
 .../disks/validate/disk_pool_managed_disk_attachment.go | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/services/disks/disk_pool_managed_disk_attachment_resource.go b/internal/services/disks/disk_pool_managed_disk_attachment_resource.go
index d1dd84769804..b1936479dd3d 100644
--- a/internal/services/disks/disk_pool_managed_disk_attachment_resource.go
+++ b/internal/services/disks/disk_pool_managed_disk_attachment_resource.go
@@ -3,7 +3,6 @@ package disks
 import (
     "context"
     "fmt"
-    "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/validate"
     "time"
 
     "github.com/hashicorp/go-azure-helpers/lang/response"
@@ -13,6 +12,7 @@ import (
     computeParse "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/parse"
     computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate"
     "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/sdk/2021-08-01/diskpools"
+    "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/validate"
     "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
 )
 
diff --git a/internal/services/disks/validate/disk_pool_managed_disk_attachment.go b/internal/services/disks/validate/disk_pool_managed_disk_attachment.go
index 9a9b1f96ce92..bb9cad4f4951 100644
--- a/internal/services/disks/validate/disk_pool_managed_disk_attachment.go
+++ b/internal/services/disks/validate/disk_pool_managed_disk_attachment.go
@@ -2,6 +2,7 @@ package validate
 
 import (
     "fmt"
+
     "github.com/hashicorp/terraform-provider-azurerm/internal/services/disks/sdk/2021-08-01/diskpools"
 )

From a4c7c6077060b3d94422bd567d52c624f46fdbec Mon Sep 17 00:00:00 2001
From: zjhe
Date: Fri, 7 Jan 2022 19:35:00 +0800
Subject: [PATCH 7/9] rename acc test methods names

---
 .../disk_pool_managed_disk_attachment_resource_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go b/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go
index 661634f1b5ed..5f9036973228 100644
--- a/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go
+++
b/internal/services/disks/disk_pool_managed_disk_attachment_resource_test.go @@ -16,7 +16,7 @@ import ( type DisksPoolManagedDiskAttachmentResource struct{} -func TestAccStorageDisksPoolDiskAttachment_basic(t *testing.T) { +func TestAccDiskPoolDiskAttachment_basic(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} data.ResourceTest(t, a, []acceptance.TestStep{ @@ -45,7 +45,7 @@ func TestAccDiskPoolDiskAttachment_requiresImport(t *testing.T) { }) } -func TestAccStorageDisksPoolDiskAttachment_multipleDisks(t *testing.T) { +func TestAccDiskPoolDiskAttachment_multipleDisks(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} secondResourceName := "azurerm_disk_pool_managed_disk_attachment.second" @@ -68,7 +68,7 @@ func TestAccStorageDisksPoolDiskAttachment_multipleDisks(t *testing.T) { }) } -func TestAccStorageDisksPoolDiskAttachment_destroy(t *testing.T) { +func TestAccDiskPoolDiskAttachment_destroy(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_disk_pool_managed_disk_attachment", "test") a := DisksPoolManagedDiskAttachmentResource{} data.ResourceTest(t, a, []acceptance.TestStep{ From 814023a297ccb51754223004331a95b1d66378d0 Mon Sep 17 00:00:00 2001 From: hezijie Date: Thu, 13 Jan 2022 14:05:25 +0800 Subject: [PATCH 8/9] Add retry mechanism to disk pool's code. --- internal/services/disks/disk_pool_resource.go | 51 +++++++++++++++---- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/internal/services/disks/disk_pool_resource.go b/internal/services/disks/disk_pool_resource.go index f1e9507e8f15..b8ca7e8a8d6b 100644 --- a/internal/services/disks/disk_pool_resource.go +++ b/internal/services/disks/disk_pool_resource.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" "github.com/hashicorp/go-azure-helpers/resourcemanager/location" "github.com/hashicorp/go-azure-helpers/resourcemanager/tags" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/internal/locks" "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" @@ -119,11 +120,17 @@ func (r DiskPoolResource) Create() sdk.ResourceFunc { Sku: expandDisksPoolSku(m.Sku), Tags: tags.Expand(m.Tags), } - if err := client.CreateOrUpdateThenPoll(ctx, id, createParameter); err != nil { + future, err := client.CreateOrUpdate(ctx, id, createParameter) + if err != nil { return fmt.Errorf("creating %s: %+v", id, err) } - metadata.SetID(id) - return nil + return pluginsdk.Retry(metadata.ResourceData.Timeout(pluginsdk.TimeoutCreate), func() *resource.RetryError { + if err := r.retryError(future.Poller.PollUntilDone()); err != nil { + return err + } + metadata.SetID(id) + return nil + }) }, } } @@ -165,7 +172,7 @@ func (DiskPoolResource) Read() sdk.ResourceFunc { } } -func (DiskPoolResource) Delete() sdk.ResourceFunc { +func (r DiskPoolResource) Delete() sdk.ResourceFunc { return sdk.ResourceFunc{ Timeout: 30 * time.Minute, Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { @@ -178,11 +185,14 @@ func (DiskPoolResource) Delete() sdk.ResourceFunc { locks.ByID(id.ID()) defer locks.UnlockByID(id.ID()) - if err := client.DeleteThenPoll(ctx, *id); err != nil { + future, err := client.Delete(ctx, *id) + if err != nil { return 
fmt.Errorf("deleting %s: %+v", *id, err) } - return nil + return pluginsdk.Retry(metadata.ResourceData.Timeout(pluginsdk.TimeoutDelete), func() *resource.RetryError { + return r.retryError(future.Poller.PollUntilDone()) + }) }, } } @@ -191,7 +201,7 @@ func (DiskPoolResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { return diskpools.ValidateDiskPoolID } -func (DiskPoolResource) Update() sdk.ResourceFunc { +func (r DiskPoolResource) Update() sdk.ResourceFunc { return sdk.ResourceFunc{ Timeout: 30 * time.Minute, Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { @@ -218,15 +228,36 @@ func (DiskPoolResource) Update() sdk.ResourceFunc { patch.Tags = tags.Expand(m.Tags) } - if err := client.UpdateThenPoll(ctx, *id, patch); err != nil { + future, err := client.Update(ctx, *id, patch) + if err != nil { return fmt.Errorf("updating %s: %+v", *id, err) } - - return nil + return pluginsdk.Retry(metadata.ResourceData.Timeout(pluginsdk.TimeoutUpdate), func() *resource.RetryError { + return r.retryError(future.Poller.PollUntilDone()) + }) }, } } +func (DiskPoolResource) retryError(err error) *resource.RetryError { + if err == nil { + return nil + } + // according to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-pools-troubleshoot#common-failure-codes-when-deploying-a-disk-pool the errors below are retryable. + retryableErrors := []string{ + "UnexpectedError", + "DeploymentTimeout", + "GoalStateApplicationTimeoutError", + "OngoingOperationInProgress", + } + for _, retryableError := range retryableErrors { + if strings.Contains(err.Error(), retryableError) { + return pluginsdk.RetryableError(err) + } + } + return pluginsdk.NonRetryableError(err) +} + func expandDisksPoolSku(sku string) diskpools.Sku { parts := strings.Split(sku, "_") return diskpools.Sku{ From 27db546a2fd4fefff0f7e3e199f4285a299ec564 Mon Sep 17 00:00:00 2001 From: hezijie Date: Thu, 13 Jan 2022 14:20:49 +0800 Subject: [PATCH 9/9] remove UnexpectedError from retryable errors --- internal/services/disks/disk_pool_resource.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/services/disks/disk_pool_resource.go b/internal/services/disks/disk_pool_resource.go index b8ca7e8a8d6b..d44187171448 100644 --- a/internal/services/disks/disk_pool_resource.go +++ b/internal/services/disks/disk_pool_resource.go @@ -245,7 +245,6 @@ func (DiskPoolResource) retryError(err error) *resource.RetryError { } // according to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-pools-troubleshoot#common-failure-codes-when-deploying-a-disk-pool the errors below are retryable. retryableErrors := []string{ - "UnexpectedError", "DeploymentTimeout", "GoalStateApplicationTimeoutError", "OngoingOperationInProgress",