diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
index ec1db8fc652e..4b9af295ceeb 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
@@ -113,6 +113,11 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource {
 				Computed: true,
 			},
 
+			"os_disk_type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
 			"os_type": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -244,6 +249,12 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf
 			osDiskSizeGB = int(*props.OsDiskSizeGB)
 		}
 		d.Set("os_disk_size_gb", osDiskSizeGB)
+
+		osDiskType := containerservice.Managed
+		if props.OsDiskType != "" {
+			osDiskType = props.OsDiskType
+		}
+		d.Set("os_disk_type", string(osDiskType))
 		d.Set("os_type", string(props.OsType))
 
 		// not returned from the API if not Spot
diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
index 2cd20c239231..5b1aa753593a 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go
@@ -165,6 +165,17 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource {
 				ValidateFunc: validation.IntAtLeast(1),
 			},
 
+			"os_disk_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Default:  containerservice.Managed,
+				ValidateFunc: validation.StringInSlice([]string{
+					string(containerservice.Ephemeral),
+					string(containerservice.Managed),
+				}, false),
+			},
+
 			"os_type": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -339,6 +350,10 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int
 		profile.ProximityPlacementGroupID = &proximityPlacementGroupId
 	}
 
+	if osDiskType := d.Get("os_disk_type").(string); osDiskType != "" {
+		profile.OsDiskType = containerservice.OSDiskType(osDiskType)
+	}
+
 	if vnetSubnetID := d.Get("vnet_subnet_id").(string); vnetSubnetID != "" {
 		profile.VnetSubnetID = utils.String(vnetSubnetID)
 	}
@@ -629,6 +644,12 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter
 		osDiskSizeGB = int(*props.OsDiskSizeGB)
 	}
 	d.Set("os_disk_size_gb", osDiskSizeGB)
+
+	osDiskType := containerservice.Managed
+	if props.OsDiskType != "" {
+		osDiskType = props.OsDiskType
+	}
+	d.Set("os_disk_type", string(osDiskType))
 	d.Set("os_type", string(props.OsType))
 
 	// not returned from the API if not Spot
diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go
index dbb9412afa8b..640f5dd10442 100644
--- a/azurerm/internal/services/containers/kubernetes_nodepool.go
+++ b/azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -124,6 +124,17 @@ func SchemaDefaultNodePool() *schema.Schema {
 				ValidateFunc: validation.IntAtLeast(1),
 			},
 
+			"os_disk_type": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Default:  containerservice.Managed,
+				ValidateFunc: validation.StringInSlice([]string{
+					string(containerservice.Ephemeral),
+					string(containerservice.Managed),
+				}, false),
+			},
+
 			"vnet_subnet_id": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -155,6 +166,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
 			Count:        defaultCluster.Count,
 			VMSize:       defaultCluster.VMSize,
 			OsDiskSizeGB: defaultCluster.OsDiskSizeGB,
+			OsDiskType:   defaultCluster.OsDiskType,
 			VnetSubnetID: defaultCluster.VnetSubnetID,
 			MaxPods:      defaultCluster.MaxPods,
 			OsType:       defaultCluster.OsType,
@@ -233,6 +245,11 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC
 		profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB)
 	}
 
+	profile.OsDiskType = containerservice.Managed
+	if osDiskType := raw["os_disk_type"].(string); osDiskType != "" {
+		profile.OsDiskType = containerservice.OSDiskType(osDiskType)
+	}
+
 	if vnetSubnetID := raw["vnet_subnet_id"].(string); vnetSubnetID != "" {
 		profile.VnetSubnetID = utils.String(vnetSubnetID)
 	}
@@ -360,6 +377,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		osDiskSizeGB = int(*agentPool.OsDiskSizeGB)
 	}
 
+	osDiskType := containerservice.Managed
+	if agentPool.OsDiskType != "" {
+		osDiskType = agentPool.OsDiskType
+	}
+
 	vnetSubnetId := ""
 	if agentPool.VnetSubnetID != nil {
 		vnetSubnetId = *agentPool.VnetSubnetID
@@ -388,6 +410,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
 		"node_labels":     nodeLabels,
 		"node_taints":     []string{},
 		"os_disk_size_gb": osDiskSizeGB,
+		"os_disk_type":    string(osDiskType),
 		"tags":            tags.Flatten(agentPool.Tags),
 		"type":            string(agentPool.Type),
 		"vm_size":         string(agentPool.VMSize),
diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go
index 60a8092cf3c2..211f15f78828 100644
--- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go
@@ -34,6 +34,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
 	"spot":                      testAccAzureRMKubernetesClusterNodePool_spot,
 	"osDiskSizeGB":              testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB,
 	"proximityPlacementGroupId": testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupId,
+	"osDiskType":                testAccAzureRMKubernetesClusterNodePool_osDiskType,
 	"modeSystem":                testAccAzureRMKubernetesClusterNodePool_modeSystem,
 	"modeUpdate":                testAccAzureRMKubernetesClusterNodePool_modeUpdate,
 	"virtualNetworkAutomatic":   testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic,
@@ -597,6 +598,30 @@ func testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupId(t *testin
 	})
 }
 
+func TestAccAzureRMKubernetesClusterNodePool_osDiskType(t *testing.T) {
+	checkIfShouldRunTestsIndividually(t)
+	testAccAzureRMKubernetesClusterNodePool_osDiskType(t)
+}
+
+func testAccAzureRMKubernetesClusterNodePool_osDiskType(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMKubernetesClusterNodePool_osDiskTypeConfig(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+				),
+			},
+			data.ImportStep(),
+		},
+	})
+}
+
 func TestAccAzureRMKubernetesClusterNodePool_requiresImport(t *testing.T) {
 	checkIfShouldRunTestsIndividually(t)
 	testAccAzureRMKubernetesClusterNodePool_requiresImport(t)
@@ -1416,39 +1441,32 @@ func testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupIdConfig(dat
 provider "azurerm" {
   features {}
 }
-
 resource "azurerm_resource_group" "test" {
   name     = "acctestRG-aks-%d"
   location = "%s"
 }
-
 resource "azurerm_kubernetes_cluster" "test" {
   name                = "acctestaks%d"
   location            = azurerm_resource_group.test.location
   resource_group_name = azurerm_resource_group.test.name
   dns_prefix          = "acctestaks%d"
-
   default_node_pool {
     name       = "default"
     node_count = 1
     vm_size    = "Standard_DS2_v2"
   }
-
   identity {
     type = "SystemAssigned"
   }
 }
-
 resource "azurerm_proximity_placement_group" "test" {
   name                = "acctestPPG-aks-%d"
   location            = azurerm_resource_group.test.location
   resource_group_name = azurerm_resource_group.test.name
-
   tags = {
     environment = "Production"
   }
 }
-
 resource "azurerm_kubernetes_cluster_node_pool" "test" {
   name                         = "internal"
   kubernetes_cluster_id        = azurerm_kubernetes_cluster.test.id
@@ -1456,10 +1474,27 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   node_count                   = 1
   proximity_placement_group_id = azurerm_proximity_placement_group.test.id
 }
-
 `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger)
 }
 
+func testAccAzureRMKubernetesClusterNodePool_osDiskTypeConfig(data acceptance.TestData) string {
+	template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data)
+	return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+%s
+resource "azurerm_kubernetes_cluster_node_pool" "test" {
+  name                  = "internal"
+  kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
+  vm_size               = "Standard_DS3_v2"
+  node_count            = 1
+  os_disk_size_gb       = 100
+  os_disk_type          = "Ephemeral"
+}
+`, template)
+}
+
 func testAccAzureRMKubernetesClusterNodePool_spotConfig(data acceptance.TestData) string {
 	template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data)
 	return fmt.Sprintf(`
diff --git a/website/docs/d/kubernetes_cluster_node_pool.html.markdown b/website/docs/d/kubernetes_cluster_node_pool.html.markdown
index 7d9de1ffeeaf..39dea9fe65d3 100644
--- a/website/docs/d/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/d/kubernetes_cluster_node_pool.html.markdown
@@ -66,6 +66,8 @@ In addition to the Arguments listed above - the following Attributes are exporte
 
 * `os_disk_size_gb` - The size of the OS Disk on each Node in this Node Pool.
 
+* `os_disk_type` - The type of the OS Disk on each Node in this Node Pool.
+
 * `os_type` - The operating system used on each Node in this Node Pool.
 
 * `priority` - The priority of the Virtual Machines in the Virtual Machine Scale Set backing this Node Pool.
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
index 657b27745f85..5b35dab0a5ef 100644
--- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -95,6 +95,8 @@ The following arguments are supported:
 
 * `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.
 
+* `os_disk_type` - (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.
+
 * `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
 
 * `priority` - (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
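
Usage note: a minimal sketch of the new argument in practice, adapted from the osDiskTypeConfig acceptance-test configuration above. Resource names here are illustrative, and the sketch assumes Standard_DS3_v2 exposes enough local/cache storage to host a 100 GB Ephemeral OS disk (which is why the test pairs that size and disk type).

resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS3_v2"
  node_count            = 1

  # "Ephemeral" places the OS disk on the VM's local storage, so the size
  # requested here must fit what the chosen vm_size supports (assumed to
  # hold for Standard_DS3_v2). Omit os_disk_type to keep the "Managed"
  # default, matching the schema's Default value.
  os_disk_size_gb = 100
  os_disk_type    = "Ephemeral"
}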
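
The diff also wires the field through SchemaDefaultNodePool, ExpandDefaultNodePool and FlattenDefaultNodePool, so the same argument should be accepted on the default_node_pool block of azurerm_kubernetes_cluster. A sketch under the same assumptions:

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name            = "default"
    node_count      = 1
    vm_size         = "Standard_DS3_v2"
    os_disk_size_gb = 100
    # ForceNew in the schema above: changing this should force recreation.
    os_disk_type = "Ephemeral"
  }

  identity {
    type = "SystemAssigned"
  }
}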