From c967216233398b1a9d624abb27faf8c8b7701655 Mon Sep 17 00:00:00 2001
From: JD
Date: Fri, 29 Apr 2022 11:23:45 -0700
Subject: [PATCH] Updating azure cause microsoft broke backwards compatibility

---
 config/registry/azurerm/index.yaml      |  2 +-
 modules/azure_aks/azure-aks.yaml        |  8 ++++----
 modules/azure_aks/tf_module/aks.tf      | 21 ++++++++++++---------
 modules/azure_base/tf_module/logging.tf | 12 ++++++++++++
 4 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/config/registry/azurerm/index.yaml b/config/registry/azurerm/index.yaml
index 7dbdf88fb..02e80d71e 100644
--- a/config/registry/azurerm/index.yaml
+++ b/config/registry/azurerm/index.yaml
@@ -1,7 +1,7 @@
 required_providers:
   azurerm:
     source: "hashicorp/azurerm"
-    version: "2.78.0"
+    version: "3.4.0"
   helm:
     source: "hashicorp/helm"
     version: "2.4.1"
diff --git a/modules/azure_aks/azure-aks.yaml b/modules/azure_aks/azure-aks.yaml
index 2829f6441..43831154d 100644
--- a/modules/azure_aks/azure-aks.yaml
+++ b/modules/azure_aks/azure-aks.yaml
@@ -68,19 +68,19 @@ inputs:
     default: "10.0.128.10"
 outputs:
   - name: k8s_endpoint
-    export: true
+    export: false
     description: The endpoint to communicate to the kubernetes cluster through.
   - name: k8s_ca_data
-    export: true
+    export: false
     description: The certificate authority used by the kubernetes cluster for ssl.
   - name: k8s_cluster_name
     export: true
     description: The name of the kubernetes cluster.
   - name: client_cert
-    export: true
+    export: false
     description: Base64 encoded public certificate used by clients to authenticate to the Kubernetes cluster.
   - name: client_key
-    export: true
+    export: false
     description: Base64 encoded private key used by clients to authenticate to the Kubernetes cluster.
 output_providers:
   helm:
diff --git a/modules/azure_aks/tf_module/aks.tf b/modules/azure_aks/tf_module/aks.tf
index 456870791..12042edf4 100644
--- a/modules/azure_aks/tf_module/aks.tf
+++ b/modules/azure_aks/tf_module/aks.tf
@@ -1,3 +1,8 @@
+data "azurerm_nat_gateway" "nat" {
+  name                = "opta-${var.env_name}-nat-gateway"
+  resource_group_name = data.azurerm_resource_group.opta.name
+}
+
 resource "azurerm_user_assigned_identity" "opta" {
   name                = "opta-${var.env_name}-aks"
   location            = data.azurerm_resource_group.opta.location
@@ -63,15 +68,13 @@ resource "azurerm_kubernetes_cluster" "main" {
     service_cidr       = var.service_cidr
     dns_service_ip     = var.dns_service_ip
     docker_bridge_cidr = "172.17.0.1/16"
+    outbound_type      = "userAssignedNATGateway"
   }
 
-  role_based_access_control {
-    enabled = true
-    azure_active_directory {
-      managed                = true
-      tenant_id              = data.azurerm_client_config.current.tenant_id
-      admin_group_object_ids = var.admin_group_object_ids
-    }
+  azure_active_directory_role_based_access_control {
+    managed                = true
+    tenant_id              = data.azurerm_client_config.current.tenant_id
+    admin_group_object_ids = var.admin_group_object_ids
   }
 
   default_node_pool {
@@ -86,8 +89,8 @@
   }
 
   identity {
-    type                      = "UserAssigned"
-    user_assigned_identity_id = azurerm_user_assigned_identity.opta.id
+    type         = "UserAssigned"
+    identity_ids = [azurerm_user_assigned_identity.opta.id]
   }
 
   kubelet_identity {
diff --git a/modules/azure_base/tf_module/logging.tf b/modules/azure_base/tf_module/logging.tf
index 954aef6ce..da199c248 100644
--- a/modules/azure_base/tf_module/logging.tf
+++ b/modules/azure_base/tf_module/logging.tf
@@ -30,6 +30,16 @@ resource "azurerm_monitor_diagnostic_setting" "infra_logging" {
     }
   }
 
+  log {
+    category = "AzurePolicyEvaluationDetails"
+    enabled  = false
+
+    retention_policy {
+      days    = 0
+      enabled = false
+    }
+  }
+
   metric {
     category = "AllMetrics"
 
@@ -53,6 +63,8 @@ resource "azurerm_log_analytics_workspace" "watcher" {
 }
 
 resource "azurerm_network_watcher_flow_log" "vpc_flow_log" {
+  name                 = "opta-${var.env_name}"
+  location             = data.azurerm_resource_group.opta.location
   network_watcher_name = data.azurerm_network_watcher.default.name
   resource_group_name  = "NetworkWatcherRG"