diff --git a/data/data/aws/bootstrap/main.tf b/data/data/aws/bootstrap/main.tf
index 2b5cbfa869b..982bc94272b 100644
--- a/data/data/aws/bootstrap/main.tf
+++ b/data/data/aws/bootstrap/main.tf
@@ -184,6 +184,18 @@ resource "aws_security_group_rule" "ssh" {
   to_port           = 22
 }
 
+resource "aws_security_group_rule" "ssh_v6" {
+  count = var.use_ipv6 == true ? 1 : 0
+
+  type              = "ingress"
+  security_group_id = aws_security_group.bootstrap.id
+
+  protocol         = "tcp"
+  ipv6_cidr_blocks = local.public_endpoints ? ["::/0"] : var.vpc_ipv6_cidrs
+  from_port        = 22
+  to_port          = 22
+}
+
 resource "aws_security_group_rule" "bootstrap_journald_gateway" {
   type              = "ingress"
   security_group_id = aws_security_group.bootstrap.id
@@ -194,3 +206,14 @@ resource "aws_security_group_rule" "bootstrap_journald_gateway" {
   to_port           = 19531
 }
 
+resource "aws_security_group_rule" "bootstrap_journald_gateway_v6" {
+  count = var.use_ipv6 == true ? 1 : 0
+
+  type              = "ingress"
+  security_group_id = aws_security_group.bootstrap.id
+
+  protocol         = "tcp"
+  ipv6_cidr_blocks = local.public_endpoints ? ["::/0"] : var.vpc_ipv6_cidrs
+  from_port        = 19531
+  to_port          = 19531
+}
diff --git a/data/data/aws/bootstrap/variables.tf b/data/data/aws/bootstrap/variables.tf
index a37e9612e3a..000c8c84529 100644
--- a/data/data/aws/bootstrap/variables.tf
+++ b/data/data/aws/bootstrap/variables.tf
@@ -68,6 +68,12 @@ variable "vpc_cidrs" {
   description = "VPC CIDR blocks."
 }
 
+variable "vpc_ipv6_cidrs" {
+  type        = list(string)
+  default     = []
+  description = "VPC IPv6 CIDR blocks."
+}
+
 variable "vpc_security_group_ids" {
   type        = list(string)
   default     = []
@@ -78,3 +84,8 @@ variable "publish_strategy" {
   type        = string
   description = "The publishing strategy for endpoints like load balancers"
 }
+
+variable "use_ipv6" {
+  description = "Use IPv6 instead of IPv4"
+  type        = bool
+}
diff --git a/data/data/aws/main.tf b/data/data/aws/main.tf
index a7b6ec6d823..be50c30bf15 100644
--- a/data/data/aws/main.tf
+++ b/data/data/aws/main.tf
@@ -23,8 +23,10 @@ module "bootstrap" {
   target_group_arns_length = module.vpc.aws_lb_target_group_arns_length
   vpc_id                   = module.vpc.vpc_id
   vpc_cidrs                = module.vpc.vpc_cidrs
+  vpc_ipv6_cidrs           = module.vpc.vpc_ipv6_cidrs
   vpc_security_group_ids   = [module.vpc.master_sg_id]
   publish_strategy         = var.aws_publish_strategy
+  use_ipv6                 = var.aws_use_ipv6
 
   tags = local.tags
 }
@@ -70,10 +72,12 @@ module "dns" {
   cluster_domain    = var.cluster_domain
   cluster_id        = var.cluster_id
   etcd_count        = var.master_count
-  etcd_ip_addresses = flatten(module.masters.ip_addresses)
+  etcd_ip_addresses = var.aws_use_ipv6 == true ? flatten(module.masters.ipv6_addresses) : flatten(module.masters.ip_addresses)
   tags              = local.tags
   vpc_id            = module.vpc.vpc_id
   publish_strategy  = var.aws_publish_strategy
+
+  use_ipv6 = var.aws_use_ipv6
 }
 
 module "vpc" {
@@ -95,6 +99,8 @@ module "vpc" {
   )
 
   tags = local.tags
+
+  use_ipv6 = var.aws_use_ipv6
 }
 
 resource "aws_ami_copy" "main" {
diff --git a/data/data/aws/master/main.tf b/data/data/aws/master/main.tf
index 6a74f78e162..b3eeb147221 100644
--- a/data/data/aws/master/main.tf
+++ b/data/data/aws/master/main.tf
@@ -89,7 +89,16 @@ resource "aws_network_interface" "master" {
     var.tags,
   )
 }
-
+
+# NOTE(russellb) For some reason, I was not able to get IPv6 addresses
+# on the resource, but was able to get them using the network interface data
+# source.
+data "aws_network_interface" "master" { + count = var.instance_count + + id = aws_network_interface.master[count.index].id +} + resource "aws_instance" "master" { count = var.instance_count ami = var.ec2_ami @@ -137,4 +146,3 @@ resource "aws_lb_target_group_attachment" "master" { target_group_arn = var.target_group_arns[count.index % local.target_group_arns_length] target_id = aws_instance.master[floor(count.index / local.target_group_arns_length)].private_ip } - diff --git a/data/data/aws/master/outputs.tf b/data/data/aws/master/outputs.tf index 06c06533e7f..eb3060db12f 100644 --- a/data/data/aws/master/outputs.tf +++ b/data/data/aws/master/outputs.tf @@ -2,3 +2,6 @@ output "ip_addresses" { value = aws_network_interface.master.*.private_ips } +output "ipv6_addresses" { + value = data.aws_network_interface.master.*.ipv6_addresses +} diff --git a/data/data/aws/route53/base.tf b/data/data/aws/route53/base.tf index 681b0e69c5c..6619960b292 100644 --- a/data/data/aws/route53/base.tf +++ b/data/data/aws/route53/base.tf @@ -69,19 +69,20 @@ resource "aws_route53_record" "api_external_internal_zone" { } resource "aws_route53_record" "etcd_a_nodes" { - count = var.etcd_count + count = var.use_ipv6 == false ? var.etcd_count : 0 type = "A" ttl = "60" zone_id = aws_route53_zone.int.zone_id name = "etcd-${count.index}.${var.cluster_domain}" - # TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to - # force an interpolation expression to be interpreted as a list by wrapping it - # in an extra set of list brackets. That form was supported for compatibilty in - # v0.11, but is no longer supported in Terraform v0.12. - # - # If the expression in the following list itself returns a list, remove the - # brackets to avoid interpretation as a list of lists. If the expression - # returns a single list item then leave it as-is and remove this TODO comment. + records = [var.etcd_ip_addresses[count.index]] +} + +resource "aws_route53_record" "etcd_aaaa_nodes" { + count = var.use_ipv6 == true ? var.etcd_count : 0 + type = "AAAA" + ttl = "60" + zone_id = aws_route53_zone.int.zone_id + name = "etcd-${count.index}.${var.cluster_domain}" records = [var.etcd_ip_addresses[count.index]] } @@ -90,6 +91,6 @@ resource "aws_route53_record" "etcd_cluster" { ttl = "60" zone_id = aws_route53_zone.int.zone_id name = "_etcd-server-ssl._tcp" - records = formatlist("0 10 2380 %s", aws_route53_record.etcd_a_nodes.*.fqdn) + records = formatlist("0 10 2380 %s", var.use_ipv6 == false ? aws_route53_record.etcd_a_nodes.*.fqdn : aws_route53_record.etcd_aaaa_nodes.*.fqdn) } diff --git a/data/data/aws/route53/variables.tf b/data/data/aws/route53/variables.tf index 06746923ac0..9ab0348fa0f 100644 --- a/data/data/aws/route53/variables.tf +++ b/data/data/aws/route53/variables.tf @@ -9,7 +9,7 @@ variable "etcd_count" { } variable "etcd_ip_addresses" { - description = "List of string IPs for machines running etcd members." + description = "List of string IPs (IPv4 or IPv6) for machines running etcd members." type = list(string) default = [] } @@ -64,3 +64,8 @@ based on if api_external_lb_dns_name for example, which will be null when there So publish_strategy serves an coordinated proxy for that decision. 
EOF } + +variable "use_ipv6" { + description = "Use IPv6 instead of IPv4" + type = bool +} diff --git a/data/data/aws/variables-aws.tf b/data/data/aws/variables-aws.tf index 07104900ffc..2234c550b99 100644 --- a/data/data/aws/variables-aws.tf +++ b/data/data/aws/variables-aws.tf @@ -91,3 +91,9 @@ variable "aws_publish_strategy" { type = string description = "The cluster publishing strategy, either Internal or External" } + +variable "aws_use_ipv6" { + type = bool + default = false + description = "Enable an experimental IPv6 environment" +} diff --git a/data/data/aws/vpc/master-elb.tf b/data/data/aws/vpc/master-elb.tf index fcf9fd0ee1a..85b5e4da7c4 100644 --- a/data/data/aws/vpc/master-elb.tf +++ b/data/data/aws/vpc/master-elb.tf @@ -153,4 +153,3 @@ resource "aws_lb_listener" "api_external_api" { type = "forward" } } - diff --git a/data/data/aws/vpc/outputs.tf b/data/data/aws/vpc/outputs.tf index 8f410101aa9..a1f355b2475 100644 --- a/data/data/aws/vpc/outputs.tf +++ b/data/data/aws/vpc/outputs.tf @@ -6,6 +6,10 @@ output "vpc_cidrs" { value = [data.aws_vpc.cluster_vpc.cidr_block] } +output "vpc_ipv6_cidrs" { + value = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] +} + output "az_to_private_subnet_id" { value = zipmap(data.aws_subnet.private.*.availability_zone, data.aws_subnet.private.*.id) } @@ -63,4 +67,3 @@ output "aws_lb_api_internal_dns_name" { output "aws_lb_api_internal_zone_id" { value = aws_lb.api_internal.zone_id } - diff --git a/data/data/aws/vpc/sg-master.tf b/data/data/aws/vpc/sg-master.tf index 4aa8736c623..2c7038733dc 100644 --- a/data/data/aws/vpc/sg-master.tf +++ b/data/data/aws/vpc/sg-master.tf @@ -23,6 +23,18 @@ resource "aws_security_group_rule" "master_mcs" { to_port = 22623 } +resource "aws_security_group_rule" "master_mcs_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + protocol = "tcp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = 22623 + to_port = 22623 +} + resource "aws_security_group_rule" "master_egress" { type = "egress" security_group_id = aws_security_group.master.id @@ -33,6 +45,18 @@ resource "aws_security_group_rule" "master_egress" { cidr_blocks = ["0.0.0.0/0"] } +resource "aws_security_group_rule" "master_egress_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "egress" + security_group_id = aws_security_group.master.id + + from_port = 0 + to_port = 0 + protocol = "-1" + ipv6_cidr_blocks = ["::/0"] +} + resource "aws_security_group_rule" "master_ingress_icmp" { type = "ingress" security_group_id = aws_security_group.master.id @@ -43,6 +67,18 @@ resource "aws_security_group_rule" "master_ingress_icmp" { to_port = -1 } +resource "aws_security_group_rule" "master_ingress_icmp_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + protocol = "icmp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = -1 + to_port = -1 +} + resource "aws_security_group_rule" "master_ingress_ssh" { type = "ingress" security_group_id = aws_security_group.master.id @@ -53,6 +89,18 @@ resource "aws_security_group_rule" "master_ingress_ssh" { to_port = 22 } +resource "aws_security_group_rule" "master_ingress_ssh_v6" { + count = var.use_ipv6 == true ? 
1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + protocol = "tcp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = 22 + to_port = 22 +} + resource "aws_security_group_rule" "master_ingress_https" { type = "ingress" security_group_id = aws_security_group.master.id @@ -63,6 +111,18 @@ resource "aws_security_group_rule" "master_ingress_https" { to_port = 6443 } +resource "aws_security_group_rule" "master_ingress_https_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + protocol = "tcp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = 6443 + to_port = 6443 +} + resource "aws_security_group_rule" "master_ingress_vxlan" { type = "ingress" security_group_id = aws_security_group.master.id @@ -253,3 +313,31 @@ resource "aws_security_group_rule" "master_ingress_services_udp" { self = true } +# For our AWS IPv6 environment, we run CoreDNS with host networking, +# because it must use IPv4 to reach AWS DNS, so it can't be on our +# IPv6 only SDN. +resource "aws_security_group_rule" "master_dns_udp" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + + protocol = "udp" + from_port = 5353 + to_port = 5353 +} + +resource "aws_security_group_rule" "master_dns_tcp" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.master.id + + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + + protocol = "tcp" + from_port = 5353 + to_port = 5353 +} diff --git a/data/data/aws/vpc/sg-worker.tf b/data/data/aws/vpc/sg-worker.tf index 1000c688dc5..e06aa472728 100644 --- a/data/data/aws/vpc/sg-worker.tf +++ b/data/data/aws/vpc/sg-worker.tf @@ -23,6 +23,18 @@ resource "aws_security_group_rule" "worker_egress" { cidr_blocks = ["0.0.0.0/0"] } +resource "aws_security_group_rule" "worker_egress_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "egress" + security_group_id = aws_security_group.worker.id + + from_port = 0 + to_port = 0 + protocol = "-1" + ipv6_cidr_blocks = ["::/0"] +} + resource "aws_security_group_rule" "worker_ingress_icmp" { type = "ingress" security_group_id = aws_security_group.worker.id @@ -33,6 +45,18 @@ resource "aws_security_group_rule" "worker_ingress_icmp" { to_port = -1 } +resource "aws_security_group_rule" "worker_ingress_icmp_v6" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.worker.id + + protocol = "icmp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = -1 + to_port = -1 +} + resource "aws_security_group_rule" "worker_ingress_ssh" { type = "ingress" security_group_id = aws_security_group.worker.id @@ -43,6 +67,18 @@ resource "aws_security_group_rule" "worker_ingress_ssh" { to_port = 22 } +resource "aws_security_group_rule" "worker_ingress_ssh_v6" { + count = var.use_ipv6 == true ? 
1 : 0 + + type = "ingress" + security_group_id = aws_security_group.worker.id + + protocol = "tcp" + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + from_port = 22 + to_port = 22 +} + resource "aws_security_group_rule" "worker_ingress_vxlan" { type = "ingress" security_group_id = aws_security_group.worker.id @@ -163,3 +199,31 @@ resource "aws_security_group_rule" "worker_ingress_services_udp" { self = true } +# For our AWS IPv6 environment, we run CoreDNS with host networking, +# because it must use IPv4 to reach AWS DNS, so it can't be on our +# IPv6 only SDN. +resource "aws_security_group_rule" "worker_dns_udp" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.worker.id + + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + + protocol = "udp" + from_port = 5353 + to_port = 5353 +} + +resource "aws_security_group_rule" "worker_dns_tcp" { + count = var.use_ipv6 == true ? 1 : 0 + + type = "ingress" + security_group_id = aws_security_group.worker.id + + ipv6_cidr_blocks = [data.aws_vpc.cluster_vpc.ipv6_cidr_block] + + protocol = "tcp" + from_port = 5353 + to_port = 5353 +} diff --git a/data/data/aws/vpc/variables.tf b/data/data/aws/vpc/variables.tf index 220a21c906d..39273d91aac 100644 --- a/data/data/aws/vpc/variables.tf +++ b/data/data/aws/vpc/variables.tf @@ -41,3 +41,8 @@ variable "private_subnets" { type = list(string) description = "Existing private subnets into which the cluster should be installed." } + +variable "use_ipv6" { + description = "Use IPv6 instead of IPv4" + type = bool +} diff --git a/data/data/aws/vpc/vpc-private.tf b/data/data/aws/vpc/vpc-private.tf index 15f98bf7968..63b08705142 100644 --- a/data/data/aws/vpc/vpc-private.tf +++ b/data/data/aws/vpc/vpc-private.tf @@ -24,6 +24,21 @@ resource "aws_route" "to_nat_gw" { } } +# We can't target the NAT gw for our "private" IPv6 subnet. Instead, we target the internet gateway, +# since we want our private IPv6 addresses to be able to talk out to the internet, too. +resource "aws_route" "private_igw_v6" { + count = var.use_ipv6 == true && var.private_subnets == null ? length(var.availability_zones) : 0 + + route_table_id = aws_route_table.private_routes[count.index].id + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.igw[0].id + depends_on = [aws_route_table.private_routes] + + timeouts { + create = "20m" + } +} + resource "aws_subnet" "private_subnet" { count = var.private_subnets == null ? length(var.availability_zones) : 0 @@ -33,6 +48,10 @@ resource "aws_subnet" "private_subnet" { availability_zone = var.availability_zones[count.index] + # AWS gives us a /56 and we need to carve out /64 subnets. + ipv6_cidr_block = var.use_ipv6 == true ? cidrsubnet(data.aws_vpc.cluster_vpc.ipv6_cidr_block, 8, count.index) : "" + assign_ipv6_address_on_creation = var.use_ipv6 + tags = merge( { "Name" = "${var.cluster_id}-private-${var.availability_zones[count.index]}" diff --git a/data/data/aws/vpc/vpc-public.tf b/data/data/aws/vpc/vpc-public.tf index e85ac8e92da..eb150bd6488 100644 --- a/data/data/aws/vpc/vpc-public.tf +++ b/data/data/aws/vpc/vpc-public.tf @@ -43,6 +43,18 @@ resource "aws_route" "igw_route" { } } +resource "aws_route" "igw_route_v6" { + count = var.use_ipv6 == true && var.vpc == null ? 
1 : 0
+
+  destination_ipv6_cidr_block = "::/0"
+  route_table_id              = aws_route_table.default[0].id
+  gateway_id                  = aws_internet_gateway.igw[0].id
+
+  timeouts {
+    create = "20m"
+  }
+}
+
 resource "aws_subnet" "public_subnet" {
   count = var.public_subnets == null ? length(var.availability_zones) : 0
 
@@ -50,6 +62,12 @@ resource "aws_subnet" "public_subnet" {
   cidr_block        = cidrsubnet(local.new_public_cidr_range, 3, count.index)
   availability_zone = var.availability_zones[count.index]
 
+  # AWS gives us a /56 and we need to carve out /64 subnets.
+  # We skip the first length(var.availability_zones), because
+  # that is how many were taken in vpc-private.tf.
+  ipv6_cidr_block                 = var.use_ipv6 == true ? cidrsubnet(data.aws_vpc.cluster_vpc.ipv6_cidr_block, 8, count.index + length(var.availability_zones)) : ""
+  assign_ipv6_address_on_creation = var.use_ipv6
+
   tags = merge(
     {
       "Name" = "${var.cluster_id}-public-${var.availability_zones[count.index]}"
diff --git a/data/data/aws/vpc/vpc.tf b/data/data/aws/vpc/vpc.tf
index a2534912f2a..607d67968c4 100644
--- a/data/data/aws/vpc/vpc.tf
+++ b/data/data/aws/vpc/vpc.tf
@@ -10,6 +10,8 @@ resource "aws_vpc" "new_vpc" {
   enable_dns_hostnames = true
   enable_dns_support   = true
 
+  assign_generated_ipv6_cidr_block = var.use_ipv6
+
   tags = merge(
     {
       "Name" = "${var.cluster_id}-vpc"
diff --git a/data/data/azure/bootstrap/main.tf b/data/data/azure/bootstrap/main.tf
index 91293d72fd5..47633f15095 100644
--- a/data/data/azure/bootstrap/main.tf
+++ b/data/data/azure/bootstrap/main.tf
@@ -1,5 +1,6 @@
 locals {
-  bootstrap_nic_ip_configuration_name = "bootstrap-nic-ip"
+  bootstrap_nic_ip_configuration_name    = "bootstrap-nic-ip"
+  bootstrap_nic_ip_v6_configuration_name = "bootstrap-nic-ip-v6"
 }
 
 data "azurerm_storage_account_sas" "ignition" {
@@ -78,16 +79,44 @@ data "azurerm_public_ip" "bootstrap_public_ip" {
   resource_group_name = var.resource_group_name
 }
 
+resource "azurerm_public_ip" "bootstrap_public_ip_v6" {
+  count = var.private || ! var.use_ipv6 ? 0 : 1
+
+  sku                 = "Standard"
+  location            = var.region
+  name                = "${var.cluster_id}-bootstrap-pip-v6"
+  resource_group_name = var.resource_group_name
+  allocation_method   = "Static"
+  ip_version          = "IPv6"
+}
+
+data "azurerm_public_ip" "bootstrap_public_ip_v6" {
+  count = var.private || ! var.use_ipv6 ? 0 : 1
+
+  name                = azurerm_public_ip.bootstrap_public_ip_v6[0].name
+  resource_group_name = var.resource_group_name
+}
+
 resource "azurerm_network_interface" "bootstrap" {
   name                = "${var.cluster_id}-bootstrap-nic"
   location            = var.region
   resource_group_name = var.resource_group_name
 
-  ip_configuration {
-    subnet_id                     = var.subnet_id
-    name                          = local.bootstrap_nic_ip_configuration_name
-    private_ip_address_allocation = "Dynamic"
-    public_ip_address_id          = var.private ? null : azurerm_public_ip.bootstrap_public_ip[0].id
+  dynamic "ip_configuration" {
+    for_each = var.use_ipv6 ? [
+      { primary : true, name : local.bootstrap_nic_ip_configuration_name, ip_address_version : "IPv4", public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip[0].id },
+      { primary : false, name : local.bootstrap_nic_ip_v6_configuration_name, ip_address_version : "IPv6", public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip_v6[0].id },
+    ] : [
+      { primary : true, name : local.bootstrap_nic_ip_configuration_name, ip_address_version : "IPv4", public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip[0].id }
+    ]
+    content {
+      primary                       = ip_configuration.value.primary
+      name                          = ip_configuration.value.name
+      subnet_id                     = var.subnet_id
+      private_ip_address_version    = ip_configuration.value.ip_address_version
+      private_ip_address_allocation = "Dynamic"
+      public_ip_address_id          = ip_configuration.value.public_ip_id
+    }
   }
 }
 
@@ -97,12 +126,28 @@ resource "azurerm_network_interface_backend_address_pool_association" "public_lb
   ip_configuration_name   = local.bootstrap_nic_ip_configuration_name
 }
 
+resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap_v6" {
+  count = var.use_ipv6 ? 1 : 0
+
+  network_interface_id    = azurerm_network_interface.bootstrap.id
+  backend_address_pool_id = var.elb_backend_pool_v6_id
+  ip_configuration_name   = local.bootstrap_nic_ip_v6_configuration_name
+}
+
 resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap" {
   network_interface_id    = azurerm_network_interface.bootstrap.id
   backend_address_pool_id = var.ilb_backend_pool_id
   ip_configuration_name   = local.bootstrap_nic_ip_configuration_name
 }
 
+resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap_v6" {
+  count = var.use_ipv6 ? 1 : 0
+
+  network_interface_id    = azurerm_network_interface.bootstrap.id
+  backend_address_pool_id = var.ilb_backend_pool_v6_id
+  ip_configuration_name   = local.bootstrap_nic_ip_v6_configuration_name
+}
+
 resource "azurerm_virtual_machine" "bootstrap" {
   name                  = "${var.cluster_id}-bootstrap"
   location              = var.region
@@ -151,7 +196,9 @@ resource "azurerm_virtual_machine" "bootstrap" {
 
   depends_on = [
     azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap,
-    azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap
+    azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap_v6,
+    azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap,
+    azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap_v6
   ]
 }
 
diff --git a/data/data/azure/bootstrap/variables.tf b/data/data/azure/bootstrap/variables.tf
index 445182b1a48..c5995fd26a8 100644
--- a/data/data/azure/bootstrap/variables.tf
+++ b/data/data/azure/bootstrap/variables.tf
@@ -43,11 +43,21 @@ variable "elb_backend_pool_id" {
   description = "The external load balancer bakend pool id. used to attach the bootstrap NIC"
 }
 
+variable "elb_backend_pool_v6_id" {
+  type        = string
+  description = "The external load balancer backend pool id for IPv6, used to attach the bootstrap NIC"
+}
+
 variable "ilb_backend_pool_id" {
   type        = string
   description = "The internal load balancer bakend pool id. used to attach the bootstrap NIC"
 }
 
+variable "ilb_backend_pool_v6_id" {
+  type        = string
+  description = "The internal load balancer backend pool id for IPv6, used to attach the bootstrap NIC"
+}
+
 variable "storage_account" {
   type        = any
   description = "the storage account for the cluster. It can be used for boot diagnostics."
@@ -68,3 +78,8 @@ variable "private" {
   type        = bool
   description = "This value determines if this is a private cluster or not."
} + +variable "use_ipv6" { + description = "Use IPv6 as well as IPv4" + type = bool +} diff --git a/data/data/azure/dns/dns.tf b/data/data/azure/dns/dns.tf index 0fe7e3e093f..85c6712653c 100644 --- a/data/data/azure/dns/dns.tf +++ b/data/data/azure/dns/dns.tf @@ -23,6 +23,16 @@ resource "azureprivatedns_a_record" "apiint_internal" { records = [var.internal_lb_ipaddress] } +resource "azureprivatedns_aaaa_record" "apiint_internal_v6" { + count = var.use_ipv6 ? 1 : 0 + + name = "api-int" + zone_name = azureprivatedns_zone.private.name + resource_group_name = var.resource_group_name + ttl = 300 + records = [var.internal_lb_ipaddress_v6] +} + resource "azureprivatedns_a_record" "api_internal" { name = "api" zone_name = azureprivatedns_zone.private.name @@ -31,6 +41,16 @@ resource "azureprivatedns_a_record" "api_internal" { records = [var.internal_lb_ipaddress] } +resource "azureprivatedns_aaaa_record" "api_internal_v6" { + count = var.use_ipv6 ? 1 : 0 + + name = "api" + zone_name = azureprivatedns_zone.private.name + resource_group_name = var.resource_group_name + ttl = 300 + records = [var.internal_lb_ipaddress_v6] +} + resource "azurerm_dns_cname_record" "api_external" { count = var.private ? 0 : 1 @@ -41,6 +61,16 @@ resource "azurerm_dns_cname_record" "api_external" { record = var.external_lb_fqdn } +resource "azurerm_dns_cname_record" "api_external_v6" { + count = var.private || ! var.use_ipv6 ? 0 : 1 + + name = "v6-${local.api_external_name}" + zone_name = var.base_domain + resource_group_name = var.base_domain_resource_group_name + ttl = 300 + record = var.external_lb_fqdn_v6 +} + resource "azureprivatedns_a_record" "etcd_a_nodes" { count = var.etcd_count name = "etcd-${count.index}" @@ -50,6 +80,15 @@ resource "azureprivatedns_a_record" "etcd_a_nodes" { records = [var.etcd_ip_addresses[count.index]] } +resource "azureprivatedns_aaaa_record" "etcd_aaaa_nodes" { + count = var.use_ipv6 ? var.etcd_count : 0 + name = "etcd-${count.index}" + zone_name = azureprivatedns_zone.private.name + resource_group_name = var.resource_group_name + ttl = 60 + records = [var.etcd_ip_v6_addresses[count.index]] +} + resource "azureprivatedns_srv_record" "etcd_cluster" { name = "_etcd-server-ssl._tcp" zone_name = azureprivatedns_zone.private.name @@ -57,7 +96,7 @@ resource "azureprivatedns_srv_record" "etcd_cluster" { ttl = 60 dynamic "record" { - for_each = azureprivatedns_a_record.etcd_a_nodes.*.name + for_each = var.use_ipv6 ? concat(azureprivatedns_a_record.etcd_a_nodes.*.name, azureprivatedns_aaaa_record.etcd_aaaa_nodes.*.name) : azureprivatedns_a_record.etcd_a_nodes.*.name iterator = name content { target = "${name.value}.${azureprivatedns_zone.private.name}" diff --git a/data/data/azure/dns/variables.tf b/data/data/azure/dns/variables.tf index 41f26f45d81..acad5d80e00 100644 --- a/data/data/azure/dns/variables.tf +++ b/data/data/azure/dns/variables.tf @@ -29,8 +29,18 @@ variable "external_lb_fqdn" { type = string } +variable "external_lb_fqdn_v6" { + description = "External API's LB fqdn for IPv6" + type = string +} + variable "internal_lb_ipaddress" { - description = "External API's LB Ip address" + description = "External API's LB IP address" + type = string +} + +variable "internal_lb_ipaddress_v6" { + description = "External API's LB IP v6 address" type = string } @@ -50,6 +60,12 @@ variable "etcd_ip_addresses" { default = [] } +variable "etcd_ip_v6_addresses" { + description = "List of string IPs for machines running etcd members." 
+ type = list(string) + default = [] +} + variable "resource_group_name" { type = string description = "Resource group for the deployment" @@ -59,3 +75,8 @@ variable "private" { type = bool description = "This value determines if this is a private cluster or not." } + +variable "use_ipv6" { + description = "Use IPv6 as well as IPv4" + type = bool +} diff --git a/data/data/azure/main.tf b/data/data/azure/main.tf index ea3553da5d5..392cdcbff46 100644 --- a/data/data/azure/main.tf +++ b/data/data/azure/main.tf @@ -22,27 +22,32 @@ provider "azureprivatedns" { } module "bootstrap" { - source = "./bootstrap" - resource_group_name = azurerm_resource_group.main.name - region = var.azure_region - vm_size = var.azure_bootstrap_vm_type - vm_image = azurerm_image.cluster.id - identity = azurerm_user_assigned_identity.main.id - cluster_id = var.cluster_id - ignition = var.ignition_bootstrap - subnet_id = module.vnet.master_subnet_id - elb_backend_pool_id = module.vnet.public_lb_backend_pool_id - ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id - tags = local.tags - storage_account = azurerm_storage_account.cluster - nsg_name = module.vnet.master_nsg_name - private = module.vnet.private + source = "./bootstrap" + resource_group_name = azurerm_resource_group.main.name + region = var.azure_region + vm_size = var.azure_bootstrap_vm_type + vm_image = azurerm_image.cluster.id + identity = azurerm_user_assigned_identity.main.id + cluster_id = var.cluster_id + ignition = var.ignition_bootstrap + subnet_id = module.vnet.master_subnet_id + elb_backend_pool_id = module.vnet.public_lb_backend_pool_id + elb_backend_pool_v6_id = module.vnet.public_lb_backend_pool_v6_id + ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id + ilb_backend_pool_v6_id = module.vnet.internal_lb_backend_pool_v6_id + tags = local.tags + storage_account = azurerm_storage_account.cluster + nsg_name = module.vnet.master_nsg_name + private = module.vnet.private + + use_ipv6 = var.azure_use_ipv6 } module "vnet" { source = "./vnet" resource_group_name = azurerm_resource_group.main.name vnet_cidr = var.machine_cidr + vnet_cidr_v6 = var.azure_machine_cidr_v6 cluster_id = var.cluster_id region = var.azure_region dns_label = var.cluster_id @@ -53,27 +58,33 @@ module "vnet" { master_subnet = var.azure_control_plane_subnet worker_subnet = var.azure_compute_subnet private = var.azure_private + + use_ipv6 = var.azure_use_ipv6 } module "master" { - source = "./master" - resource_group_name = azurerm_resource_group.main.name - cluster_id = var.cluster_id - region = var.azure_region - availability_zones = var.azure_master_availability_zones - vm_size = var.azure_master_vm_type - vm_image = azurerm_image.cluster.id - identity = azurerm_user_assigned_identity.main.id - ignition = var.ignition_master - external_lb_id = module.vnet.public_lb_id - elb_backend_pool_id = module.vnet.public_lb_backend_pool_id - ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id - subnet_id = module.vnet.master_subnet_id - instance_count = var.master_count - storage_account = azurerm_storage_account.cluster - os_volume_type = var.azure_master_root_volume_type - os_volume_size = var.azure_master_root_volume_size - private = module.vnet.private + source = "./master" + resource_group_name = azurerm_resource_group.main.name + cluster_id = var.cluster_id + region = var.azure_region + availability_zones = var.azure_master_availability_zones + vm_size = var.azure_master_vm_type + vm_image = azurerm_image.cluster.id + identity = 
azurerm_user_assigned_identity.main.id + ignition = var.ignition_master + external_lb_id = module.vnet.public_lb_id + elb_backend_pool_id = module.vnet.public_lb_backend_pool_id + elb_backend_pool_v6_id = module.vnet.public_lb_backend_pool_v6_id + ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id + ilb_backend_pool_v6_id = module.vnet.internal_lb_backend_pool_v6_id + subnet_id = module.vnet.master_subnet_id + instance_count = var.master_count + storage_account = azurerm_storage_account.cluster + os_volume_type = var.azure_master_root_volume_type + os_volume_size = var.azure_master_root_volume_size + private = module.vnet.private + + use_ipv6 = var.azure_use_ipv6 } module "dns" { @@ -83,12 +94,17 @@ module "dns" { base_domain = var.base_domain virtual_network_id = module.vnet.virtual_network_id external_lb_fqdn = module.vnet.public_lb_pip_fqdn + external_lb_fqdn_v6 = module.vnet.public_lb_pip_v6_fqdn internal_lb_ipaddress = module.vnet.internal_lb_ip_address + internal_lb_ipaddress_v6 = module.vnet.internal_lb_ip_v6_address resource_group_name = azurerm_resource_group.main.name base_domain_resource_group_name = var.azure_base_domain_resource_group_name etcd_count = var.master_count etcd_ip_addresses = module.master.ip_addresses + etcd_ip_v6_addresses = module.master.ip_v6_addresses private = module.vnet.private + + use_ipv6 = var.azure_use_ipv6 } resource "random_string" "storage_suffix" { diff --git a/data/data/azure/master/master.tf b/data/data/azure/master/master.tf index 861966d3cb6..a35552a7024 100644 --- a/data/data/azure/master/master.tf +++ b/data/data/azure/master/master.tf @@ -2,6 +2,8 @@ locals { // The name of the masters' ipconfiguration is hardcoded to "pipconfig". It needs to match cluster-api // https://github.com/openshift/cluster-api-provider-azure/blob/master/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go#L131 ip_configuration_name = "pipConfig" + // TODO: Azure machine provider probably needs to look for pipConfig-v6 as well (or a different name like pipConfig-secondary) + ip_v6_configuration_name = "pipConfig-v6" } resource "azurerm_network_interface" "master" { @@ -11,10 +13,20 @@ resource "azurerm_network_interface" "master" { location = var.region resource_group_name = var.resource_group_name - ip_configuration { - subnet_id = var.subnet_id - name = local.ip_configuration_name - private_ip_address_allocation = "Dynamic" + dynamic "ip_configuration" { + for_each = var.use_ipv6 ? [ + { primary : true, name : local.ip_configuration_name, ip_address_version : "IPv4" }, + { primary : false, name : local.ip_v6_configuration_name, ip_address_version : "IPv6" }, + ] : [ + { primary : true, name : local.ip_configuration_name, ip_address_version : "IPv4" } + ] + content { + primary = ip_configuration.value.primary + name = ip_configuration.value.name + subnet_id = var.subnet_id + private_ip_address_version = ip_configuration.value.ip_address_version + private_ip_address_allocation = "Dynamic" + } } } @@ -23,7 +35,15 @@ resource "azurerm_network_interface_backend_address_pool_association" "master" { network_interface_id = element(azurerm_network_interface.master.*.id, count.index) backend_address_pool_id = var.elb_backend_pool_id - ip_configuration_name = local.ip_configuration_name #must be the same as nic's ip configuration name. + ip_configuration_name = local.ip_configuration_name +} + +resource "azurerm_network_interface_backend_address_pool_association" "master_v6" { + count = var.use_ipv6 ? 
var.instance_count : 0 + + network_interface_id = element(azurerm_network_interface.master.*.id, count.index) + backend_address_pool_id = var.elb_backend_pool_v6_id + ip_configuration_name = local.ip_v6_configuration_name } resource "azurerm_network_interface_backend_address_pool_association" "master_internal" { @@ -31,7 +51,15 @@ resource "azurerm_network_interface_backend_address_pool_association" "master_in network_interface_id = element(azurerm_network_interface.master.*.id, count.index) backend_address_pool_id = var.ilb_backend_pool_id - ip_configuration_name = local.ip_configuration_name #must be the same as nic's ip configuration name. + ip_configuration_name = local.ip_configuration_name +} + +resource "azurerm_network_interface_backend_address_pool_association" "master_internal_v6" { + count = var.use_ipv6 ? var.instance_count : 0 + + network_interface_id = element(azurerm_network_interface.master.*.id, count.index) + backend_address_pool_id = var.ilb_backend_pool_v6_id + ip_configuration_name = local.ip_v6_configuration_name } resource "azurerm_virtual_machine" "master" { diff --git a/data/data/azure/master/outputs.tf b/data/data/azure/master/outputs.tf index 414be4c4807..0d4d986e8ce 100644 --- a/data/data/azure/master/outputs.tf +++ b/data/data/azure/master/outputs.tf @@ -2,3 +2,7 @@ output "ip_addresses" { value = azurerm_network_interface.master.*.private_ip_address } +output "ip_v6_addresses" { + value = azurerm_network_interface.master.*.private_ip_addresses.1 +} + diff --git a/data/data/azure/master/variables.tf b/data/data/azure/master/variables.tf index 255a49b0e70..d65870eda50 100644 --- a/data/data/azure/master/variables.tf +++ b/data/data/azure/master/variables.tf @@ -38,10 +38,18 @@ variable "elb_backend_pool_id" { type = string } +variable "elb_backend_pool_v6_id" { + type = string +} + variable "ilb_backend_pool_id" { type = string } +variable "ilb_backend_pool_v6_id" { + type = string +} + variable "ignition_master" { type = string default = "" @@ -91,3 +99,8 @@ variable "private" { type = bool description = "This value determines if this is a private cluster or not." } + +variable "use_ipv6" { + description = "Use IPv6 as well as IPv4" + type = bool +} diff --git a/data/data/azure/variables-azure.tf b/data/data/azure/variables-azure.tf index 33bbd0b50b8..dcdf7862aa2 100644 --- a/data/data/azure/variables-azure.tf +++ b/data/data/azure/variables-azure.tf @@ -111,3 +111,13 @@ variable "azure_private" { type = bool description = "This determines if this is a private cluster or not." } + +variable "azure_use_ipv6" { + type = bool + default = false + description = "Enable an experimental IPv6 environment" +} +variable "azure_machine_cidr_v6" { + type = string + description = "The machine CIDR for the virtual network when IPv6 is in use." +} \ No newline at end of file diff --git a/data/data/azure/vnet/common.tf b/data/data/azure/vnet/common.tf index 50304ded1e3..5b1aef10cb9 100644 --- a/data/data/azure/vnet/common.tf +++ b/data/data/azure/vnet/common.tf @@ -26,8 +26,11 @@ data "azurerm_virtual_network" "preexisting_virtual_network" { // Only reference data sources which are guaranteed to exist at any time (above) in this locals{} block locals { - master_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 0) #master subnet is a smaller subnet within the vnet. i.e from /21 to /24 - worker_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 1) #node subnet is a smaller subnet within the vnet. 
i.e from /21 to /24 + master_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 0) #master subnet is a smaller subnet within the vnet. i.e from /21 to /24 + master_subnet_cidr_v6 = cidrsubnet(var.vnet_cidr_v6, 16, 0) #master subnet is a smaller subnet within the vnet. i.e from /48 to /64 + + worker_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 1) #node subnet is a smaller subnet within the vnet. i.e from /21 to /24 + worker_subnet_cidr_v6 = cidrsubnet(var.vnet_cidr_v6, 16, 1) #node subnet is a smaller subnet within the vnet. i.e from /48 to /64 master_subnet_id = var.preexisting_network ? data.azurerm_subnet.preexisting_master_subnet[0].id : azurerm_subnet.master_subnet[0].id worker_subnet_id = var.preexisting_network ? data.azurerm_subnet.preexisting_worker_subnet[0].id : azurerm_subnet.worker_subnet[0].id diff --git a/data/data/azure/vnet/internal-lb.tf b/data/data/azure/vnet/internal-lb.tf index 21339e38a69..3b73c1134a9 100644 --- a/data/data/azure/vnet/internal-lb.tf +++ b/data/data/azure/vnet/internal-lb.tf @@ -1,5 +1,6 @@ locals { - internal_lb_frontend_ip_configuration_name = "internal-lb-ip" + internal_lb_frontend_ip_configuration_name = "internal-lb-ip" + internal_lb_frontend_ip_v6_configuration_name = "internal-lb-ip-v6" } resource "azurerm_lb" "internal" { @@ -8,10 +9,24 @@ resource "azurerm_lb" "internal" { resource_group_name = var.resource_group_name location = var.region - frontend_ip_configuration { - name = local.internal_lb_frontend_ip_configuration_name - subnet_id = local.master_subnet_id - private_ip_address_allocation = "Dynamic" + dynamic "frontend_ip_configuration" { + for_each = [for ip in [ + { name : local.internal_lb_frontend_ip_configuration_name, ipv6 : false }, + { name : local.internal_lb_frontend_ip_v6_configuration_name, ipv6 : true }, + ] : { + name : ip.name + ipv6 : ip.ipv6 + } if ! ip.ipv6 || var.use_ipv6] + + content { + name = frontend_ip_configuration.value.name + subnet_id = local.master_subnet_id + private_ip_address_version = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4" + # WORKAROUND: Allocate a high ipv6 internal LB address to avoid the race with NIC allocation (a master and the LB + # were being assigned the same IP dynamically). Issue is being tracked as a support ticket to Azure. + private_ip_address_allocation = frontend_ip_configuration.value.ipv6 ? "Static" : "Dynamic" + private_ip_address = frontend_ip_configuration.value.ipv6 ? cidrhost(local.master_subnet_cidr_v6, -2) : null + } } } @@ -21,6 +36,14 @@ resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool" { name = "${var.cluster_id}-internal-controlplane" } +resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool_v6" { + count = var.use_ipv6 ? 1 : 0 + + resource_group_name = var.resource_group_name + loadbalancer_id = azurerm_lb.internal.id + name = "${var.cluster_id}-internal-controlplane-v6" +} + resource "azurerm_lb_rule" "internal_lb_rule_api_internal" { name = "api-internal" resource_group_name = var.resource_group_name @@ -36,6 +59,23 @@ resource "azurerm_lb_rule" "internal_lb_rule_api_internal" { probe_id = azurerm_lb_probe.internal_lb_probe_api_internal.id } +resource "azurerm_lb_rule" "internal_lb_rule_api_internal_v6" { + count = var.use_ipv6 ? 
1 : 0 + + name = "api-internal-v6" + resource_group_name = var.resource_group_name + protocol = "Tcp" + backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id + loadbalancer_id = azurerm_lb.internal.id + frontend_port = 6443 + backend_port = 6443 + frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name + enable_floating_ip = false + idle_timeout_in_minutes = 30 + load_distribution = "Default" + probe_id = azurerm_lb_probe.internal_lb_probe_api_internal.id +} + resource "azurerm_lb_rule" "internal_lb_rule_sint" { name = "sint" resource_group_name = var.resource_group_name @@ -51,6 +91,23 @@ resource "azurerm_lb_rule" "internal_lb_rule_sint" { probe_id = azurerm_lb_probe.internal_lb_probe_sint.id } +resource "azurerm_lb_rule" "internal_lb_rule_sint_v6" { + count = var.use_ipv6 ? 1 : 0 + + name = "sint-v6" + resource_group_name = var.resource_group_name + protocol = "Tcp" + backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id + loadbalancer_id = azurerm_lb.internal.id + frontend_port = 22623 + backend_port = 22623 + frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name + enable_floating_ip = false + idle_timeout_in_minutes = 30 + load_distribution = "Default" + probe_id = azurerm_lb_probe.internal_lb_probe_sint.id +} + resource "azurerm_lb_probe" "internal_lb_probe_sint" { name = "sint-probe" resource_group_name = var.resource_group_name @@ -70,4 +127,3 @@ resource "azurerm_lb_probe" "internal_lb_probe_api_internal" { port = 6443 protocol = "TCP" } - diff --git a/data/data/azure/vnet/outputs.tf b/data/data/azure/vnet/outputs.tf index 6357f20d6d1..693f57ea629 100644 --- a/data/data/azure/vnet/outputs.tf +++ b/data/data/azure/vnet/outputs.tf @@ -6,10 +6,18 @@ output "public_lb_backend_pool_id" { value = azurerm_lb_backend_address_pool.master_public_lb_pool.id } +output "public_lb_backend_pool_v6_id" { + value = var.use_ipv6 ? azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id : null +} + output "internal_lb_backend_pool_id" { value = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool.id } +output "internal_lb_backend_pool_v6_id" { + value = var.use_ipv6 ? azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id : null +} + output "public_lb_id" { value = var.private ? null : azurerm_lb.public.id } @@ -18,10 +26,18 @@ output "public_lb_pip_fqdn" { value = var.private ? null : data.azurerm_public_ip.cluster_public_ip.fqdn } +output "public_lb_pip_v6_fqdn" { + value = var.private || ! var.use_ipv6 ? null : data.azurerm_public_ip.cluster_public_ip_v6[0].fqdn +} + output "internal_lb_ip_address" { value = azurerm_lb.internal.private_ip_address } +output "internal_lb_ip_v6_address" { + value = var.use_ipv6 ? 
azurerm_lb.internal.private_ip_addresses[1] : null +} + output "master_nsg_name" { value = azurerm_network_security_group.master.name } diff --git a/data/data/azure/vnet/public-lb.tf b/data/data/azure/vnet/public-lb.tf index 896b59c5d71..f83fd82a686 100644 --- a/data/data/azure/vnet/public-lb.tf +++ b/data/data/azure/vnet/public-lb.tf @@ -1,5 +1,6 @@ locals { - public_lb_frontend_ip_configuration_name = "public-lb-ip" + public_lb_frontend_ip_configuration_name = "public-lb-ip" + public_lb_frontend_ip_v6_configuration_name = "public-lb-ip-v6" } resource "azurerm_public_ip" "cluster_public_ip" { @@ -16,15 +17,47 @@ data "azurerm_public_ip" "cluster_public_ip" { resource_group_name = var.resource_group_name } +resource "azurerm_public_ip" "cluster_public_ip_v6" { + count = var.use_ipv6 ? 1 : 0 + + ip_version = "IPv6" + sku = "Standard" + location = var.region + name = "${var.cluster_id}-pip-v6" + resource_group_name = var.resource_group_name + allocation_method = "Static" + domain_name_label = var.dns_label +} + +data "azurerm_public_ip" "cluster_public_ip_v6" { + count = var.use_ipv6 ? 1 : 0 + + name = azurerm_public_ip.cluster_public_ip_v6[0].name + resource_group_name = var.resource_group_name +} + resource "azurerm_lb" "public" { sku = "Standard" name = "${var.cluster_id}-public-lb" resource_group_name = var.resource_group_name location = var.region - frontend_ip_configuration { - name = local.public_lb_frontend_ip_configuration_name - public_ip_address_id = azurerm_public_ip.cluster_public_ip.id + dynamic "frontend_ip_configuration" { + for_each = [for ip in [ + { name : local.public_lb_frontend_ip_configuration_name, value : azurerm_public_ip.cluster_public_ip.id, ipv6 : false }, + { name : local.public_lb_frontend_ip_v6_configuration_name, value : azurerm_public_ip.cluster_public_ip_v6[0].id, ipv6 : true }, + ] : { + name : ip.name + value : ip.value + ipv6 : ip.ipv6 + } if ! ip.ipv6 || var.use_ipv6] + + content { + name = frontend_ip_configuration.value.name + public_ip_address_id = frontend_ip_configuration.value.value + private_ip_address_version = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4" + private_ip_address_allocation = "Dynamic" + } } } @@ -34,6 +67,14 @@ resource "azurerm_lb_backend_address_pool" "master_public_lb_pool" { name = "${var.cluster_id}-public-lb-control-plane" } +resource "azurerm_lb_backend_address_pool" "master_public_lb_pool_v6" { + count = var.use_ipv6 ? 1 : 0 + + resource_group_name = var.resource_group_name + loadbalancer_id = azurerm_lb.public.id + name = "${var.cluster_id}-public-lb-control-plane-v6" +} + resource "azurerm_lb_rule" "public_lb_rule_api_internal" { count = var.private ? 0 : 1 @@ -51,6 +92,23 @@ resource "azurerm_lb_rule" "public_lb_rule_api_internal" { probe_id = azurerm_lb_probe.public_lb_probe_api_internal[0].id } +resource "azurerm_lb_rule" "public_lb_rule_api_internal_v6" { + count = var.private || ! var.use_ipv6 ? 0 : 1 + + name = "api-internal-v6" + resource_group_name = var.resource_group_name + protocol = "Tcp" + backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id + loadbalancer_id = azurerm_lb.public.id + frontend_port = 6443 + backend_port = 6443 + frontend_ip_configuration_name = local.public_lb_frontend_ip_v6_configuration_name + enable_floating_ip = false + idle_timeout_in_minutes = 30 + load_distribution = "Default" + probe_id = azurerm_lb_probe.public_lb_probe_api_internal[0].id +} + resource "azurerm_lb_rule" "internal_outbound_rule" { count = var.private ? 
1 : 0
 
@@ -67,6 +125,22 @@ resource "azurerm_lb_rule" "internal_outbound_rule" {
   load_distribution              = "Default"
 }
 
+resource "azurerm_lb_rule" "internal_outbound_rule_v6" {
+  count = var.private && var.use_ipv6 ? 1 : 0
+
+  name                           = "internal_outbound_rule_v6"
+  resource_group_name            = var.resource_group_name
+  protocol                       = "Tcp"
+  backend_address_pool_id        = azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id
+  loadbalancer_id                = azurerm_lb.public.id
+  frontend_port                  = 27627
+  backend_port                   = 27627
+  frontend_ip_configuration_name = local.public_lb_frontend_ip_v6_configuration_name
+  enable_floating_ip             = false
+  idle_timeout_in_minutes        = 30
+  load_distribution              = "Default"
+}
+
 resource "azurerm_lb_probe" "public_lb_probe_api_internal" {
   count = var.private ? 0 : 1
 
diff --git a/data/data/azure/vnet/variables.tf b/data/data/azure/vnet/variables.tf
index 929ddc7d0ca..ac435d5c7fd 100644
--- a/data/data/azure/vnet/variables.tf
+++ b/data/data/azure/vnet/variables.tf
@@ -2,6 +2,10 @@ variable "vnet_cidr" {
   type = string
 }
 
+variable "vnet_cidr_v6" {
+  type = string
+}
+
 variable "resource_group_name" {
   type        = string
   description = "Resource group for the deployment"
@@ -57,3 +61,8 @@ variable "private" {
   type        = bool
   description = "The determines if this is a private/internal cluster or not."
 }
+
+variable "use_ipv6" {
+  description = "Use IPv6 as well as IPv4"
+  type        = bool
+}
diff --git a/data/data/azure/vnet/vnet.tf b/data/data/azure/vnet/vnet.tf
index 60880073221..cc11f4752c9 100644
--- a/data/data/azure/vnet/vnet.tf
+++ b/data/data/azure/vnet/vnet.tf
@@ -4,7 +4,7 @@ resource "azurerm_virtual_network" "cluster_vnet" {
   name                = var.virtual_network_name
   resource_group_name = var.resource_group_name
   location            = var.region
-  address_space       = [var.vnet_cidr]
+  address_space       = var.use_ipv6 ? [var.vnet_cidr, var.vnet_cidr_v6] : [var.vnet_cidr]
 }
 
 resource "azurerm_route_table" "route_table" {
@@ -17,7 +17,7 @@ resource "azurerm_subnet" "master_subnet" {
   count = var.preexisting_network ? 0 : 1
 
   resource_group_name  = var.resource_group_name
-  address_prefix       = local.master_subnet_cidr
+  address_prefixes     = var.use_ipv6 ? [local.master_subnet_cidr, local.master_subnet_cidr_v6] : [local.master_subnet_cidr]
   virtual_network_name = local.virtual_network
   name                 = var.master_subnet
 }
@@ -26,7 +26,7 @@ resource "azurerm_subnet" "worker_subnet" {
   count = var.preexisting_network ? 0 : 1
 
   resource_group_name  = var.resource_group_name
-  address_prefix       = local.worker_subnet_cidr
+  address_prefixes     = var.use_ipv6 ? [local.worker_subnet_cidr, local.worker_subnet_cidr_v6] : [local.worker_subnet_cidr]
   virtual_network_name = local.virtual_network
   name                 = var.worker_subnet
 }
diff --git a/docs/dev/aws_ipv6.md b/docs/dev/aws_ipv6.md
new file mode 100644
index 00000000000..20dbd05533b
--- /dev/null
+++ b/docs/dev/aws_ipv6.md
@@ -0,0 +1,93 @@
+# AWS IPv6 Dev and Test Environment
+
+The installer includes code to enable an experimental IPv6 dev and test
+environment on AWS. This is off by default and currently only intended for
+those working on enabling IPv6 in OpenShift.
+
+## Enabling the Environment
+
+To enable IPv6 in your AWS environment, set the following environment variable
+before running the installer:
+
+```bash
+export OPENSHIFT_INSTALL_AWS_USE_IPV6="true"
+```
+
+## AWS Network Environment
+
+AWS does not support a single-stack (IPv6-only) environment, but it does
+support a dual-stack (IPv4 and IPv6) environment, so that's what is enabled
+here.
+This is a summary of the changes to the network environment:
+
+* The VPC has IPv6 enabled and a `/56` IPv6 CIDR will be allocated by AWS.
+* Each subnet will have an IPv6 `/64` subnet allocated to it.
+* All IPv4-specific security group rules have corresponding IPv6 rules created.
+* AWS Network Load Balancers (NLBs) do not support IPv6, so external API access
+  is still over IPv4. AWS does not have a TCP load balancer that supports
+  IPv6, other than classic load balancers on EC2-Classic (not EC2-VPC).
+  AWS Application Load Balancers supposedly support IPv6, but that would
+  require doing HTTPS load balancing for the API instead of just TCP load
+  balancing, so we just use the IPv4 NLBs. API access within the cluster
+  still exercises IPv6 when using its Service IP.
+* IPv6 DNS records (AAAA) are created and the IPv4 (A) records are disabled,
+  except for the API, since the API is still accessed via an IPv4-only load
+  balancer.
+* IPv6 routing is configured. Since all instances get global IPv6 addresses,
+  NAT is not used from the instances out to the internet. The current
+  implementation uses security groups to block incoming traffic sent directly
+  to any of the instances, but will move to using an egress-only internet
+  gateway, which will make this isolation more explicit.
+
+## Node Addresses
+
+Each AWS instance will receive both a private IPv4 address and a globally
+routable IPv6 address.
+
+Kubelet is configured to use the IPv6 address for the Node object.
+
+etcd and all other services running with host networking will be configured to
+use the IPv6 address.
+
+## Hack for IPv4 Access Where Necessary
+
+Some pods still require IPv4 access on AWS to be functional. For example, the
+CoreDNS pods must have IPv4 connectivity, since the AWS DNS server is only
+available via IPv4. This also means we have to add a security group rule
+allowing DNS traffic to our CoreDNS pods over the AWS network (they use port
+5353).
+
+This hack is also required by several pods that need to access the AWS APIs,
+which are IPv4-only.
+
+Since this is an AWS-IPv6 specific hack, it is currently centralized in one
+place: ovn-kubernetes. It will automatically add a second interface with IPv4
+access to the set of affected pods.
+
+## Install Configuration
+
+Here is the suggested network configuration for `install-config.yaml`:
+
+```yaml
+networking:
+  clusterNetwork:
+  - cidr: fd01::/48
+    hostPrefix: 64
+  machineCIDR: 10.0.0.0/16
+  networkType: OVNKubernetes
+  serviceNetwork:
+  - fd02::/112
+```
+
+Note that an IPv4 CIDR is still used for `machineCIDR`, since AWS provides a
+dual-stack (IPv4 and IPv6) environment: we must specify the IPv4 CIDR, and AWS
+will automatically allocate an IPv6 CIDR.
+
+`OVNKubernetes` is the only `networkType` supported in this environment.
+
+## Current Status of IPv6
+
+Note that IPv6 support is under heavy development across many components in
+OpenShift, so some custom images may be needed to include fixes for known
+issues. Coordination of work-in-progress is out of scope for this document.
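+
+## Appendix: IPv6 Subnet Carving
+
+The comments in `vpc-private.tf` and `vpc-public.tf` describe how the `/56`
+IPv6 CIDR that AWS allocates to the VPC is carved into per-AZ `/64` subnets:
+private subnets take the first `length(var.availability_zones)` indexes, and
+public subnets take the ones after that. Below is a minimal sketch of that
+arithmetic; the CIDR value and the `vpc_ipv6_cidr`/`az_count` names are
+illustrative only (in the real modules the block comes from
+`data.aws_vpc.cluster_vpc.ipv6_cidr_block`). The locals can be inspected with
+`terraform console`.
+
+```hcl
+locals {
+  # Hypothetical value; AWS assigns the real /56 when the VPC is created.
+  vpc_ipv6_cidr = "2600:1f14:abc:de00::/56"
+  az_count      = 3
+
+  # newbits = 64 - 56 = 8, so each cidrsubnet() call yields a /64.
+  # Private subnets use netnums 0..2 (as in vpc-private.tf):
+  #   2600:1f14:abc:de00::/64 ... 2600:1f14:abc:de02::/64
+  private_v6 = [for i in range(local.az_count) : cidrsubnet(local.vpc_ipv6_cidr, 8, i)]
+
+  # Public subnets skip those netnums (as in vpc-public.tf) and use 3..5:
+  #   2600:1f14:abc:de03::/64 ... 2600:1f14:abc:de05::/64
+  public_v6 = [for i in range(local.az_count) : cidrsubnet(local.vpc_ipv6_cidr, 8, i + local.az_count)]
+}
+```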
diff --git a/pkg/terraform/exec/plugins/Gopkg.lock b/pkg/terraform/exec/plugins/Gopkg.lock index ddc28eacbef..7bc3f5e7200 100644 --- a/pkg/terraform/exec/plugins/Gopkg.lock +++ b/pkg/terraform/exec/plugins/Gopkg.lock @@ -847,7 +847,8 @@ version = "v2.10.0" [[projects]] - digest = "1:95e2fb03b69a29c7f843830c392f684f110da8ca83649c821c20bad61d9116b5" + branch = "allow_subnet_backport" + digest = "1:3dbe6c639cd70f179cf91bbd5168b61bd6c14c484ee6b008d35d8a6d6d6c7b3b" name = "github.com/terraform-providers/terraform-provider-azurerm" packages = [ "azurerm", @@ -862,8 +863,8 @@ "version", ] pruneopts = "NUT" - revision = "05662fd82a299fff148a6a31cc9c2ba9b5841064" - version = "v1.27.1" + revision = "459bdfe309d805db7907512dd8c5a4a68662105a" + source = "https://github.com/smarterclayton/terraform-provider-azurerm" [[projects]] digest = "1:2daa3bddc630ead813a47149cdc429816c43f9423a7d6e2aeff7ca794b072548" diff --git a/pkg/terraform/exec/plugins/Gopkg.toml b/pkg/terraform/exec/plugins/Gopkg.toml index 4bbebda3361..3e280bba7a2 100644 --- a/pkg/terraform/exec/plugins/Gopkg.toml +++ b/pkg/terraform/exec/plugins/Gopkg.toml @@ -46,7 +46,8 @@ ignored = [ [[constraint]] name = "github.com/terraform-providers/terraform-provider-azurerm" - version = "=1.27.1" + source = "https://github.com/smarterclayton/terraform-provider-azurerm" + branch = "allow_subnet_backport" [[constraint]] name = "github.com/terraform-providers/terraform-provider-google" @@ -71,3 +72,7 @@ ignored = [ [[constraint]] name = "github.com/openshift-metal3/terraform-provider-ironic" version = "v0.1.8" + +[[override]] + name = "github.com/apparentlymart/go-cidr" + version = "v1.0.1" diff --git a/pkg/terraform/exec/plugins/azureprivatedns/provider.go b/pkg/terraform/exec/plugins/azureprivatedns/provider.go index 5108042180c..e1b160bb2ca 100644 --- a/pkg/terraform/exec/plugins/azureprivatedns/provider.go +++ b/pkg/terraform/exec/plugins/azureprivatedns/provider.go @@ -55,6 +55,7 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "azureprivatedns_zone": resourceArmPrivateDNSZone(), "azureprivatedns_a_record": resourceArmPrivateDNSARecord(), + "azureprivatedns_aaaa_record": resourceArmPrivateDNSAAAARecord(), "azureprivatedns_srv_record": resourceArmPrivateDNSSrvRecord(), "azureprivatedns_zone_virtual_network_link": resourceArmPrivateDNSZoneVirtualNetworkLink(), }, diff --git a/pkg/terraform/exec/plugins/azureprivatedns/resource_private_dns_aaaa_record.go b/pkg/terraform/exec/plugins/azureprivatedns/resource_private_dns_aaaa_record.go new file mode 100644 index 00000000000..e3829ca85ae --- /dev/null +++ b/pkg/terraform/exec/plugins/azureprivatedns/resource_private_dns_aaaa_record.go @@ -0,0 +1,179 @@ +package azureprivatedns + +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns" + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmPrivateDNSAAAARecord() *schema.Resource { + return &schema.Resource{ + Create: resourceArmPrivateDNSAAAARecordCreateUpdate, + Read: resourceArmPrivateDNSAAAARecordRead, + Update: resourceArmPrivateDNSAAAARecordCreateUpdate, + Delete: resourceArmPrivateDNSAAAARecordDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: 
schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"resource_group_name": azure.SchemaResourceGroupNameDiffSuppress(),
+
+			"zone_name": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"records": {
+				Type:     schema.TypeSet,
+				Required: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+			},
+
+			"ttl": {
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+
+			"tags": tagsSchema(),
+		},
+	}
+}
+
+func resourceArmPrivateDNSAAAARecordCreateUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*ArmClient).recordSetsClient
+	ctx := meta.(*ArmClient).StopContext
+
+	name := d.Get("name").(string)
+	resGroup := d.Get("resource_group_name").(string)
+	zoneName := d.Get("zone_name").(string)
+
+	ttl := int64(d.Get("ttl").(int))
+	t := d.Get("tags").(map[string]interface{})
+
+	parameters := privatedns.RecordSet{
+		Name: &name,
+		RecordSetProperties: &privatedns.RecordSetProperties{
+			Metadata:    expandTags(t),
+			TTL:         &ttl,
+			AaaaRecords: expandAzureRmPrivateDNSAAAARecords(d),
+		},
+	}
+
+	eTag := ""
+	ifNoneMatch := "" // set to empty to allow updates to records after creation
+	if _, err := client.CreateOrUpdate(ctx, resGroup, zoneName, privatedns.AAAA, name, parameters, eTag, ifNoneMatch); err != nil {
+		return fmt.Errorf("error creating/updating Private DNS AAAA Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err)
+	}
+
+	resp, err := client.Get(ctx, resGroup, zoneName, privatedns.AAAA, name)
+	if err != nil {
+		return fmt.Errorf("error retrieving Private DNS AAAA Record %q (Zone %q / Resource Group %q): %s", name, zoneName, resGroup, err)
+	}
+
+	if resp.ID == nil {
+		return fmt.Errorf("cannot read Private DNS AAAA Record %s (resource group %s) ID", name, resGroup)
+	}
+
+	d.SetId(*resp.ID)
+
+	return resourceArmPrivateDNSAAAARecordRead(d, meta)
+}
+
+func resourceArmPrivateDNSAAAARecordRead(d *schema.ResourceData, meta interface{}) error {
+	dnsClient := meta.(*ArmClient).recordSetsClient
+	ctx := meta.(*ArmClient).StopContext
+
+	id, err := azure.ParseAzureResourceID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	resGroup := id.ResourceGroup
+	name := id.Path["AAAA"]
+	zoneName := id.Path["privateDnsZones"]
+
+	resp, err := dnsClient.Get(ctx, resGroup, zoneName, privatedns.AAAA, name)
+	if err != nil {
+		if utils.ResponseWasNotFound(resp.Response) {
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("error reading Private DNS AAAA record %s: %+v", name, err)
+	}
+
+	d.Set("name", name)
+	d.Set("resource_group_name", resGroup)
+	d.Set("zone_name", zoneName)
+	d.Set("ttl", resp.TTL)
+
+	if err := d.Set("records", flattenAzureRmPrivateDNSAAAARecords(resp.AaaaRecords)); err != nil {
+		return err
+	}
+	flattenAndSetTags(d, resp.Metadata)
+
+	return nil
+}
+
+func resourceArmPrivateDNSAAAARecordDelete(d *schema.ResourceData, meta interface{}) error {
+	dnsClient := meta.(*ArmClient).recordSetsClient
+	ctx := meta.(*ArmClient).StopContext
+
+	id, err := azure.ParseAzureResourceID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	resGroup := id.ResourceGroup
+	name := id.Path["AAAA"]
+	zoneName := id.Path["privateDnsZones"]
+
+	resp, err := dnsClient.Delete(ctx, resGroup, zoneName, privatedns.AAAA, name, "")
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("error deleting Private DNS AAAA Record %s: %+v", name, err)
+	}
+
+	return nil
+}
+
+func flattenAzureRmPrivateDNSAAAARecords(records *[]privatedns.AaaaRecord) []string {
+	results := make([]string, 0)
+	if records == nil {
+		return results
+	}
+
+	for _, 
+		if record.Ipv6Address == nil {
+			continue
+		}
+
+		results = append(results, *record.Ipv6Address)
+	}
+
+	return results
+}
+
+func expandAzureRmPrivateDNSAAAARecords(d *schema.ResourceData) *[]privatedns.AaaaRecord {
+	recordStrings := d.Get("records").(*schema.Set).List()
+	records := make([]privatedns.AaaaRecord, len(recordStrings))
+
+	for i, v := range recordStrings {
+		ipv6 := v.(string)
+		records[i] = privatedns.AaaaRecord{
+			Ipv6Address: &ipv6,
+		}
+	}
+
+	return &records
+}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go b/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go
index 90c49df4a39..3900634becc 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/loadbalancers.go
@@ -79,7 +79,7 @@ func (client LoadBalancersClient) CreateOrUpdatePreparer(ctx context.Context, re
 		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
 	}
 
-	const APIVersion = "2018-12-01"
+	const APIVersion = "2019-04-01"
 	queryParameters := map[string]interface{}{
 		"api-version": APIVersion,
 	}
@@ -241,7 +241,7 @@ func (client LoadBalancersClient) GetPreparer(ctx context.Context, resourceGroup
 		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
 	}
 
-	const APIVersion = "2018-12-01"
+	const APIVersion = "2019-04-01"
 	queryParameters := map[string]interface{}{
 		"api-version": APIVersion,
 	}
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go b/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
index 57e0bfd7529..ae88edd67a9 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
@@ -12772,6 +12772,8 @@ type FrontendIPConfigurationPropertiesFormat struct {
 	LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"`
 	// PrivateIPAddress - The private IP address of the IP configuration.
 	PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+	// PrivateIPAddressVersion - The private IP address version of the IP configuration.
+	PrivateIPAddressVersion *string `json:"privateIPAddressVersion,omitempty"`
 	// PrivateIPAllocationMethod - The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic'
 	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
 	// Subnet - The reference of the subnet resource.
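The vendored model change above adds PrivateIPAddressVersion as a plain *string on FrontendIPConfigurationPropertiesFormat. A minimal sketch of how a caller would pin a frontend configuration to IPv6 (assuming the github.com vendor paths used throughout this diff; the string value matches the SDK's network.IPv6 IPVersion constant):

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network"
        "github.com/Azure/go-autorest/autorest/to"
    )

    func main() {
        // Pin a load balancer frontend to IPv6. The new model field is a plain
        // *string, so the SDK's network.IPv6 constant is converted explicitly.
        props := network.FrontendIPConfigurationPropertiesFormat{
            PrivateIPAddressVersion:   to.StringPtr(string(network.IPv6)),
            PrivateIPAllocationMethod: network.Dynamic,
        }
        fmt.Println(*props.PrivateIPAddressVersion) // IPv6
    }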
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/data_source_subnet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/data_source_subnet.go
index e0793640680..09a80d7ac24 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/data_source_subnet.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/data_source_subnet.go
@@ -31,6 +31,12 @@ func dataSourceArmSubnet() *schema.Resource {
 				Computed: true,
 			},
 
+			"address_prefixes": {
+				Type:     schema.TypeList,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Computed: true,
+			},
+
 			"network_security_group_id": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -81,7 +87,18 @@ func dataSourceArmSubnetRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("virtual_network_name", virtualNetworkName)
 
 	if props := resp.SubnetPropertiesFormat; props != nil {
-		d.Set("address_prefix", props.AddressPrefix)
+		if props.AddressPrefix != nil {
+			d.Set("address_prefix", props.AddressPrefix)
+		}
+		if props.AddressPrefixes == nil {
+			if props.AddressPrefix != nil && len(*props.AddressPrefix) > 0 {
+				d.Set("address_prefixes", []string{*props.AddressPrefix})
+			} else {
+				d.Set("address_prefixes", []string{})
+			}
+		} else {
+			d.Set("address_prefixes", props.AddressPrefixes)
+		}
 
 		if props.NetworkSecurityGroup != nil {
 			d.Set("network_security_group_id", props.NetworkSecurityGroup.ID)
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_loadbalancer.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_loadbalancer.go
index e5c821d490b..49503992a8d 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_loadbalancer.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_loadbalancer.go
@@ -74,6 +74,17 @@ func resourceArmLoadBalancer() *schema.Resource {
 				ValidateFunc: validate.IPv4AddressOrEmpty,
 			},
 
+			"private_ip_address_version": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Default:  string(network.IPv4),
+				ForceNew: true,
+				ValidateFunc: validation.StringInSlice([]string{
+					string(network.IPv4),
+					string(network.IPv6),
+				}, false),
+			},
+
 			"public_ip_address_id": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -305,6 +316,10 @@ func expandAzureRmLoadBalancerFrontendIpConfigurations(d *schema.ResourceData) *
 			properties.PrivateIPAddress = &v
 		}
 
+		if v := data["private_ip_address_version"].(string); v != "" {
+			properties.PrivateIPAddressVersion = &v
+		}
+
 		if v := data["public_ip_address_id"].(string); v != "" {
 			properties.PublicIPAddress = &network.PublicIPAddress{
 				ID: &v,
@@ -361,6 +376,10 @@ func flattenLoadBalancerFrontendIpConfiguration(ipConfigs *[]network.FrontendIPC
 			ipConfig["private_ip_address"] = *pip
 		}
 
+		if pip := props.PrivateIPAddressVersion; pip != nil {
+			ipConfig["private_ip_address_version"] = *pip
+		}
+
 		if pip := props.PublicIPAddress; pip != nil {
 			ipConfig["public_ip_address_id"] = *pip.ID
 		}
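The subnet data source above and the subnet resource further down repeat the same fallback when flattening Azure's response: prefer the plural AddressPrefixes, otherwise wrap the legacy singular prefix in a one-element list. A self-contained sketch of that normalization, with normalizePrefixes as a hypothetical helper (the provider inlines this logic at each call site):

    package main

    import "fmt"

    // normalizePrefixes mirrors the patched provider's fallback: prefer the
    // plural AddressPrefixes, else wrap the legacy singular AddressPrefix in a
    // one-element list, else return an empty list.
    func normalizePrefixes(prefix *string, prefixes *[]string) []string {
        if prefixes != nil {
            return *prefixes
        }
        if prefix != nil && len(*prefix) > 0 {
            return []string{*prefix}
        }
        return []string{}
    }

    func main() {
        v4 := "10.0.0.0/24"
        fmt.Println(normalizePrefixes(&v4, nil)) // [10.0.0.0/24]

        dual := []string{"10.0.0.0/24", "fd00::/64"}
        fmt.Println(normalizePrefixes(&v4, &dual)) // the dual-stack list wins
    }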
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_public_ip.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_public_ip.go
index 457274a3b22..0b5f7f33d1e 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_public_ip.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_public_ip.go
@@ -164,12 +164,6 @@ func resourceArmPublicIpCreateUpdate(d *schema.ResourceData, meta interface{}) e
 		return fmt.Errorf("Either `allocation_method` or `public_ip_address_allocation` must be specified.")
 	}
 
-	if strings.EqualFold(string(ipVersion), string(network.IPv6)) {
-		if strings.EqualFold(ipAllocationMethod, "static") {
-			return fmt.Errorf("Cannot specify publicIpAllocationMethod as Static for IPv6 PublicIp")
-		}
-	}
-
 	if strings.EqualFold(sku, "standard") {
 		if !strings.EqualFold(ipAllocationMethod, "static") {
 			return fmt.Errorf("Static IP allocation must be used when creating Standard SKU public IP addresses.")
diff --git a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_subnet.go b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_subnet.go
index 7a611f748c0..d8ff80cb164 100644
--- a/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_subnet.go
+++ b/pkg/terraform/exec/plugins/vendor/github.com/terraform-providers/terraform-provider-azurerm/azurerm/resource_arm_subnet.go
@@ -39,8 +39,17 @@ func resourceArmSubnet() *schema.Resource {
 			},
 
 			"address_prefix": {
-				Type:     schema.TypeString,
-				Required: true,
+				Type:          schema.TypeString,
+				Optional:      true,
+				Deprecated:    "Use the `address_prefixes` property instead.",
+				ConflictsWith: []string{"address_prefixes"},
+			},
+
+			"address_prefixes": {
+				Type:          schema.TypeList,
+				Optional:      true,
+				Elem:          &schema.Schema{Type: schema.TypeString},
+				ConflictsWith: []string{"address_prefix"},
 			},
 
 			"network_security_group_id": {
@@ -142,15 +151,28 @@ func resourceArmSubnetCreateUpdate(d *schema.ResourceData, meta interface{}) err
 		}
 	}
 
-	addressPrefix := d.Get("address_prefix").(string)
+	var prefixSet bool
+	properties := network.SubnetPropertiesFormat{}
+	if value, ok := d.GetOk("address_prefixes"); ok {
+		var addressPrefixes []string
+		for _, item := range value.([]interface{}) {
+			addressPrefixes = append(addressPrefixes, item.(string))
+		}
+		properties.AddressPrefixes = &addressPrefixes
+		prefixSet = len(addressPrefixes) > 0
+	}
+	if value, ok := d.GetOk("address_prefix"); ok {
+		addressPrefix := value.(string)
+		properties.AddressPrefix = &addressPrefix
+		prefixSet = len(addressPrefix) > 0
+	}
+	if !prefixSet {
+		return fmt.Errorf("[ERROR] either address_prefix or address_prefixes is required")
+	}
 
 	azureRMLockByName(vnetName, virtualNetworkResourceName)
 	defer azureRMUnlockByName(vnetName, virtualNetworkResourceName)
 
-	properties := network.SubnetPropertiesFormat{
-		AddressPrefix: &addressPrefix,
-	}
-
 	if v, ok := d.GetOk("network_security_group_id"); ok {
 		nsgId := v.(string)
 		properties.NetworkSecurityGroup = &network.SecurityGroup{
@@ -245,7 +267,18 @@ func resourceArmSubnetRead(d *schema.ResourceData, meta interface{}) error {
 	d.Set("virtual_network_name", vnetName)
 
 	if props := resp.SubnetPropertiesFormat; props != nil {
-		d.Set("address_prefix", props.AddressPrefix)
+		if props.AddressPrefix != nil {
+			d.Set("address_prefix", props.AddressPrefix)
+		}
+		if props.AddressPrefixes == nil {
+			if props.AddressPrefix != nil && len(*props.AddressPrefix) > 0 {
+				d.Set("address_prefixes", []string{*props.AddressPrefix})
+			} else {
+				d.Set("address_prefixes", []string{})
+			}
+		} else {
+			d.Set("address_prefixes", props.AddressPrefixes)
+		}
 
 		var securityGroupId *string
 		if props.NetworkSecurityGroup != nil {
diff --git a/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
index 75344732027..ed749d4c51c 100644
--- a/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
+++ b/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -44,7 +44,7 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
 	}
 
 	return &net.IPNet{
-		IP:   insertNumIntoIP(ip, num, newPrefixLen),
+		IP:   insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen),
 		Mask: net.CIDRMask(newPrefixLen, addrLen),
 	}, nil
 }
@@ -56,28 +56,32 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
 func Host(base *net.IPNet, num int) (net.IP, error) {
 	ip := base.IP
 	mask := base.Mask
+	bigNum := big.NewInt(int64(num))
 
 	parentLen, addrLen := mask.Size()
 	hostLen := addrLen - parentLen
-	maxHostNum := uint64(1<<uint64(hostLen)) - 1
-	numUint64 := uint64(num)
+
+	maxHostNum := big.NewInt(int64(1))
+	maxHostNum.Lsh(maxHostNum, uint(hostLen))
+	maxHostNum.Sub(maxHostNum, big.NewInt(1))
+
+	numUint64 := big.NewInt(int64(num))
 	if num < 0 {
-		numUint64 = uint64(-num) - 1
-		num = int(maxHostNum - numUint64)
+		numUint64 = new(big.Int).Add(maxHostNum, big.NewInt(int64(num)))
+		numUint64.Add(numUint64, big.NewInt(int64(1)))
 	}
-	if numUint64 > maxHostNum {
+	if numUint64.Cmp(maxHostNum) == 1 {
 		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
 	}
-	var bitlength int
-	if ip.To4() != nil {
-		bitlength = 32
-	} else {
-		bitlength = 128
-	}
-	return insertNumIntoIP(ip, num, bitlength), nil
+	var bitlength int
+	if ip.To4() != nil {
+		bitlength = 32
+	} else {
+		bitlength = 128
+	}
+	return insertNumIntoIP(ip, bigNum, bitlength), nil
 }
 
 // AddressRange returns the first and last addresses in the given CIDR range.
@@ -129,7 +133,11 @@ func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
 		if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
 			return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
 		}
-		for j := i + 1; j < len(subnets); j++ {
+		for j := 0; j < len(subnets); j++ {
+			if i == j {
+				continue
+			}
+
 			first := firstLastIP[j][0]
 			last := firstLastIP[j][1]
 			if s.Contains(first) || s.Contains(last) {
diff --git a/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
index 861a5f623d7..e5e6a2cf91a 100644
--- a/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
+++ b/pkg/terraform/exec/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
@@ -29,9 +29,8 @@ func intToIP(ipInt *big.Int, bits int) net.IP {
 	return net.IP(ret)
 }
 
-func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP {
+func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP {
 	ipInt, totalBits := ipToInt(ip)
-	bigNum := big.NewInt(int64(num))
 	bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
 	ipInt.Or(ipInt, bigNum)
 	return intToIP(ipInt, totalBits)
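The go-cidr bump to v1.0.1 matters for IPv6 in two ways: Host moves from uint64 arithmetic, whose 1<<hostLen bound check overflows once the host part is wider than 64 bits, to big.Int, and VerifyNoOverlap now compares every ordered pair instead of only subnets[i] against later entries. A small sketch exercising both; fd00::/48 and the 10.x blocks are arbitrary example values:

    package main

    import (
        "fmt"
        "net"

        "github.com/apparentlymart/go-cidr/cidr"
    )

    func mustCIDR(s string) *net.IPNet {
        _, n, err := net.ParseCIDR(s)
        if err != nil {
            panic(err)
        }
        return n
    }

    func main() {
        // A /48 leaves an 80-bit host part, wider than uint64; the big.Int
        // rewrite makes the range check meaningful for IPv6 prefixes.
        ip, err := cidr.Host(mustCIDR("fd00::/48"), 1)
        fmt.Println(ip, err) // fd00::1 <nil>

        // The smaller subnet is listed first; the old asymmetric loop never
        // asked whether the later /16 contains the /24's endpoints, so this
        // overlap was missed. The symmetric loop reports it in either order.
        err = cidr.VerifyNoOverlap(
            []*net.IPNet{mustCIDR("10.1.1.0/24"), mustCIDR("10.1.0.0/16")},
            mustCIDR("10.0.0.0/8"),
        )
        fmt.Println(err) // non-nil overlap error
    }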
diff --git a/pkg/tfvars/aws/aws.go b/pkg/tfvars/aws/aws.go
index be1115f4306..92947f4d23f 100644
--- a/pkg/tfvars/aws/aws.go
+++ b/pkg/tfvars/aws/aws.go
@@ -4,6 +4,7 @@ package aws
 import (
 	"encoding/json"
 	"fmt"
+	"os"
 
 	"github.com/pkg/errors"
 	"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1beta1"
@@ -27,6 +28,7 @@ type config struct {
 	PrivateSubnets  []string  `json:"aws_private_subnets,omitempty"`
 	PublicSubnets   *[]string `json:"aws_public_subnets,omitempty"`
 	PublishStrategy string    `json:"aws_publish_strategy,omitempty"`
+	UseIPv6         bool      `json:"aws_use_ipv6,omitempty"`
 }
 
 // TFVars generates AWS-specific Terraform variables launching the cluster.
@@ -76,6 +78,11 @@ func TFVars(vpc string, privateSubnets []string, publicSubnets []string, publish
 
 	instanceClass := defaults.InstanceClass(masterConfig.Placement.Region)
 
+	useIPv6 := false
+	if os.Getenv("OPENSHIFT_INSTALL_AWS_USE_IPV6") == "true" {
+		useIPv6 = true
+	}
+
 	cfg := &config{
 		Region:    masterConfig.Placement.Region,
 		ExtraTags: tags,
@@ -89,6 +96,7 @@ func TFVars(vpc string, privateSubnets []string, publicSubnets []string, publish
 		VPC:             vpc,
 		PrivateSubnets:  privateSubnets,
 		PublishStrategy: string(publish),
+		UseIPv6:         useIPv6,
 	}
 
 	if len(publicSubnets) == 0 {
diff --git a/pkg/tfvars/azure/azure.go b/pkg/tfvars/azure/azure.go
index b8e2a98d8ec..3d8a294e322 100644
--- a/pkg/tfvars/azure/azure.go
+++ b/pkg/tfvars/azure/azure.go
@@ -2,6 +2,7 @@ package azure
 
 import (
 	"encoding/json"
+	"os"
 
 	"github.com/Azure/go-autorest/autorest/to"
 
@@ -35,6 +36,8 @@ type config struct {
 	ComputeSubnet      string `json:"azure_compute_subnet"`
 	PreexistingNetwork bool   `json:"azure_preexisting_network"`
 	Private            bool   `json:"azure_private"`
+	MachineCIDRv6      string `json:"azure_machine_cidr_v6"`
+	UseIPv6            bool   `json:"azure_use_ipv6,omitempty"`
 }
 
 // TFVarsSources contains the parameters to be converted into Terraform variables
@@ -60,6 +63,13 @@ func TFVars(sources TFVarsSources) ([]byte, error) {
 		masterAvailabilityZones[i] = to.String(c.Zone)
 	}
 
+	useIPv6 := false
+	// TODO: make configurable
+	machineCIDRv6 := "fd00::/48"
+	if os.Getenv("OPENSHIFT_INSTALL_AZURE_USE_IPV6") == "true" {
+		useIPv6 = true
+	}
+
 	cfg := &config{
 		Auth:   sources.Auth,
 		Region: region,
@@ -76,6 +86,8 @@ func TFVars(sources TFVarsSources) ([]byte, error) {
 		ControlPlaneSubnet: masterConfig.Subnet,
 		ComputeSubnet:      workerConfig.Subnet,
 		PreexistingNetwork: sources.PreexistingNetwork,
+		MachineCIDRv6:      machineCIDRv6,
+		UseIPv6:            useIPv6,
 	}
 
 	return json.MarshalIndent(cfg, "", "  ")
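Both tfvars packages gate the new fields on an environment variable rather than install-config, and the Azure path hard-codes fd00::/48 as a placeholder ULA machine CIDR until it is made configurable. A trimmed-down sketch of the resulting JSON, with azureTFVars as a hypothetical stand-in for the installer's unexported config struct:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    // azureTFVars is a hypothetical stand-in for the installer's unexported
    // azure config struct, kept just to show the emitted JSON keys.
    type azureTFVars struct {
        MachineCIDRv6 string `json:"azure_machine_cidr_v6"`
        UseIPv6       bool   `json:"azure_use_ipv6,omitempty"`
    }

    func main() {
        cfg := azureTFVars{MachineCIDRv6: "fd00::/48"} // placeholder ULA prefix from the diff
        if os.Getenv("OPENSHIFT_INSTALL_AZURE_USE_IPV6") == "true" {
            cfg.UseIPv6 = true
        }
        out, err := json.MarshalIndent(cfg, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }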
diff --git a/pkg/types/validation/installconfig.go b/pkg/types/validation/installconfig.go
index d2b79afcc56..034e91ec427 100644
--- a/pkg/types/validation/installconfig.go
+++ b/pkg/types/validation/installconfig.go
@@ -3,6 +3,7 @@ package validation
 import (
 	"fmt"
 	"net"
+	"os"
 	"sort"
 	"strings"
 
@@ -75,7 +76,7 @@ func ValidateInstallConfig(c *types.InstallConfig, openStackValidValuesFetcher o
 		}
 	}
 	if c.Networking != nil {
-		allErrs = append(allErrs, validateNetworking(c.Networking, field.NewPath("networking"))...)
+		allErrs = append(allErrs, validateNetworking(c.Networking, field.NewPath("networking"), &c.Platform)...)
 	} else {
 		allErrs = append(allErrs, field.Required(field.NewPath("networking"), "networking is required"))
 	}
@@ -99,14 +100,38 @@ func ValidateInstallConfig(c *types.InstallConfig, openStackValidValuesFetcher o
 	return allErrs
 }
 
-func validateNetworking(n *types.Networking, fldPath *field.Path) field.ErrorList {
+func validateNetworking(n *types.Networking, fldPath *field.Path, platform *types.Platform) field.ErrorList {
 	allErrs := field.ErrorList{}
 	if n.NetworkType == "" {
 		allErrs = append(allErrs, field.Required(fldPath.Child("networkType"), "network provider type required"))
 	}
 
+	// IPv6 CIDRs are only allowed for:
+	// - baremetal platform
+	// - AWS IPv6 dev/test env, with OPENSHIFT_INSTALL_AWS_USE_IPV6 set
+	allowIPv6 := false
+	requireIPv6 := false
+	if platform.BareMetal != nil {
+		allowIPv6 = true
+	}
+	if platform.AWS != nil && os.Getenv("OPENSHIFT_INSTALL_AWS_USE_IPV6") == "true" {
+		allowIPv6 = true
+		requireIPv6 = true
+		if n.NetworkType != "OVNKubernetes" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("networkType"), n.NetworkType, "networkType must be OVNKubernetes for AWS IPv6"))
+		}
+	}
+	if platform.Azure != nil && os.Getenv("OPENSHIFT_INSTALL_AZURE_USE_IPV6") == "true" {
+		allowIPv6 = true
+		if n.NetworkType != "OVNKubernetes" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("networkType"), n.NetworkType, "networkType must be OVNKubernetes for Azure IPv6"))
+		}
+	}
+
 	if n.MachineCIDR != nil {
-		if err := validate.SubnetCIDR(&n.MachineCIDR.IPNet); err != nil {
+		// MachineCIDR should still be IPv4 for AWS IPv6, because AWS only does
+		// dual-stack, and the IPv6 CIDR will be assigned by AWS.
+		if err := validate.SubnetCIDR(&n.MachineCIDR.IPNet, allowIPv6, false); err != nil {
 			allErrs = append(allErrs, field.Invalid(fldPath.Child("machineCIDR"), n.MachineCIDR.String(), err.Error()))
 		}
 	} else {
@@ -114,7 +139,7 @@ func validateNetworking(n *types.Networking, fldPath *field.Path) field.ErrorLis
 	}
 
 	for i, sn := range n.ServiceNetwork {
-		if err := validate.SubnetCIDR(&sn.IPNet); err != nil {
+		if err := validate.SubnetCIDR(&sn.IPNet, allowIPv6, requireIPv6); err != nil {
 			allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNetwork").Index(i), sn.String(), err.Error()))
 		}
 		if n.MachineCIDR != nil && validate.DoCIDRsOverlap(&sn.IPNet, &n.MachineCIDR.IPNet) {
@@ -140,7 +165,7 @@ func validateNetworking(n *types.Networking, fldPath *field.Path) field.ErrorLis
 	}
 
 	for i, cn := range n.ClusterNetwork {
-		allErrs = append(allErrs, validateClusterNetwork(n, &cn, i, fldPath.Child("clusterNetwork").Index(i))...)
+		allErrs = append(allErrs, validateClusterNetwork(n, &cn, i, fldPath.Child("clusterNetwork").Index(i), allowIPv6, requireIPv6)...)
 	}
 	if len(n.ClusterNetwork) == 0 {
 		allErrs = append(allErrs, field.Required(fldPath.Child("clusterNetwork"), "cluster network required"))
@@ -148,9 +173,9 @@ func validateNetworking(n *types.Networking, fldPath *field.Path) field.ErrorLis
 	return allErrs
 }
 
-func validateClusterNetwork(n *types.Networking, cn *types.ClusterNetworkEntry, idx int, fldPath *field.Path) field.ErrorList {
+func validateClusterNetwork(n *types.Networking, cn *types.ClusterNetworkEntry, idx int, fldPath *field.Path, allowIPv6, requireIPv6 bool) field.ErrorList {
 	allErrs := field.ErrorList{}
-	if err := validate.SubnetCIDR(&cn.CIDR.IPNet); err != nil {
+	if err := validate.SubnetCIDR(&cn.CIDR.IPNet, allowIPv6, requireIPv6); err != nil {
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), cn.CIDR.IPNet.String(), err.Error()))
 	}
 	if n.MachineCIDR != nil && validate.DoCIDRsOverlap(&cn.CIDR.IPNet, &n.MachineCIDR.IPNet) {
@@ -223,11 +248,15 @@ func validatePlatform(platform *types.Platform, fldPath *field.Path, openStackVa
 	}
 	if platform.AWS != nil {
 		validate(aws.Name, platform.AWS, func(f *field.Path) field.ErrorList { return awsvalidation.ValidatePlatform(platform.AWS, f) })
+	} else if os.Getenv("OPENSHIFT_INSTALL_AWS_USE_IPV6") == "true" {
+		allErrs = append(allErrs, field.Invalid(fldPath, activePlatform, "OPENSHIFT_INSTALL_AWS_USE_IPV6 only valid for AWS"))
 	}
 	if platform.Azure != nil {
 		validate(azure.Name, platform.Azure, func(f *field.Path) field.ErrorList { return azurevalidation.ValidatePlatform(platform.Azure, c.Publish, f) })
+	} else if os.Getenv("OPENSHIFT_INSTALL_AZURE_USE_IPV6") == "true" {
+		allErrs = append(allErrs, field.Invalid(fldPath, activePlatform, "OPENSHIFT_INSTALL_AZURE_USE_IPV6 only valid for Azure"))
 	}
 	if platform.GCP != nil {
 		validate(gcp.Name, platform.GCP, func(f *field.Path) field.ErrorList { return gcpvalidation.ValidatePlatform(platform.GCP, f) })
diff --git a/pkg/validate/validate.go b/pkg/validate/validate.go
index 6e2fcc52fc6..213f539d380 100644
--- a/pkg/validate/validate.go
+++ b/pkg/validate/validate.go
@@ -108,10 +108,13 @@ func ClusterName(v string) error {
 }
 
 // SubnetCIDR checks if the given IP net is a valid CIDR.
-func SubnetCIDR(cidr *net.IPNet) error {
-	if cidr.IP.To4() == nil {
+func SubnetCIDR(cidr *net.IPNet, allowIPv6, requireIPv6 bool) error {
+	if allowIPv6 == false && cidr.IP.To4() == nil {
 		return errors.New("must use IPv4")
 	}
+	if requireIPv6 == true && cidr.IP.To4() != nil {
+		return errors.New("must use IPv6")
+	}
 	if cidr.IP.IsUnspecified() {
 		return errors.New("address must be specified")
 	}
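With the extra flags, SubnetCIDR expresses three policies: IPv4-only (the old behavior), IPv6-tolerant (allowIPv6), and IPv6-only (requireIPv6, used for AWS service and cluster networks). A usage sketch, assuming the installer's module path under the github.com mirror used elsewhere in this diff:

    package main

    import (
        "fmt"
        "net"

        "github.com/openshift/installer/pkg/validate"
    )

    func main() {
        _, v6, err := net.ParseCIDR("fd01::/48")
        if err != nil {
            panic(err)
        }
        fmt.Println(validate.SubnetCIDR(v6, false, false)) // must use IPv4
        fmt.Println(validate.SubnetCIDR(v6, true, false))  // <nil>
        fmt.Println(validate.SubnetCIDR(v6, true, true))   // <nil> (already IPv6)
    }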
diff --git a/pkg/validate/validate_test.go b/pkg/validate/validate_test.go
index 016854e971f..e0b1b511861 100644
--- a/pkg/validate/validate_test.go
+++ b/pkg/validate/validate_test.go
@@ -59,8 +59,8 @@ func TestSubnetCIDR(t *testing.T) {
 		{"1.2.3.4/1", "invalid network address. got 1.2.3.4/1, expecting 0.0.0.0/1"},
 		{"1.2.3.4/31", ""},
 		{"1.2.3.4/32", ""},
-		{"0:0:0:0:0:1:102:304/116", "must use IPv4"},
-		{"0:0:0:0:0:ffff:102:304/116", "invalid network address. got 1.2.3.4/20, expecting 1.2.0.0/20"},
+		{"0:0:0:0:0:1:102:304/116", "invalid network address. got ::1:102:304/116, expecting ::1:102:0/116"},
+		{"fd01::/48", ""},
 		{"172.17.0.0/20", "overlaps with default Docker Bridge subnet (172.17.0.0/20)"},
 		{"172.0.0.0/8", "overlaps with default Docker Bridge subnet (172.0.0.0/8)"},
 		{"255.255.255.255/1", "invalid network address. got 255.255.255.255/1, expecting 128.0.0.0/1"},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
index 4abe3c5f25a..de6905abbcb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network/models.go
@@ -12633,6 +12633,8 @@ type FrontendIPConfigurationPropertiesFormat struct {
 	LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"`
 	// PrivateIPAddress - The private IP address of the IP configuration.
 	PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+	// PrivateIPAddressVersion - The private IP address version of the IP configuration.
+	PrivateIPAddressVersion *string `json:"privateIPAddressVersion,omitempty"`
 	// PrivateIPAllocationMethod - The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic'
 	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
 	// Subnet - The reference of the subnet resource.