diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 32d024af011..9b92261c940 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -27,6 +27,7 @@ variables: ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini IDEMPOT_CHECK: "false" RESET_CHECK: "false" + REMOVE_NODE_CHECK: "false" UPGRADE_TEST: "false" MITOGEN_ENABLE: "false" ANSIBLE_LOG_LEVEL: "-vv" @@ -80,3 +81,4 @@ include: - .gitlab-ci/terraform.yml - .gitlab-ci/packet.yml - .gitlab-ci/vagrant.yml + - .gitlab-ci/molecule.yml diff --git a/.gitlab-ci/molecule.yml b/.gitlab-ci/molecule.yml new file mode 100644 index 00000000000..5f8e02e4c75 --- /dev/null +++ b/.gitlab-ci/molecule.yml @@ -0,0 +1,93 @@ +--- + +.molecule: + tags: [c3.small.x86] + only: [/^pr-.*$/] + except: ['triggers'] + image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION + services: [] + stage: deploy-part1 + before_script: + - tests/scripts/rebase.sh + - apt-get update && apt-get install -y python3-pip + - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 + - python -m pip uninstall -y ansible ansible-base ansible-core + - python -m pip install -r tests/requirements.txt + - ./tests/scripts/vagrant_clean.sh + script: + - ./tests/scripts/molecule_run.sh + after_script: + - chronic ./tests/scripts/molecule_logs.sh + artifacts: + when: always + paths: + - molecule_logs/ + +# CI template for periodic CI jobs +# Enabled when PERIODIC_CI_ENABLED var is set +.molecule_periodic: + only: + variables: + - $PERIODIC_CI_ENABLED + allow_failure: true + extends: .molecule + +molecule_full: + extends: .molecule_periodic + +molecule_no_container_engines: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -e container-engine + when: on_success + +molecule_docker: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -i container-engine/docker + when: on_success + +molecule_containerd: + extends: .molecule + script: + - ./tests/scripts/molecule_run.sh -i container-engine/containerd + when: 
on_success + +molecule_cri-o: + extends: .molecule + stage: deploy-part2 + script: + - ./tests/scripts/molecule_run.sh -i container-engine/cri-o + when: on_success + +molecule_cri-dockerd: + extends: .molecule + stage: deploy-part2 + script: + - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd + when: on_success + +# Stage 3 container engines don't get as much attention so allow them to fail +molecule_kata: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers + when: on_success + +molecule_gvisor: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/gvisor + when: on_success + +molecule_youki: + extends: .molecule + stage: deploy-part3 + allow_failure: true + script: + - ./tests/scripts/molecule_run.sh -i container-engine/youki + when: on_success diff --git a/.gitlab-ci/packet.yml b/.gitlab-ci/packet.yml index a096275974e..a0d514c5c15 100644 --- a/.gitlab-ci/packet.yml +++ b/.gitlab-ci/packet.yml @@ -263,6 +263,13 @@ packet_centos7-docker-weave-upgrade-ha: variables: UPGRADE_TEST: basic +packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha: + stage: deploy-part3 + extends: .packet_periodic + when: on_success + variables: + UPGRADE_TEST: basic + # Calico HA Wireguard packet_ubuntu20-calico-ha-wireguard: stage: deploy-part2 @@ -276,6 +283,14 @@ packet_debian10-calico-upgrade: variables: UPGRADE_TEST: graceful +packet_almalinux8-calico-remove-node: + stage: deploy-part3 + extends: .packet_pr + when: on_success + variables: + REMOVE_NODE_CHECK: "true" + REMOVE_NODE_NAME: "instance-3" + packet_debian10-calico-upgrade-once: stage: deploy-part3 extends: .packet_periodic diff --git a/.gitlab-ci/terraform.yml b/.gitlab-ci/terraform.yml index 91874091f55..8ffb11163ab 100644 --- a/.gitlab-ci/terraform.yml +++ b/.gitlab-ci/terraform.yml @@ -60,11 +60,11 @@ tf-validate-openstack: PROVIDER: openstack 
CLUSTER: $CI_COMMIT_REF_NAME -tf-validate-packet: +tf-validate-metal: extends: .terraform_validate variables: TF_VERSION: $TERRAFORM_VERSION - PROVIDER: packet + PROVIDER: metal CLUSTER: $CI_COMMIT_REF_NAME tf-validate-aws: diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml index a13028ddaf0..8c0f4a21d03 100644 --- a/.gitlab-ci/vagrant.yml +++ b/.gitlab-ci/vagrant.yml @@ -1,28 +1,5 @@ --- -molecule_tests: - tags: [c3.small.x86] - only: [/^pr-.*$/] - except: ['triggers'] - image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION - services: [] - stage: deploy-part1 - before_script: - - tests/scripts/rebase.sh - - apt-get update && apt-get install -y python3-pip - - update-alternatives --install /usr/bin/python python /usr/bin/python3 10 - - python -m pip uninstall -y ansible ansible-base ansible-core - - python -m pip install -r tests/requirements.txt - - ./tests/scripts/vagrant_clean.sh - script: - - ./tests/scripts/molecule_run.sh - after_script: - - chronic ./tests/scripts/molecule_logs.sh - artifacts: - when: always - paths: - - molecule_logs/ - .vagrant: extends: .testcases variables: diff --git a/README.md b/README.md index 35ebc248b0b..a37e53fd889 100644 --- a/README.md +++ b/README.md @@ -131,17 +131,17 @@ Note: Upstart/SysV init based OS types are not supported. ## Supported Components - Core - - [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.5 - - [etcd](https://github.com/etcd-io/etcd) v3.5.1 + - [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.6 + - [etcd](https://github.com/etcd-io/etcd) v3.5.3 - [docker](https://www.docker.com/) v20.10 (see note) - - [containerd](https://containerd.io/) v1.6.2 + - [containerd](https://containerd.io/) v1.6.4 - [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). 
Only on fedora, ubuntu and centos based OS) - Network Plugin - [cni-plugins](https://github.com/containernetworking/plugins) v1.0.1 - [calico](https://github.com/projectcalico/calico) v3.21.4 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [cilium](https://github.com/cilium/cilium) v1.11.1 - - [flanneld](https://github.com/flannel-io/flannel) v0.15.1 + - [flanneld](https://github.com/flannel-io/flannel) v0.17.0 - [kube-ovn](https://github.com/alauda/kube-ovn) v1.8.1 - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0 - [multus](https://github.com/intel/multus-cni) v3.8 diff --git a/contrib/terraform/packet/README.md b/contrib/terraform/metal/README.md similarity index 96% rename from contrib/terraform/packet/README.md rename to contrib/terraform/metal/README.md index 5e9bb159803..a21aed461d5 100644 --- a/contrib/terraform/packet/README.md +++ b/contrib/terraform/metal/README.md @@ -60,9 +60,9 @@ Terraform will be used to provision all of the Equinix Metal resources with base Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): ```ShellSession -cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER +cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER cd inventory/$CLUSTER -ln -s ../../contrib/terraform/packet/hosts +ln -s ../../contrib/terraform/metal/hosts ``` This will be the base for subsequent Terraform commands. @@ -101,7 +101,7 @@ This helps when identifying which hosts are associated with each cluster. 
While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values: - cluster_name = the name of the inventory directory created above as $CLUSTER -- packet_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above +- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above #### Enable localhost access @@ -119,7 +119,7 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr In the cluster's inventory folder, the following files might be created (either by Terraform or manually), to prevent you from pushing them accidentally they are in a -`.gitignore` file in the `terraform/packet` directory : +`.gitignore` file in the `terraform/metal` directory : - `.terraform` - `.tfvars` @@ -135,7 +135,7 @@ plugins. This is accomplished as follows: ```ShellSession cd inventory/$CLUSTER -terraform init ../../contrib/terraform/packet +terraform init ../../contrib/terraform/metal ``` This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. 
@@ -146,7 +146,7 @@ You can apply the Terraform configuration to your cluster with the following com issued from your cluster's inventory directory (`inventory/$CLUSTER`): ```ShellSession -terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet +terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal export ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i hosts ../../cluster.yml ``` @@ -156,7 +156,7 @@ ansible-playbook -i hosts ../../cluster.yml You can destroy your new cluster with the following command issued from the cluster's inventory directory: ```ShellSession -terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet +terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal ``` If you've started the Ansible run, it may also be a good idea to do some manual cleanup: diff --git a/contrib/terraform/packet/hosts b/contrib/terraform/metal/hosts similarity index 100% rename from contrib/terraform/packet/hosts rename to contrib/terraform/metal/hosts diff --git a/contrib/terraform/packet/kubespray.tf b/contrib/terraform/metal/kubespray.tf similarity index 74% rename from contrib/terraform/packet/kubespray.tf rename to contrib/terraform/metal/kubespray.tf index 819cc707bcb..c8019e5c65e 100644 --- a/contrib/terraform/packet/kubespray.tf +++ b/contrib/terraform/metal/kubespray.tf @@ -1,16 +1,15 @@ # Configure the Equinix Metal Provider -provider "packet" { - version = "~> 2.0" +provider "metal" { } -resource "packet_ssh_key" "k8s" { +resource "metal_ssh_key" "k8s" { count = var.public_key_path != "" ? 
1 : 0 name = "kubernetes-${var.cluster_name}" public_key = chomp(file(var.public_key_path)) } -resource "packet_device" "k8s_master" { - depends_on = [packet_ssh_key.k8s] +resource "metal_device" "k8s_master" { + depends_on = [metal_ssh_key.k8s] count = var.number_of_k8s_masters hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" @@ -18,12 +17,12 @@ resource "packet_device" "k8s_master" { facilities = [var.facility] operating_system = var.operating_system billing_cycle = var.billing_cycle - project_id = var.packet_project_id + project_id = var.metal_project_id tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"] } -resource "packet_device" "k8s_master_no_etcd" { - depends_on = [packet_ssh_key.k8s] +resource "metal_device" "k8s_master_no_etcd" { + depends_on = [metal_ssh_key.k8s] count = var.number_of_k8s_masters_no_etcd hostname = "${var.cluster_name}-k8s-master-${count.index + 1}" @@ -31,12 +30,12 @@ resource "packet_device" "k8s_master_no_etcd" { facilities = [var.facility] operating_system = var.operating_system billing_cycle = var.billing_cycle - project_id = var.packet_project_id + project_id = var.metal_project_id tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"] } -resource "packet_device" "k8s_etcd" { - depends_on = [packet_ssh_key.k8s] +resource "metal_device" "k8s_etcd" { + depends_on = [metal_ssh_key.k8s] count = var.number_of_etcd hostname = "${var.cluster_name}-etcd-${count.index + 1}" @@ -44,12 +43,12 @@ resource "packet_device" "k8s_etcd" { facilities = [var.facility] operating_system = var.operating_system billing_cycle = var.billing_cycle - project_id = var.packet_project_id + project_id = var.metal_project_id tags = ["cluster-${var.cluster_name}", "etcd"] } -resource "packet_device" "k8s_node" { - depends_on = [packet_ssh_key.k8s] +resource "metal_device" "k8s_node" { + depends_on = [metal_ssh_key.k8s] count = var.number_of_k8s_nodes hostname = 
"${var.cluster_name}-k8s-node-${count.index + 1}" @@ -57,7 +56,7 @@ resource "packet_device" "k8s_node" { facilities = [var.facility] operating_system = var.operating_system billing_cycle = var.billing_cycle - project_id = var.packet_project_id + project_id = var.metal_project_id tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"] } diff --git a/contrib/terraform/metal/output.tf b/contrib/terraform/metal/output.tf new file mode 100644 index 00000000000..262d91bb77a --- /dev/null +++ b/contrib/terraform/metal/output.tf @@ -0,0 +1,16 @@ +output "k8s_masters" { + value = metal_device.k8s_master.*.access_public_ipv4 +} + +output "k8s_masters_no_etc" { + value = metal_device.k8s_master_no_etcd.*.access_public_ipv4 +} + +output "k8s_etcds" { + value = metal_device.k8s_etcd.*.access_public_ipv4 +} + +output "k8s_nodes" { + value = metal_device.k8s_node.*.access_public_ipv4 +} + diff --git a/contrib/terraform/packet/sample-inventory/cluster.tfvars b/contrib/terraform/metal/sample-inventory/cluster.tfvars similarity index 95% rename from contrib/terraform/packet/sample-inventory/cluster.tfvars rename to contrib/terraform/metal/sample-inventory/cluster.tfvars index f5f953e0dda..f167aeb769c 100644 --- a/contrib/terraform/packet/sample-inventory/cluster.tfvars +++ b/contrib/terraform/metal/sample-inventory/cluster.tfvars @@ -2,7 +2,7 @@ cluster_name = "mycluster" # Your Equinix Metal project ID. 
See hhttps://metal.equinix.com/developers/docs/accounts/ -packet_project_id = "Example-API-Token" +metal_project_id = "Example-API-Token" # The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned # leave this value blank if the public key is already setup in the Equinix Metal project diff --git a/contrib/terraform/packet/sample-inventory/group_vars b/contrib/terraform/metal/sample-inventory/group_vars similarity index 100% rename from contrib/terraform/packet/sample-inventory/group_vars rename to contrib/terraform/metal/sample-inventory/group_vars diff --git a/contrib/terraform/packet/variables.tf b/contrib/terraform/metal/variables.tf similarity index 78% rename from contrib/terraform/packet/variables.tf rename to contrib/terraform/metal/variables.tf index 67af8e4a824..f0c9b2889fc 100644 --- a/contrib/terraform/packet/variables.tf +++ b/contrib/terraform/metal/variables.tf @@ -2,12 +2,12 @@ variable "cluster_name" { default = "kubespray" } -variable "packet_project_id" { +variable "metal_project_id" { description = "Your Equinix Metal project ID. 
See https://metal.equinix.com/developers/docs/accounts/" } variable "operating_system" { - default = "ubuntu_16_04" + default = "ubuntu_20_04" } variable "public_key_path" { @@ -24,23 +24,23 @@ variable "facility" { } variable "plan_k8s_masters" { - default = "c2.medium.x86" + default = "c3.small.x86" } variable "plan_k8s_masters_no_etcd" { - default = "c2.medium.x86" + default = "c3.small.x86" } variable "plan_etcd" { - default = "c2.medium.x86" + default = "c3.small.x86" } variable "plan_k8s_nodes" { - default = "c2.medium.x86" + default = "c3.medium.x86" } variable "number_of_k8s_masters" { - default = 0 + default = 1 } variable "number_of_k8s_masters_no_etcd" { @@ -52,6 +52,6 @@ variable "number_of_etcd" { } variable "number_of_k8s_nodes" { - default = 0 + default = 1 } diff --git a/contrib/terraform/packet/versions.tf b/contrib/terraform/metal/versions.tf similarity index 57% rename from contrib/terraform/packet/versions.tf rename to contrib/terraform/metal/versions.tf index d222f2bddb5..637203f2355 100644 --- a/contrib/terraform/packet/versions.tf +++ b/contrib/terraform/metal/versions.tf @@ -2,8 +2,8 @@ terraform { required_version = ">= 0.12" required_providers { - packet = { - source = "terraform-providers/packet" + metal = { + source = "equinix/metal" } } } diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 174d66e5a76..0e144c2beb0 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -20,6 +20,7 @@ most modern installs of OpenStack that support the basic services. - [Open Telekom Cloud](https://cloud.telekom.de/) - [OVH](https://www.ovh.com/) - [Rackspace](https://www.rackspace.com/) +- [Safespring](https://www.safespring.com) - [Ultimum](https://ultimum.io/) - [VexxHost](https://vexxhost.com/) - [Zetta](https://www.zetta.io/) @@ -247,6 +248,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. 
|`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. | |`az_list` | List of Availability Zones available in your OpenStack cluster. | |`network_name` | The name to be given to the internal network that will be generated | +|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default | |`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated | |`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. | |`floatingip_pool` | Name of the pool from which floating IPs will be allocated | @@ -283,7 +285,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) | |`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. | |`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default | +|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. 
`false` by default | |`k8s_nodes` | Map containing worker node definition, see explanation below | +|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` | ##### k8s_nodes diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf index ea943225e02..8e09c0d959a 100644 --- a/contrib/terraform/openstack/kubespray.tf +++ b/contrib/terraform/openstack/kubespray.tf @@ -24,6 +24,7 @@ module "ips" { network_name = var.network_name router_id = module.network.router_id k8s_nodes = var.k8s_nodes + k8s_masters = var.k8s_masters k8s_master_fips = var.k8s_master_fips bastion_fips = var.bastion_fips router_internal_port_id = module.network.router_internal_port_id @@ -44,6 +45,7 @@ module "compute" { number_of_bastions = var.number_of_bastions number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip + k8s_masters = var.k8s_masters k8s_nodes = var.k8s_nodes bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb @@ -70,6 +72,7 @@ module "compute" { flavor_bastion = var.flavor_bastion k8s_master_fips = module.ips.k8s_master_fips k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips + k8s_masters_fips = module.ips.k8s_masters_fips k8s_node_fips = module.ips.k8s_node_fips k8s_nodes_fips = module.ips.k8s_nodes_fips bastion_fips = module.ips.bastion_fips @@ -89,8 +92,10 @@ module "compute" { extra_sec_groups_name = var.extra_sec_groups_name group_vars_path = var.group_vars_path port_security_enabled = var.port_security_enabled - - network_id = module.network.router_id + force_null_port_security = var.force_null_port_security + network_router_id = module.network.router_id + network_id = module.network.network_id + use_existing_network = var.use_existing_network } output "private_subnet_id" { diff --git 
a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf index 10a41a99d73..16898353a08 100644 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ b/contrib/terraform/openstack/modules/compute/main.tf @@ -20,7 +20,8 @@ data "template_file" "cloudinit" { } data "openstack_networking_network_v2" "k8s_network" { - name = var.network_name + count = var.use_existing_network ? 1 : 0 + name = var.network_name } resource "openstack_compute_keypair_v2" "k8s" { @@ -158,25 +159,25 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" { locals { # master groups master_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s_master.name, - openstack_networking_secgroup_v2.k8s.name, - var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].name : "", + openstack_networking_secgroup_v2.k8s_master.id, + openstack_networking_secgroup_v2.k8s.id, + var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "", ]) # worker groups worker_sec_groups = compact([ - openstack_networking_secgroup_v2.k8s.name, - openstack_networking_secgroup_v2.worker.name, - var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "", + openstack_networking_secgroup_v2.k8s.id, + openstack_networking_secgroup_v2.worker.id, + var.extra_sec_groups ? 
openstack_networking_secgroup_v2.worker_extra[0].id : "", ]) # bastion groups bastion_sec_groups = compact(concat([ - openstack_networking_secgroup_v2.k8s.name, - openstack_networking_secgroup_v2.bastion[0].name, + openstack_networking_secgroup_v2.k8s.id, + openstack_networking_secgroup_v2.bastion[0].id, ])) # etcd groups - etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name]) + etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) # glusterfs groups - gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name]) + gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id]) # Image uuid image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id @@ -189,11 +190,15 @@ locals { resource "openstack_networking_port_v2" "bastion_port" { count = var.number_of_bastions name = "${var.cluster_name}-bastion-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null no_security_groups = var.port_security_enabled ? 
null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "bastion" { @@ -223,7 +228,7 @@ resource "openstack_compute_instance_v2" "bastion" { metadata = { ssh_user = var.ssh_user kubespray_groups = "bastion" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } @@ -235,11 +240,15 @@ resource "openstack_compute_instance_v2" "bastion" { resource "openstack_networking_port_v2" "k8s_master_port" { count = var.number_of_k8s_masters name = "${var.cluster_name}-k8s-master-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.master_sec_groups : null no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_master" { @@ -279,7 +288,7 @@ resource "openstack_compute_instance_v2" "k8s_master" { metadata = { ssh_user = var.ssh_user kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } @@ -288,14 +297,76 @@ resource "openstack_compute_instance_v2" "k8s_master" { } } +resource "openstack_networking_port_v2" "k8s_masters_port" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} + name = "${var.cluster_name}-k8s-${each.key}" + network_id = var.use_existing_network ? 
data.openstack_networking_network_v2.k8s_network[0].id : var.network_id + admin_state_up = "true" + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.master_sec_groups : null + no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] +} + +resource "openstack_compute_instance_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {} + name = "${var.cluster_name}-k8s-${each.key}" + availability_zone = each.value.az + image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null + flavor_id = each.value.flavor + key_pair = openstack_compute_keypair_v2.k8s.name + + dynamic "block_device" { + for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : [] + content { + uuid = local.image_to_use_master + source_type = "image" + volume_size = var.master_root_volume_size_in_gb + volume_type = var.master_volume_type + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + } + + network { + port = openstack_networking_port_v2.k8s_masters_port[each.key].id + } + + dynamic "scheduler_hints" { + for_each = var.master_server_group_policy != "" ? 
[openstack_compute_servergroup_v2.k8s_master[0]] : [] + content { + group = openstack_compute_servergroup_v2.k8s_master[0].id + } + } + + metadata = { + ssh_user = var.ssh_user + kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}" + depends_on = var.network_router_id + use_access_ip = var.use_access_ip + } + + provisioner "local-exec" { + command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}" + } +} + resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" { count = var.number_of_k8s_masters_no_etcd name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.master_sec_groups : null no_security_groups = var.port_security_enabled ? 
null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { @@ -335,7 +406,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { metadata = { ssh_user = var.ssh_user kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } @@ -347,11 +418,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { resource "openstack_networking_port_v2" "etcd_port" { count = var.number_of_etcd name = "${var.cluster_name}-etcd-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "etcd" { @@ -389,7 +464,7 @@ resource "openstack_compute_instance_v2" "etcd" { metadata = { ssh_user = var.ssh_user kubespray_groups = "etcd,no_floating" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } } @@ -397,11 +472,15 @@ resource "openstack_compute_instance_v2" "etcd" { resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" { count = var.number_of_k8s_masters_no_floating_ip name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? 
data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.master_sec_groups : null no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { @@ -439,7 +518,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { metadata = { ssh_user = var.ssh_user kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } } @@ -447,11 +526,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" { count = var.number_of_k8s_masters_no_floating_ip_no_etcd name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.master_sec_groups : null no_security_groups = var.port_security_enabled ? 
null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { @@ -490,7 +573,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { metadata = { ssh_user = var.ssh_user kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } } @@ -498,11 +581,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { resource "openstack_networking_port_v2" "k8s_node_port" { count = var.number_of_k8s_nodes name = "${var.cluster_name}-k8s-node-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_node" { @@ -542,7 +629,7 @@ resource "openstack_compute_instance_v2" "k8s_node" { metadata = { ssh_user = var.ssh_user kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } @@ -554,11 +641,15 @@ resource "openstack_compute_instance_v2" "k8s_node" { resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" { count = var.number_of_k8s_nodes_no_floating_ip name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? 
data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null no_security_groups = var.port_security_enabled ? null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { @@ -597,7 +688,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { metadata = { ssh_user = var.ssh_user kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } } @@ -605,11 +696,15 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { resource "openstack_networking_port_v2" "k8s_nodes_port" { for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {} name = "${var.cluster_name}-k8s-node-${each.key}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null no_security_groups = var.port_security_enabled ? 
null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "k8s_nodes" { @@ -648,7 +743,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" { metadata = { ssh_user = var.ssh_user kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } @@ -660,11 +755,15 @@ resource "openstack_compute_instance_v2" "k8s_nodes" { resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" { count = var.number_of_gfs_nodes_no_floating_ip name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}" - network_id = "${data.openstack_networking_network_v2.k8s_network.id}" + network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id admin_state_up = "true" - port_security_enabled = var.port_security_enabled - security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null + port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled + security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null no_security_groups = var.port_security_enabled ? 
null : false + + depends_on = [ + var.network_router_id + ] } resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { @@ -701,7 +800,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { metadata = { ssh_user = var.ssh_user_gfs kubespray_groups = "gfs-cluster,network-storage,no_floating" - depends_on = var.network_id + depends_on = var.network_router_id use_access_ip = var.use_access_ip } } @@ -719,6 +818,12 @@ resource "openstack_networking_floatingip_associate_v2" "k8s_master" { port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index) } +resource "openstack_networking_floatingip_associate_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} + floating_ip = var.k8s_masters_fips[each.key].address + port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id +} + resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" { count = var.master_root_volume_size_in_gb == 0 ? 
var.number_of_k8s_masters_no_etcd : 0 floating_ip = var.k8s_master_no_etcd_fips[count.index] diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf index 527e6dceb61..ca8034bb5a7 100644 --- a/contrib/terraform/openstack/modules/compute/variables.tf +++ b/contrib/terraform/openstack/modules/compute/variables.tf @@ -68,6 +68,14 @@ variable "network_id" { default = "" } +variable "use_existing_network" { + type = bool +} + +variable "network_router_id" { + default = "" +} + variable "k8s_master_fips" { type = list } @@ -80,6 +88,10 @@ variable "k8s_node_fips" { type = list } +variable "k8s_masters_fips" { + type = map +} + variable "k8s_nodes_fips" { type = map } @@ -104,6 +116,8 @@ variable "k8s_allowed_egress_ips" { type = list } +variable "k8s_masters" {} + variable "k8s_nodes" {} variable "supplementary_master_groups" { @@ -167,3 +181,7 @@ variable "group_vars_path" { variable "port_security_enabled" { type = bool } + +variable "force_null_port_security" { + type = bool +} diff --git a/contrib/terraform/openstack/modules/ips/main.tf b/contrib/terraform/openstack/modules/ips/main.tf index 243572162f2..3f962fdfc97 100644 --- a/contrib/terraform/openstack/modules/ips/main.tf +++ b/contrib/terraform/openstack/modules/ips/main.tf @@ -14,6 +14,12 @@ resource "openstack_networking_floatingip_v2" "k8s_master" { depends_on = [null_resource.dummy_dependency] } +resource "openstack_networking_floatingip_v2" "k8s_masters" { + for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {} + pool = var.floatingip_pool + depends_on = [null_resource.dummy_dependency] +} + # If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones. resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" { count = length(var.k8s_master_fips) > 0 ? 
0 : var.number_of_k8s_masters_no_etcd diff --git a/contrib/terraform/openstack/modules/ips/outputs.tf b/contrib/terraform/openstack/modules/ips/outputs.tf index 591cac2502f..3ff4622abf6 100644 --- a/contrib/terraform/openstack/modules/ips/outputs.tf +++ b/contrib/terraform/openstack/modules/ips/outputs.tf @@ -3,6 +3,10 @@ output "k8s_master_fips" { value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address } +output "k8s_masters_fips" { + value = openstack_networking_floatingip_v2.k8s_masters +} + # If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created. output "k8s_master_no_etcd_fips" { value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address diff --git a/contrib/terraform/openstack/modules/ips/variables.tf b/contrib/terraform/openstack/modules/ips/variables.tf index a30fffde025..b52888b847f 100644 --- a/contrib/terraform/openstack/modules/ips/variables.tf +++ b/contrib/terraform/openstack/modules/ips/variables.tf @@ -16,6 +16,8 @@ variable "router_id" { default = "" } +variable "k8s_masters" {} + variable "k8s_nodes" {} variable "k8s_master_fips" {} diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf index fed9d654b38..0e8a5004f33 100644 --- a/contrib/terraform/openstack/modules/network/outputs.tf +++ b/contrib/terraform/openstack/modules/network/outputs.tf @@ -2,6 +2,10 @@ output "router_id" { value = "%{if var.use_neutron == 1} ${var.router_id == null ? 
element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}" } +output "network_id" { + value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0) +} + output "router_internal_port_id" { value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0) } diff --git a/contrib/terraform/openstack/sample-inventory/cluster.tfvars b/contrib/terraform/openstack/sample-inventory/cluster.tfvars index c27d6972361..3c2576775fd 100644 --- a/contrib/terraform/openstack/sample-inventory/cluster.tfvars +++ b/contrib/terraform/openstack/sample-inventory/cluster.tfvars @@ -32,6 +32,28 @@ number_of_k8s_masters_no_floating_ip_no_etcd = 0 flavor_k8s_master = "" +k8s_masters = { + # "master-1" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = true + # "etcd" = true + # }, + # "master-2" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = false + # "etcd" = true + # }, + # "master-3" = { + # "az" = "nova" + # "flavor" = "" + # "floating_ip" = true + # "etcd" = true + # }, +} + + # nodes number_of_k8s_nodes = 2 @@ -52,6 +74,9 @@ number_of_k8s_nodes_no_floating_ip = 4 # networking network_name = "" +# Use a existing network with the name of network_name. Set to false to create a network with name of network_name. +# use_existing_network = true + external_net = "" subnet_cidr = "" @@ -59,3 +84,6 @@ subnet_cidr = "" floatingip_pool = "" bastion_allowed_remote_ips = ["0.0.0.0/0"] + +# Force port security to be null. Some cloud providers do not allow to set port security. 
+# force_null_port_security = false \ No newline at end of file diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf index 350e5bcae69..12c7f03a5f9 100644 --- a/contrib/terraform/openstack/variables.tf +++ b/contrib/terraform/openstack/variables.tf @@ -137,6 +137,12 @@ variable "network_name" { default = "internal" } +variable "use_existing_network" { + description = "Use an existing network" + type = bool + default = "false" +} + variable "network_dns_domain" { description = "dns_domain for the internal network" type = string @@ -154,6 +160,12 @@ variable "port_security_enabled" { default = "true" } +variable "force_null_port_security" { + description = "Force port security to be null. Some providers does not allow setting port security" + type = bool + default = "false" +} + variable "subnet_cidr" { description = "Subnet CIDR block." type = string @@ -274,6 +286,10 @@ variable "router_internal_port_id" { default = null } +variable "k8s_masters" { + default = {} +} + variable "k8s_nodes" { default = {} } diff --git a/contrib/terraform/packet/output.tf b/contrib/terraform/packet/output.tf deleted file mode 100644 index c27b9b915cf..00000000000 --- a/contrib/terraform/packet/output.tf +++ /dev/null @@ -1,16 +0,0 @@ -output "k8s_masters" { - value = packet_device.k8s_master.*.access_public_ipv4 -} - -output "k8s_masters_no_etc" { - value = packet_device.k8s_master_no_etcd.*.access_public_ipv4 -} - -output "k8s_etcds" { - value = packet_device.k8s_etcd.*.access_public_ipv4 -} - -output "k8s_nodes" { - value = packet_device.k8s_node.*.access_public_ipv4 -} - diff --git a/contrib/terraform/terraform.py b/contrib/terraform/terraform.py index 537dc62f438..0896fcb29f1 100755 --- a/contrib/terraform/terraform.py +++ b/contrib/terraform/terraform.py @@ -114,10 +114,10 @@ def iterhosts(resources): def iterips(resources): - '''yield ip tuples of (instance_id, ip)''' + '''yield ip tuples of (port_id, ip)''' for module_name, key, 
resource in resources: resource_type, name = key.split('.', 1) - if resource_type == 'openstack_compute_floatingip_associate_v2': + if resource_type == 'openstack_networking_floatingip_associate_v2': yield openstack_floating_ips(resource) @@ -195,8 +195,8 @@ def parse_bool(string_form): raise ValueError('could not convert %r to a bool' % string_form) -@parses('packet_device') -def packet_device(resource, tfvars=None): +@parses('metal_device') +def metal_device(resource, tfvars=None): raw_attrs = resource['primary']['attributes'] name = raw_attrs['hostname'] groups = [] @@ -213,14 +213,14 @@ def packet_device(resource, tfvars=None): 'state': raw_attrs['state'], # ansible 'ansible_ssh_host': raw_attrs['network.0.address'], - 'ansible_ssh_user': 'root', # Use root by default in packet + 'ansible_ssh_user': 'root', # Use root by default in metal # generic 'ipv4_address': raw_attrs['network.0.address'], 'public_ipv4': raw_attrs['network.0.address'], 'ipv6_address': raw_attrs['network.1.address'], 'public_ipv6': raw_attrs['network.1.address'], 'private_ipv4': raw_attrs['network.2.address'], - 'provider': 'packet', + 'provider': 'metal', } if raw_attrs['operating_system'] == 'flatcar_stable': @@ -228,10 +228,10 @@ def packet_device(resource, tfvars=None): attrs.update({'ansible_ssh_user': 'core'}) # add groups based on attrs - groups.append('packet_operating_system=' + attrs['operating_system']) - groups.append('packet_locked=%s' % attrs['locked']) - groups.append('packet_state=' + attrs['state']) - groups.append('packet_plan=' + attrs['plan']) + groups.append('metal_operating_system=' + attrs['operating_system']) + groups.append('metal_locked=%s' % attrs['locked']) + groups.append('metal_state=' + attrs['state']) + groups.append('metal_plan=' + attrs['plan']) # groups specific to kubespray groups = groups + attrs['tags'] @@ -243,13 +243,13 @@ def openstack_floating_ips(resource): raw_attrs = resource['primary']['attributes'] attrs = { 'ip': raw_attrs['floating_ip'], - 
'instance_id': raw_attrs['instance_id'], + 'port_id': raw_attrs['port_id'], } return attrs def openstack_floating_ips(resource): raw_attrs = resource['primary']['attributes'] - return raw_attrs['instance_id'], raw_attrs['floating_ip'] + return raw_attrs['port_id'], raw_attrs['floating_ip'] @parses('openstack_compute_instance_v2') @calculate_mantl_vars @@ -282,6 +282,7 @@ def openstack_host(resource, module_name): # generic 'public_ipv4': raw_attrs['access_ip_v4'], 'private_ipv4': raw_attrs['access_ip_v4'], + 'port_id' : raw_attrs['network.0.port'], 'provider': 'openstack', } @@ -339,10 +340,10 @@ def openstack_host(resource, module_name): def iter_host_ips(hosts, ips): '''Update hosts that have an entry in the floating IP list''' for host in hosts: - host_id = host[1]['id'] + port_id = host[1]['port_id'] - if host_id in ips: - ip = ips[host_id] + if port_id in ips: + ip = ips[port_id] host[1].update({ 'access_ip_v4': ip, diff --git a/docs/calico.md b/docs/calico.md index 1cec1392c7e..2d10c04e45e 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -50,7 +50,7 @@ calico_datastore: kdd ### Optional : Define network backend -In some cases you may want to define Calico network backend. Allowed values are `bird`, `vxlan` or `none`. Bird is a default value. +In some cases you may want to define Calico network backend. Allowed values are `bird`, `vxlan` or `none`. `vxlan` is the default value. To re-define you need to edit the inventory and add a group variable `calico_network_backend` diff --git a/docs/kata-containers.md b/docs/kata-containers.md index 86295c5a2ee..9e7d6c46974 100644 --- a/docs/kata-containers.md +++ b/docs/kata-containers.md @@ -10,7 +10,7 @@ _Qemu_ is the only hypervisor supported by Kubespray. 
To use Kata Containers, set the following variables: -**k8s_cluster.yml**: +**k8s-cluster.yml**: ```yaml container_manager: containerd @@ -61,7 +61,7 @@ kata_containers_qemu_overhead_fixed_memory: 290Mi ### Optional : Select Kata Containers version -Optionally you can select the Kata Containers release version to be installed. The available releases are published in [GitHub](https://github.com/kata-containers/runtime/releases). +Optionally you can select the Kata Containers release version to be installed. The available releases are published in [GitHub](https://github.com/kata-containers/kata-containers/releases). ```yaml kata_containers_version: 2.2.2 diff --git a/docs/vars.md b/docs/vars.md index 00d35e48a53..a63e560b0a8 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -57,36 +57,55 @@ Kubernetes needs some parameters in order to get deployed. These are the following default cluster parameters: * *cluster_name* - Name of cluster (default is cluster.local) + * *container_manager* - Container Runtime to install in the nodes (default is containerd) + * *image_command_tool* - Tool used to pull images (default depends on `container_manager` and is `nerdctl` for `containerd`, `crictl` for `crio`, `docker` for `docker`) + * *image_command_tool_on_localhost* - Tool used to pull images on localhost (default is equal to `image_command_tool`) + * *dns_domain* - Name of cluster DNS domain (default is cluster.local) + * *kube_network_plugin* - Plugin to use for container networking + * *kube_service_addresses* - Subnet for cluster IPs (default is 10.233.0.0/18). Must not overlap with kube_pods_subnet + * *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not overlap with kube_service_addresses. + * *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining bits in kube_pods_subnet dictates how many kube_nodes can be in cluster. 
Setting this > 25 will raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly (assertion not applicable to calico which doesn't use this as a hard limit, see [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes). + * *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services. + * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``. + * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``. + * *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube_nodes can be in cluster. + * *skydns_server* - Cluster IP for DNS (default is 10.233.0.3) + * *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4) + * *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/) on the CoreDNS service. + * *coredns_k8s_external_zone* - Zone that will be used when CoreDNS k8s_external plugin is enabled (default is k8s_external.local) + * *enable_coredns_k8s_endpoint_pod_names* - If enabled, it configures endpoint_pod_names option for kubernetes plugin. on the CoreDNS service. + * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or OpenStack (default is unset) + * *kube_feature_gates* - A list of key=value pairs that describe feature gates for alpha/experimental Kubernetes features. (defaults is `[]`). 
Additionally, you can use also the following variables to individually customize your kubernetes components installation (they works exactly like `kube_feature_gates`): @@ -95,8 +114,10 @@ following default cluster parameters: * *kube_scheduler_feature_gates* * *kube_proxy_feature_gates* * *kubelet_feature_gates* + * *kubeadm_feature_gates* - A list of key=value pairs that describe feature gates for alpha/experimental Kubeadm features. (defaults is `[]`) + * *authorization_modes* - A list of [authorization mode]( https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) that the cluster should be configured for. Defaults to `['Node', 'RBAC']` @@ -106,6 +127,27 @@ following default cluster parameters: require a service account and cluster role bindings. You can override this setting by setting authorization_modes to `[]`. +* *kube_apiserver_admission_control_config_file* - Enable configuration for `kube-apiserver` admission plugins. + Currently this variable allow you to configure the `EventRateLimit` admission plugin. + + To configure the **EventRateLimit** plugin you have to define a data structure like this: + +```yml +kube_apiserver_admission_event_rate_limits: + limit_1: + type: Namespace + qps: 50 + burst: 100 + cache_size: 2000 + limit_2: + type: User + qps: 50 + burst: 100 + ... +``` + +* *kube_apiserver_service_account_lookup* - Enable validation service account before validating token. Default `true`. + Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances' private addresses, make sure to pick another values for ``kube_service_addresses`` and ``kube_pods_subnet``, for example from the ``172.18.0.0/16``. @@ -150,6 +192,8 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m **Note** that server certificates are **not** approved automatically. 
Approve them manually (`kubectl get csr`, `kubectl certificate approve`) or implement custom approving controller like [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp). +* *kubelet_streaming_connection_idle_timeout* - Set the maximum time a streaming connection can be idle before the connection is automatically closed. +* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host. * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. For example, labels can be set in the inventory as variables or more widely in group_vars. *node_labels* can only be defined as a dict: diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml index d3c27ac35bd..ea69a5b2c1d 100644 --- a/inventory/sample/group_vars/all/all.yml +++ b/inventory/sample/group_vars/all/all.yml @@ -113,3 +113,10 @@ no_proxy_exclude_workers: false # sysctl_file_path to add sysctl conf to # sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index fd8b5cd0d12..ba324967ea7 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens" kube_api_anonymous_auth: true ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.23.5 +kube_version: v1.23.6 # Where the binaries will be downloaded. 
# Note: ensure that you've enough disk space (about 1G) diff --git a/requirements-2.12.txt b/requirements-2.12.txt index 99f4e60524f..806315c73e6 100644 --- a/requirements-2.12.txt +++ b/requirements-2.12.txt @@ -1,5 +1,5 @@ -ansible==5.5.0 -ansible-core==2.12.3 +ansible==5.7.0 +ansible-core==2.12.5 cryptography==2.8 jinja2==2.11.3 netaddr==0.7.19 diff --git a/roles/container-engine/containerd/handlers/main.yml b/roles/container-engine/containerd/handlers/main.yml index bd483bc8e88..d2f12658f9b 100644 --- a/roles/container-engine/containerd/handlers/main.yml +++ b/roles/container-engine/containerd/handlers/main.yml @@ -11,6 +11,7 @@ state: restarted enabled: yes daemon-reload: yes + masked: no - name: Containerd | wait for containerd command: "{{ containerd_bin_dir }}/ctr images ls -q" diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 8facf61aaf4..746e9c4a474 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -98,7 +98,8 @@ meta: flush_handlers - name: containerd | Ensure containerd is started and enabled - service: + systemd: name: containerd + daemon_reload: yes enabled: yes state: started diff --git a/roles/container-engine/cri-dockerd/handlers/main.yml b/roles/container-engine/cri-dockerd/handlers/main.yml index 94b760a02aa..1cc890a79cd 100644 --- a/roles/container-engine/cri-dockerd/handlers/main.yml +++ b/roles/container-engine/cri-dockerd/handlers/main.yml @@ -8,7 +8,9 @@ - name: cri-dockerd | reload systemd systemd: + name: cri-dockerd daemon_reload: true + masked: no - name: cri-dockerd | reload cri-dockerd.socket service: diff --git a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml index 2b5ddd379ab..ccc9f19be9f 100644 --- a/roles/container-engine/cri-o/defaults/main.yml +++ b/roles/container-engine/cri-o/defaults/main.yml @@ -56,15 +56,11 @@ crio_runtimes: # surface and 
mitigating the consequences of containers breakout. kata_runtimes: # Kata Containers with the default configured VMM - - name: kata-runtime - path: /opt/kata/bin/kata-runtime - type: oci - root: /run/kata-containers - # Kata Containers with the QEMU VMM - name: kata-qemu - path: /opt/kata/bin/kata-qemu - type: oci + path: /usr/local/bin/containerd-shim-kata-qemu-v2 + type: vm root: /run/kata-containers + privileged_without_host_devices: true # crun is a fast and low-memory footprint OCI Container Runtime fully written in C. crun_runtime: diff --git a/roles/container-engine/cri-o/molecule/default/molecule.yml b/roles/container-engine/cri-o/molecule/default/molecule.yml index af913b489e7..c46da721389 100644 --- a/roles/container-engine/cri-o/molecule/default/molecule.yml +++ b/roles/container-engine/cri-o/molecule/default/molecule.yml @@ -7,8 +7,8 @@ lint: | set -e yamllint -c ../../../.yamllint . platforms: - - name: ubuntu1804 - box: generic/ubuntu1804 + - name: ubuntu2004 + box: generic/ubuntu2004 cpus: 2 memory: 1024 groups: diff --git a/roles/container-engine/docker/handlers/main.yml b/roles/container-engine/docker/handlers/main.yml index 18502899c09..8c26de27308 100644 --- a/roles/container-engine/docker/handlers/main.yml +++ b/roles/container-engine/docker/handlers/main.yml @@ -9,7 +9,9 @@ - name: Docker | reload systemd systemd: + name: docker daemon_reload: true + masked: no - name: Docker | reload docker.socket service: diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index ad3480180a9..6ba48bb3cea 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -71,10 +71,10 @@ nerdctl_extra_flags: '{%- if containerd_insecure_registries is defined and conta kubeadm_version: "{{ kube_version }}" crun_version: 1.4.4 runc_version: v1.1.1 -kata_containers_version: 2.2.3 +kata_containers_version: 2.4.1 youki_version: 0.0.1 gvisor_version: 20210921 -containerd_version: 1.6.2 +containerd_version: 1.6.4 
cri_dockerd_version: v0.2.0 # this is relevant when container_manager == 'docker' @@ -95,7 +95,7 @@ github_image_repo: "ghcr.io" # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download -calico_version: "v3.21.4" +calico_version: "v3.22.2" calico_ctl_version: "{{ calico_version }}" calico_cni_version: "{{ calico_version }}" calico_flexvol_version: "{{ calico_version }}" @@ -105,8 +105,8 @@ calico_apiserver_version: "{{ calico_version }}" typha_enabled: false calico_apiserver_enabled: false -flannel_version: "v0.15.1" -flannel_cni_version: "v1.0.0" +flannel_version: "v0.17.0" +flannel_cni_version: "v1.0.1" cni_version: "v1.0.1" weave_version: 2.8.1 pod_infra_version: "3.3" @@ -114,20 +114,21 @@ cilium_version: "v1.11.3" kube_ovn_version: "v1.8.1" kube_router_version: "v1.4.0" multus_version: "v3.8" -helm_version: "v3.8.0" -nerdctl_version: "0.18.0" +helm_version: "v3.8.2" +nerdctl_version: "0.19.0" krew_version: "v0.4.2" # Get kubernetes major version (i.e. 
1.17.4 => 1.17) kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}" etcd_supported_versions: - v1.23: "v3.5.1" - v1.22: "v3.5.1" - v1.21: "v3.5.0" + v1.23: "v3.5.3" + v1.22: "v3.5.3" + v1.21: "v3.5.3" etcd_version: "{{ etcd_supported_versions[kube_major_version] }}" crictl_supported_versions: + v1.24: "v1.24.0" v1.23: "v1.23.0" v1.22: "v1.22.0" v1.21: "v1.21.0" @@ -138,7 +139,6 @@ kubelet_download_url: "https://storage.googleapis.com/kubernetes-release/release kubectl_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl" kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm" etcd_download_url: "https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" -flannel_cni_download_url: "https://github.com/flannel-io/cni-plugin/releases/download/{{ flannel_cni_version }}/flannel-{{ image_arch }}" cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz" calicoctl_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" calicoctl_alternate_download_url: "https://github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" @@ -159,18 +159,22 @@ cri_dockerd_download_url: "https://github.com/Mirantis/cri-dockerd/releases/down crictl_checksums: arm: + v1.24.0: 1ab8a88d6ce1e9cff1c76fc454d2d41cf0c89e98c6db15a41804a3a5874cbf89 v1.23.0: c20f7a118183d1e6da24c3709471ea0b4dee51cb709f958e0d90f3acb4eb59ae v1.22.0: b74f7cc52ce79c6d7fd776beb6353f4628e9c36f17ba2b8e6c48155714057f07 v1.21.0: 638af758860b282f8ec862b90ecbc200ec87388134e555684f92d39591c938f7 arm64: + v1.24.0: 
b6fe172738dfa68ca4c71ade53574e859bf61a3e34d21b305587b1ad4ab28d24 v1.23.0: 91094253e77094435027998a99b9b6a67b0baad3327975365f7715a1a3bd9595 v1.22.0: a713c37fade0d96a989bc15ebe906e08ef5c8fe5e107c2161b0665e9963b770e v1.21.0: 454eecd29fe636282339af5b73c60234a7d10e4b11b9e18937e33056763d72cf amd64: + v1.24.0: 3df4a4306e0554aea4fdc26ecef9eea29a58c8460bebfaca3405799787609880 v1.23.0: b754f83c80acdc75f93aba191ff269da6be45d0fc2d3f4079704e7d1424f1ca8 v1.22.0: 45e0556c42616af60ebe93bf4691056338b3ea0001c0201a6a8ff8b1dbc0652a v1.21.0: 85c78a35584971625bf1c3bcd46e5404a90396f979d7586f18b11119cb623e24 ppc64le: + v1.24.0: 586c263678c6d8d543976607ea1732115e622d44993e2bcbed29832370d3a754 v1.23.0: 53db9e605a3042ea77bbf42a01a4e248dea8839bcab544c491745874f73aeee7 v1.22.0: c78bcea20c8f8ca3be0762cca7349fd2f1df520c304d0b2ef5e8fa514f64e45f v1.21.0: 0770100d30d430dbb67a58119ffed459856163ba01b6d71ac6fd4be7336253cf @@ -179,12 +183,15 @@ crictl_checksums: # Kubernetes versions above Kubespray's current target version are untested and should be used with caution. 
kubelet_checksums: arm: + v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3 + v1.23.6: 2f3fb387c20de1da586ac6bc43fa714fb7c2116b4243a2ef1e28ecfbba324cea v1.23.5: 9505cf63fb56a1d90d1db9c1507587621455a152ef16d871e802875e1e7b4587 v1.23.4: e67a51013ed59ea3df0ad1d54863d483cc99247584992b8cad6dd612135a70c5 v1.23.3: 80a2c005e7b6c4e9363a18fa1d8911b6592eb2f93cbaa8a56fe5f6f59515d1a4 v1.23.2: f9e83b3bd99b9e70cd98a5f8dc75a89d3d51548d51e4e05615cdc48d6144f908 v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba + v1.22.9: 99eb1607e30d855b14da6f4f21d00d09dc6477c3e3bc1e88d00dea7961f3a488 v1.22.8: 7bc14bfca0efb5af6d7e56218f5c51862596cd9927843f8456a36e70e8e64da8 v1.22.7: 3709a794b33081b3f5f5ff1c6f9ab1614c3723d1da0a31c74c37ccdec456e94f v1.22.6: 9957157375a343664db35be75281d610df85e1986a98cc3db1398bd0e53c36f4 @@ -194,6 +201,7 @@ kubelet_checksums: v1.22.2: 941e639b0f859eba65df0c66be82808ea6be697ed5dbf4df8e602dcbfa683aa3 v1.22.1: f42bc00f274be7ce0578b359cbccc48ead03894b599f5bf4d10e44c305fbab65 v1.22.0: 4354dc8db1d8ca336eb940dd73adcd3cf17cbdefbf11889602420f6ee9c6c4bb + v1.21.12: 4019ecdef3db0acb8f9ec61fe9eb5f0ff746eaf6595a59b7a44de62916f0c45d v1.21.11: 37fd7e30e532a51d6eef48b29a3b6a52f47827741f48458e1a7285e9ea9f88e9 v1.21.10: 5ab184ab71032662e68c3a933ce10fac59c0b02ff48db40b5438e617367d4689 v1.21.9: c359937e497184577efda5c850874af92ab626331da0cbf208d087e3f03b6114 @@ -207,12 +215,15 @@ kubelet_checksums: v1.21.1: 2d2d17654a4abf66307c81c513228f29719aa02f5d5855dbd67a1337ab47c804 v1.21.0: c2a3c926842c892ca3a124fb0526970e6ce1d92cb3ac35028b251a5672bb1af1 arm64: + v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e + v1.23.6: 11a0310e8e7af5a11539ac26d6c14cf1b77d35bce4ca74e4bbd053ed1afc8650 v1.23.5: 61f7e3ae0eb00633d3b5163c046cfcae7e73b5f26d4ffcf343f3a45904323583 v1.23.4: c4f09c9031a34549fbaa48231b115fee6e170ce6832dce26d4b50b040aad2311 v1.23.3: 
95c36d0d1e65f6167f8fa80df04b3a816bc803e6bb5554f04d6af849c729a77d v1.23.2: 65372ad077a660dfb8a863432c8a22cd0b650122ca98ce2e11f51a536449339f v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9 + v1.22.9: d7a692ee4f5f5929a15c61947ae2deecb71b0945461f6064ced83d13094028e8 v1.22.8: 604c672908a3b3cbbcf9d109d8d5fef0879992ddcf0d3e0766079d3bb7d0ca3e v1.22.7: 8291d304c0ba4faec4336336d4cdd5159f5c90652b8b0d6be0cb5ce8f8bf92e3 v1.22.6: fbb823fe82b16c6f37911e907d3e4921f4642d5d48eb60e56aba1d7be0665430 @@ -222,6 +233,7 @@ kubelet_checksums: v1.22.2: f5fe3d6f4b2df5a794ebf325dc17fcdfe905a188e25f7c7e47d9cd15f14f8c2d v1.22.1: d5ffd67d8285fb224a1c49622fd739131f7b941e3d68f233dec96e72c9ebee63 v1.22.0: cea637a7da4f1097b16b0195005351c07032a820a3d64c3ff326b9097cfac930 + v1.21.12: cb523115a0aef43fc7f1de58c33d364185b3888af2083c303e6cc59335431ac2 v1.21.11: ec0df7cf90f3422d674f9881e33d6e329a12e0f5bb438b422999493fd4370edf v1.21.10: 5278427751381b90299e4ef330f41ca6b691aab39c3100cd200344ce6a7481c9 v1.21.9: 8797c78961cb71a757f35714d2735bb8bdbea94fc13d567bc0f1cf4f8e49e880 @@ -235,12 +247,15 @@ kubelet_checksums: v1.21.1: 5b37d7fc2da65a25896447685166769333b5896488de21bc9667edb4e799905e v1.21.0: 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4 amd64: + v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52 + v1.23.6: fbb83e35f6b9f7cae19c50694240291805ca9c4028676af868306553b3e9266c v1.23.5: 253b9db2299b09b91e4c09781ce1d2db6bad2099cf16ba210245159f48d0d5e4 v1.23.4: ec3db57edcce219c24ef37f4a6a2eef5a1543e4a9bd15e7ecc993b9f74950d91 v1.23.3: 8f9d2dd992af82855fbac2d82e030429b08ba7775e4fee7bf043eb857dfb0317 v1.23.2: c3c4be17910935d234b776288461baf7a9c6a7414d1f1ac2ef8d3a1af4e41ab6 v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4 v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518 + v1.22.9: 
61530a9e6a5cb1f971295de860a8ade29db65d0dff50d1ffff3de1155dfd0c02 v1.22.8: 2e6d1774f18c4d4527c3b9197a64ea5705edcf1b547c77b3e683458d771f3ce7 v1.22.7: cfc96b5f781bfbfdcb05115f4e26a5a6afc9d74bb4a5647c057b2c13086fb24d v1.22.6: 7b009835b0ab74aa16ebf57f5179893035e0cf5994e1bcf9b783275921a0393a @@ -250,6 +265,7 @@ kubelet_checksums: v1.22.2: 0fd6572e24e3bebbfd6b2a7cb7adced41dad4a828ef324a83f04b46378a8cb24 v1.22.1: 2079780ad2ff993affc9b8e1a378bf5ee759bf87fdc446e6a892a0bbd7353683 v1.22.0: fec5c596f7f815f17f5d7d955e9707df1ef02a2ca5e788b223651f83376feb7f + v1.21.12: 56246c4d0433a7cfd29e3e989fe3835a7545a781ff0123738713c8c78a99ec17 v1.21.11: ea22e3683016643344c5839a317b5e7b0061fdded321339a6d545766765bb10a v1.21.10: 8e0dab1cb93e61771fba594484a37a6079073ed2d707cf300c472e79b2f91bf0 v1.21.9: 1fa0c296df6af71fca1bdd94f9fb19c7051b4b3f8cf19c353192cb96b413fcf2 @@ -263,12 +279,15 @@ kubelet_checksums: v1.21.1: e77ff3ea404b2e69519ea4dce41cbdf11ae2bcba75a86d409a76eecda1c76244 v1.21.0: 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35 ppc64le: + v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931 + v1.23.6: 04461a5f75c2734ec5989f03bf72d766fb8d55021f1625b671bf805a62882089 v1.23.5: 82e24cc48f23c0bfa3e90cce14b7ae0e0fb28a9ed9d2827e8ca503588f7ea1b5 v1.23.4: f23611aea7130ba423268983ba1ce6db9451f69069dd16a8dbf013ab46237196 v1.23.3: 055a9c9e8679c9ff963e43d1dc7d7aa3670a8aa56b96725de85c816e682c24bb v1.23.2: 6fdee30ee13149845aac8d110ad6a1894bb35f953e1ecb562ce7c59f63329dca v1.23.1: 9c3dc8ba6888b610e204d4066f0460d5b24037219300bb5f5b254ea7e8d5a4d1 v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753 + v1.22.9: fd5be136a69e011ccb9d4482e4c13f23045e3c9c891e7e87394019f003f5cc79 v1.22.8: 804c336a31dfce44330e358d6b30dd0056859c3edc2b9bf34672d327fa8a2e23 v1.22.7: 3bfa04aa3a443aacdc6cf3b0a500317b5efa5cbdf4d9c343026be442120113b4 v1.22.6: 4e64366b96abaf7b45d14c72f6f84fb51c84a66ea0f25e93e50f986e6af7d29e @@ -278,6 +297,7 @@ kubelet_checksums: v1.22.2: 
9b4e555110f747569393220ef12a54ae26eb4168eefb77d4b1e6c1d123f71438 v1.22.1: a8c379fce4b1c1bc40238dfea67db286ec8ffec56ed701d581b53a941f7031bb v1.22.0: 957dcc6ae45078ce971af183c0061d60168c15f484dcd978588cc6380236423f + v1.21.12: b9f3d7e56f8399bcef467db45e57d2a329f0138fadcaff280b83dcb8ed5f3ca2 v1.21.11: d1d9e6b5703a22868118c9ed2e495dd50fc6bde514e8263cadd26f90f191480b v1.21.10: 4bfd0af8f5a3d1e581fa16162e266dbd7636e2c9836b10a5f1d8c191352d064d v1.21.6: 84c4b957b0882fbada3db04c55edf176cd53ff9d36879fac626438b41e38e3e4 @@ -289,12 +309,15 @@ kubelet_checksums: v1.21.0: ac4ad7c275516b761b79b1c238d1745aafbf6bddb4c80931c02e16fcda9ff8c0 kubectl_checksums: arm: + v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc + v1.23.6: 30d8e9656334b57e78c8dbc5d5f245a64b9a74c4fd03db47182fa7a21c2f5e32 v1.23.5: 58420bc549e1683a4529066b38b2ac657611ed3b70041be78fba3b29401415db v1.23.4: bde3d7801cfe444d4e226d4669dfd518e4687e16c99efddd016c4bf3d529b198 v1.23.3: bc41382fbd3f6b33cb5ccb1819c5a38f2e6f3c9ce22acfedd6970b0b9b7748da v1.23.2: 6521719af33342f00ebb6cf020848e25152a63ed5f35a94440c08373b7a36173 v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5 v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644 + v1.22.9: 4b45c5fb69e385f58293c5142d0ee51f79c3e3620a180632bd2370c01d0698e7 v1.22.8: 08ffeb8924c315cd466fc930377ac545edd6ac4ebb8bf284218947256b6729f3 v1.22.7: be9a833a6eae7ee7698ee5cc18bacc2652207af07528e60a78f43a8139fffbfc v1.22.6: a0dea833198a95ec85b4d55fe7e16333bcdc6a93290238c7473887e7e06f23a7 @@ -304,6 +327,7 @@ kubectl_checksums: v1.22.2: a16f7d70e65589d2dbd5d4f2115f6ccd4f089fe17a2961c286b809ad94eb052a v1.22.1: 50991ec4313ee42da03d60e21b90bc15e3252c97db189d1b66aad5bbb555997b v1.22.0: 6d7c787416a148acffd49746837df4cebb1311c652483dc3d2c8d24ce1cc897e + v1.21.12: 9952c99d8acf23ad6a3f9fb6849f4e22b5277bcfc6dcfb8a802295b1f15157e1 v1.21.11: 16e0065ac097d42cf791ac0be297c1a86ef48c72ba2a32748ac2c7ad51a58175 v1.21.10: 
faefbe444bd78cf217ef1c72abce816f335e06a1e08afa8a1d9530ba04d24ee9 v1.21.9: 543f0425d76c71d3d3ac2af8eaa7ca7dd6aa1919f01312303f328b6d805f3e20 @@ -317,12 +341,15 @@ kubectl_checksums: v1.21.1: d963971fd796b04ccaa389cf59900834e01c151a52c531585ac68aae779c0d91 v1.21.0: 6d79f6ebec2eda45b0808a895fa5d06bd8611c02d34b584eaa94857f365b25aa arm64: + v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d + v1.23.6: 4be771c8e6a082ba61f0367077f480237f9858ef5efe14b1dbbfc05cd42fc360 v1.23.5: 15cd560c04def7bbe5ee3f6f75e2cfd3913371c7e76354f4b2d5d6f536b70e39 v1.23.4: aa45dba48791eeb78a994a2723c462d155af4e39fdcfbcb39ce9c96f604a967a v1.23.3: 6708d7a701b3d9ab3b359c6be27a3012b1c486fa1e81f79e5bdc71ffca2c38f9 v1.23.2: 6e7bb8ddc5fc8fa89a4c31aba02942718b092a5107585bd09a83c95039c7510b v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524 v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe + v1.22.9: 33724bed4dddf4d8ecd6ae75667552d121e2fb575ff2db427ce66516e048edac v1.22.8: 48105735b74e941a84dec6bd53637c023ad53dc5fadd9bf616347cb339c76b47 v1.22.7: 44342131947bc61e6b03103e7e1302d16fa3e5b2e2cd67e27194f66223ecf798 v1.22.6: b43199fe66a58f292f2c685b922330819190eb22ac41cc5c10c33fdf9f2bbc29 @@ -332,6 +359,7 @@ kubectl_checksums: v1.22.2: c5bcc7e5321d34ac42c4635ad4f6fe8bd4698e9c879dc3367be542a0b301297b v1.22.1: 5c7ef1e505c35a8dc0b708f6b6ecdad6723875bb85554e9f9c3fe591e030ae5c v1.22.0: 8d9cc92dcc942f5ea2b2fc93c4934875d9e0e8ddecbde24c7d4c4e092cfc7afc + v1.21.12: 3f3739cff2d1a4c28d2f89d06a2bd39388af95ce25f70b6d5cc0de0538d2ce4b v1.21.11: 2d51a37128d823520f5f2b70436f5e3ae426eeacd16d671ae7806d421e4f57d8 v1.21.10: d0a88f897824954ec104895eae5f9ff9a173b162d1c9245c274cfe8db323fb37 v1.21.9: 6e2893b5de590fd9587ba327c048e5318e9e12e2acdc5a83c995c57ae822e6e4 @@ -345,12 +373,15 @@ kubectl_checksums: v1.21.1: d7e1163f4127efd841e5f5db6eacced11c2a3b20384457341b19ca295d0c535f v1.21.0: a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d amd64: + v1.24.0: 
94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7 + v1.23.6: 703a06354bab9f45c80102abff89f1a62cbc2c6d80678fd3973a014acc7c500a v1.23.5: 715da05c56aa4f8df09cb1f9d96a2aa2c33a1232f6fd195e3ffce6e98a50a879 v1.23.4: 3f0398d4c8a5ff633e09abd0764ed3b9091fafbe3044970108794b02731c72d6 v1.23.3: d7da739e4977657a3b3c84962df49493e36b09cc66381a5e36029206dd1e01d0 v1.23.2: 5b55b58205acbafa7f4e3fc69d9ce5a9257be63455db318e24db4ab5d651cbde v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e v1.23.0: 2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f + v1.22.9: ae6a9b585f9a366d24bb71f508bfb9e2bb90822136138109d3a91cd28e6563bb v1.22.8: 761bf1f648056eeef753f84c8365afe4305795c5f605cd9be6a715483fe7ca6b v1.22.7: 4dd14c5b61f112b73a5c9c844011a7887c4ffd6b91167ca76b67197dee54d388 v1.22.6: 1ab07643807a45e2917072f7ba5f11140b40f19675981b199b810552d6af5c53 @@ -360,6 +391,7 @@ kubectl_checksums: v1.22.2: aeca0018958c1cae0bf2f36f566315e52f87bdab38b440df349cd091e9f13f36 v1.22.1: 78178a8337fc6c76780f60541fca7199f0f1a2e9c41806bded280a4a5ef665c9 v1.22.0: 703e70d49b82271535bc66bc7bd469a58c11d47f188889bd37101c9772f14fa1 + v1.21.12: 5a8bde5198dc0e87dfa8ebc50c29f69becdc94c756254f6b2c3f37cdbfaf2e42 v1.21.11: 9c45ce24ad412701beeac8d9f0004787209d76dd66390915f38a8682358484cb v1.21.10: 24ce60269b1ffe1ca151af8bfd3905c2427ebef620bc9286484121adf29131c0 v1.21.9: 195d5387f2a6ca7b8ab5c2134b4b6cc27f29372f54b771947ba7c18ee983fbe6 @@ -373,12 +405,15 @@ kubectl_checksums: v1.21.1: 58785190e2b4fc6891e01108e41f9ba5db26e04cebb7c1ac639919a931ce9233 v1.21.0: 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0 ppc64le: + v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75 + v1.23.6: 3fdba4f852046b0ee782048cad9c1fe4db9c98cb882ff78b5bca4632984c7700 v1.23.5: d625dbea2879d12ca1c61b1c00084405a34514abaea1096110c8c8661cfac84f v1.23.4: 1648768124315c5cbcfa6c24a31a34037558c09b91ead60267e13d6c7f3b597b v1.23.3: 
7297e595ed549bac93decda41c9830a3e032fd374467d679c98ef35dcdd1d2aa v1.23.2: 97d50dc4ff0a6c70bbfcbd45f6959e6201c6317392b2894008017380669f6015 v1.23.1: 514e50afdb5b8953adfffe4941e903748348830bdd82805fd4489c3334a02a4a v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851 + v1.22.9: 4ac554b2eb811c10276761ec185e1dbd96b24df4ed141159960c2325d6451f6e v1.22.8: 30d5cba5bdee3bb9395a988867a161ff52e7dc01a40cd4fa2a2adb1c08b76227 v1.22.7: b25bcc11619ea61a60a1cfa8bfd4ef15ccb8db008251013b3473cc04082754bc v1.22.6: d9acb45bcbcead81e8f61572dd800d82e605af2532edb4be1633b732b009d2e2 @@ -388,6 +423,7 @@ kubectl_checksums: v1.22.2: f8c8c4734846c56a8eae6e5c877c84e38513337ea1ca08d63e019ffe82114342 v1.22.1: 4eced82fb83c405937c35c18de5ac25befa68ca5ab016b3d279011d7f3701eea v1.22.0: 7ea30171a5db9dfbdc240674f5cde00fb75a8193ef73783950b8d10c810b6a5b + v1.21.12: 7c537ace6677ea3ea7fff2e412aabbb858174edee687cb3a26d33ab16c2a16c8 v1.21.11: 1a617321711c2b8b8a5b1ba909f5f3c4e6f6ed7284fa24ac3a527fdada31abcb v1.21.10: ce2ea17f905f683ec3a4aa87c0e2f4a642e5ee3a059a0b6836c3c3985015d577 v1.21.6: c16d92482eafabf78c23ae49c6e823c141a0d96f0e24a5b653469591533b3340 @@ -399,12 +435,15 @@ kubectl_checksums: v1.21.0: 3d8702c2d341bbed07f6e9c952815c558034d817e3d1a672c1d7d042b021e9c0 kubeadm_checksums: arm: + v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2 + v1.23.6: da2221f593e63195736659e96103a20e4b7f2060c3030e8111a4134af0d37cfb v1.23.5: 9ea3e52cb236f446a33cf69e4ed6ac28a76103c1e351b2675cb9bfcb77222a61 v1.23.4: 9ca72cf1e6bbbe91bf634a18571c84f3fc36ba5fcd0526b14432e87b7262a5ee v1.23.3: cb2513531111241bfb0f343cff18f7b504326252ae080bb69ad1ccf3e31a2753 v1.23.2: 63a6ca7dca76475ddef84e4ff84ef058ee2003d0e453b85a52729094025d158e v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369 v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda + v1.22.9: f68ca35fc71691e599d4913de58b6d77abcb2d27c324abc23388b4383b5299ea v1.22.8: 
f55fce83ae69b0f660a0fbdd2d05681d2e29a1119d7cce890fe1f50724bdcc60 v1.22.7: 26b3d79d88e81bf354d716fa48210b0358d2f6ca99cba06eb7640ac1e32724b8 v1.22.6: ad23ad06e83f2466f78652221f73fd58d23d6122b3395c24d9a3be779f6afa49 @@ -414,6 +453,7 @@ kubeadm_checksums: v1.22.2: 6ccc26494160e19468b0cb55d56b2d5c62d21424fac79cb66402224c2bf73a0d v1.22.1: cc08281c5261e860df9a0b5040b8aa2e6d202a243daf25556f5f6d3fd8f2e1e9 v1.22.0: 6a002deb0ee191001d5c0e0435e9a995d70aa376d55075c5f61e70ce198433b8 + v1.21.12: ba2292581d78e615775d97f8dad8e2b7668aa70347a3dfa814598678833b8bf0 v1.21.11: 4c59ecfaac03987a839310fe69867021225d06ebf62b04929ae3e13e6dcfa6b3 v1.21.10: ba6fdf511ce4521b89d2674d31014a64da78f351763adbbb32ae13b19535f0be v1.21.9: 855e5dff65343245136eb13c0ce89f830a5a22269536834cf50fdb2e47789ad5 @@ -427,12 +467,15 @@ kubeadm_checksums: v1.21.1: d2a6b582ae5407f2dcd3da902060cadbe5212577ffc9f546245e0d83d4490582 v1.21.0: 878e4c848412c3d6ac35323e6be1bd09f9648069ea1caa8e618fd2a6a6925b2f arm64: + v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004 + v1.23.6: a4db7458e224c3a2a7b468fc2704b31fec437614914b26a9e3d9efb6eecf61ee v1.23.5: 22a8468abc5d45b3415d694ad52cc8099114248c3d1fcf4297ec2b336f5cc274 v1.23.4: 90fd5101e321053cdb66d165879a9cde18f19ba9bb8eae152fd4f4fcbe497be1 v1.23.3: 5eceefa3ca737ff1532f91bdb9ef7162882029a2a0300b4348a0980249698398 v1.23.2: a29fcde7f92e1abfe992e99f415d3aee0fa381478b4a3987e333438b5380ddff v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40 v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6 + v1.22.9: 0168c60d1997435b006b17c95a1d42e55743048cc50ee16c8774498aa203a202 v1.22.8: 67f09853d10434347eb75dbb9c63d57011ba3e4f7e1b320a0c30612b8185be8c v1.22.7: 2ae0287769a70f442757e49af0ecd9ca2c6e5748e8ba72cb822d669a7aeeb8fa v1.22.6: bc10e4fb42a182515f4232205bea53f90270b8f80ec1a6c1cc3301bff05e86b7 @@ -442,6 +485,7 @@ kubeadm_checksums: v1.22.2: 77b4c6a56ae0ec142f54a6f5044a7167cdd7193612b04b77bf433ffe1d1918ef v1.22.1: 
85df7978b2e5bb78064ed0bcce14a39d105a1a3968bb92ee5d2f96a1fa09ed12 v1.22.0: 9fc14b993de2c275b54445255d7770bd1d6cdb49f4cf9c227c5b035f658a2351 + v1.21.12: 6b59aab97cabb8becdd0aa1260bc0553998c8e6511507c07b0fa231c0865211d v1.21.11: 97117a6d984ff88628654494181b62502cbf4c310af70d4de92dab35482900e5 v1.21.10: 7607bfd40317a24a276e452b46a26a7298dde2988fce826f1ee0fe9355eae786 v1.21.9: 8947309c985911a99fb0a6e30f9ca85d9b7adc1215149e45e5be150c7e5e5de9 @@ -455,12 +499,15 @@ kubeadm_checksums: v1.21.1: 1c9a93ac74f2756c1eb40a9d18bb7e146eeab0b33177c0f66f5e617ed7261d1b v1.21.0: 50bb95d1827455346b5643dcf83a52520733c3a582b8b1ffb50f04a8e66f00e7 amd64: + v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318 + v1.23.6: 9213c7d738e86c9a562874021df832735236fcfd5599fd4474bab3283d34bfd7 v1.23.5: 8eebded187ee84c97003074eaa347e34131fef3acdf3e589a9b0200f94687667 v1.23.4: c91912c9fd34a50492f889e08ff94c447fdceff150b588016fecc9051a1e56b8 v1.23.3: 57ec7f2921568dcf4cda0699b877cc830d49ddd2709e035c339a5afc3b83586f v1.23.2: 58487391ec37489bb32fe532e367995e9ecaeafdb65c2113ff3675e7a8407219 v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34 v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0 + v1.22.9: e3061f3a9c52bff82ae740c928fe389a256964a5756d691758bf3611904d7183 v1.22.8: fc10b4e5b66c9bfa6dc297bbb4a93f58051a6069c969905ef23c19680d8d49dc v1.22.7: 7e4be37fc5ddeeae732886bf83c374198813e76d84ed2f6590145e08ece1a8b2 v1.22.6: 0bf8e47ad91215cd8c5e0ded565645aeb1ad6f0a9223a2486eb913bff929d472 @@ -470,6 +517,7 @@ kubeadm_checksums: v1.22.2: 4ff09d3cd2118ee2670bc96ed034620a9a1ea6a69ef38804363d4710a2f90d8c v1.22.1: 50a5f0d186d7aefae309539e9cc7d530ef1a9b45ce690801655c2bee722d978c v1.22.0: 90a48b92a57ff6aef63ff409e2feda0713ca926b2cd243fe7e88a84c483456cc + v1.21.12: f6ef1d2d19ba0aaaba4c57c4eda94e2725c3f7e9412feb5d6fe12c1827e7c1cb v1.21.11: 3514ea5acaae9c2779a341deb24832df17722cb612fa7a78d34f602f91e94d17 v1.21.10: 
61aaadd98806d979b65e031a144d9379390d26ccb5383d47bdd8b7c727e94a7b v1.21.9: 3333116f9f0d72e0598f52dcbef7ecab1ce88192fdcfd5384ca919fdc075e8d5 @@ -483,12 +531,15 @@ kubeadm_checksums: v1.21.1: 1553c07a6a777c4cf71d45d5892915f0ea6586b8a80f9fea39e7a659d6315d42 v1.21.0: 7bdaf0d58f0d286538376bc40b50d7e3ab60a3fe7a0709194f53f1605129550f ppc64le: + v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c + v1.23.6: 0b975ac27fa794134a5a25dfbf6df598e2b62e483134326788443131f6d8e5e4 v1.23.5: bec93d18fd5e5ef6d5da3d18edb282e58a64ff34ec3544d82dc31a3255d9ed1d v1.23.4: 9c681254bf7cfce8b94326364d677f1944c0afb070f666f7fd438bd37133f7cc v1.23.3: fd87d972db45dd6f623dd4ca06075e7e697f1bdaa7936c5c06924d1189ba7ff8 v1.23.2: 2d76c4d9795e25867b9b6fe7853f94efb8c2f2b3052adab4073fddca93eedc01 v1.23.1: 6b645c868834197bcb25104f468c601477967341aba6326bdf5d0957dcaa9edc v1.23.0: 895c84055bca698f50ecdf1fc01d2f368563f77384b1dd00bdacbf6d0c825cc1 + v1.22.9: aca9539afc208343b0138d2e9e56b018ea782b74068389e7381e1c361f584446 v1.22.8: 715dcac3dc5055306fc9b56352f5323df7947479c831993fecadc3a7c9072071 v1.22.7: 1496cb57091c6189728f295fbc6f8ea944f08fa9f844d917f7f7ca1a3b896acb v1.22.6: a3aed2613b0566d1c829c15ff1206c25743bade24c4087b039824860d07de517 @@ -498,6 +549,7 @@ kubeadm_checksums: v1.22.2: 115bdf1e9e4821cf02aa77875930b4640cfba6b3560492ac75fe6159e897be6f v1.22.1: 45e5145abf4700ddb5de3469ddb6b316e7588595e4a3e64f44064738808b9c97 v1.22.0: 22a7d995e78e93abca2999c911b065d63f51f33982dc305f23762a8d7c045d25 + v1.21.12: c66a01b7fe383e5db61bd0e8d9c579a025db4699c6ca48c6f786ca4eef9de3ae v1.21.11: 5e05d8d87b511cdcea212ab07c42bceb9386d033941e9b6a883b89f38b19de29 v1.21.10: fc44609f9e88f5c5bde04b2b4f39cfaf55aa03bf17e2e100e1d99a3d2cdaad5a v1.21.6: b292528714c770c6164bb5120f8499ff05134b97f2b35ca269a6109ff49f48b1 @@ -519,24 +571,17 @@ etcd_binary_checksums: v3.5.0: 444e10e6880595d75aaf55762901c722049b29d56fef50b2f23464bb7f9db74d v3.5.1: 86203022e23d7368bac23d96095270dc6300f356ea882e435926a9effd7e5f0e v3.5.2: 
256cad725542d6fd463e81b8a19b86ead4cdfe113f7fb8a1eabc6c32c25d068b + v3.5.3: 8b00f2f51568303799368ee4a3c9b9ff8a3dd9f8b7772c4f6589e46bc62f7115 amd64: v3.5.0: 864baa0437f8368e0713d44b83afe21dce1fb4ee7dae4ca0f9dd5f0df22d01c4 v3.5.1: 728a14914217ce60de2e1299fc1a2c2c5564e7ffd0d9dadf3f5073103ab619b4 v3.5.2: c068ea3cdcc8ec8fbef75995cef7f63f8206766cdea6b93a91e6d4cdbe79952a + v3.5.3: e13e119ff9b28234561738cd261c2a031eb1c8688079dcf96d8035b3ad19ca58 ppc64le: v3.5.0: bde45588b66ed2fe0a7082b1cdb08124efff4960edfa6af4dd2f645886004a86 v3.5.1: 4c2598f43c1565428f8b645e741739a1e84300d5f2af3e4b335982862c98dc6f v3.5.2: 3d37187b042a32b7234a1e2a4612374db50c9a50e0cdd63baaa0d52d91335186 - -flannel_cni_binary_checksums: - arm: - v1.0.0: bd36adccabf9974c0a9f9a09bfe35558ddeb83636efefa94bc260ef267e14671 - arm64: - v1.0.0: eb7183498b5d1ae4034dde6184e4396270e29ed88482b295f81b93eb3120c61c - amd64: - v1.0.0: 22dc9152571167f30d59d1e512bb7f4b4978175ddd46a30b24ad247c8c843dd7 - ppc64le: - v1.0.0: 994d206a84013b050677d3e09b53485faaa9e769a43065cf4674efb52fafb18c + v3.5.3: f14154897ca5ad4698383b4c197001340fbe467525f6fab3b89ee8116246480f cni_binary_checksums: arm: @@ -550,29 +595,29 @@ cni_binary_checksums: calicoctl_binary_checksums: arm: - v3.22.1: 0 - v3.21.4: 0 + v3.22.2: 0 + v3.21.5: 0 v3.20.4: 0 v3.19.4: 0 amd64: - v3.22.1: b42be4f7c053f209127a9b9a6945ce11ab3bc0725f2741f92380400b008c6111 - v3.21.4: 3fb4f31bd72f0a570f6a6cf71d7b4c7641a0f142d2239fad32012058f0ec63bc + v3.22.2: 2608fe464b50019e4ade388142f194463e351013ec81da21b111307411865a81 + v3.21.5: 98407b1c608fec0896004767c72cd4b6cf939976d67d3eca121f1f02137c92a7 v3.20.4: f61f3892712e4871fa57f6b9726cd42c36c333ca45bb178df48a15e64b5e8166 v3.19.4: 1b2ac701b05b77a14e0545a7d70915792ce833eea875f7eb7a18ded5a5878d5c arm64: - v3.22.1: 6ad55dd57190f474c2a186ad935f25e71ec8ca25fadf504dacfc1e19b1b70155 - v3.21.4: 5c277b8de4c069f7cf8bd87e02709eeeb668e3a1ca4784aafcf83cdd05568df8 + v3.22.2: 4817dff5a867e4e5e8cfe4571b569cfe73d56e3f9a3bac4ccf5d25a5a681277d + v3.21.5: 
cc73e2b8f5b695b6ab06e7856cd516c1e9ec3e903abb510ef465ca6b530e18e6 v3.20.4: 4115ed46669d2bcfdcdcaec3f3a73345cd96932d8a0e9dde1523e6c02d45f8aa v3.19.4: 50fd7085a6810efd88385df13ad2dfcc0078cd3813ad7bf56b2eb9f18fa7bad0 ppc64le: - v3.22.1: c7bf3d9bb77288e3c384f568eab657b47ef72f5147ce386ac01a60f0f19224ea - v3.21.4: 29b4e2fd9131e01343d4804b1a14eb09a1d50146d54a139a35eb4030e521a5b1 + v3.22.2: 56d39963a201d32dc43dcc3483dbe530b09b4a83005a7e39090f368d354946ba + v3.21.5: 1ebb615b18f9c3fe2d41281d1bc9e3909048b56d2bc76c18431cbeb7a653d24d v3.20.4: 63e2ca9d34c67bb758f478475a3e41735e520535d15f0663cfe00a53bc10af3c v3.19.4: d2cbc2e987859564f618c749ce7cf01b754683f2a6fc4e9e873cdd756404b48d calico_crds_archive_checksums: - v3.22.1: 7b778b88a8ab6574d0f84918c831bfab6034389e03435ef9d9e3b53922334af3 - v3.21.4: e778b230e82378d848e6e75387ab5b6f78e0049953cdf30027eedba2cdbb4e57 + v3.22.2: 839f2ceb2b2227801c3d588d8e9b1bb7f05520a4afe7665e928fe30919d30f5b + v3.21.5: ffbbaa2bc32b01bf160828d2cfd4504d83c69cb1f74c0028349181ed61bad635 v3.20.4: 47d749ee79732f320669350ef8a7e3ca16b67ad6c97c2e0da8278d6c2659fce3 v3.19.4: 8f271c23442ca20d54a598d046312af0144e8bc53a14809945ce1dfc0cb1b7d4 @@ -610,13 +655,13 @@ krew_archive_checksums: helm_archive_checksums: arm: - v3.8.0: 05e900d0688edd8d455e0d4c51b419cd2b10120d485be7a1262582f51c92e941 + v3.8.2: 3447782673a8dec87f0736d3fcde5c2af6316b0dd19f43b7ffaf873e4f5a486e arm64: - v3.8.0: 23e08035dc0106fe4e0bd85800fd795b2b9ecd9f32187aa16c49b0a917105161 + v3.8.2: 238db7f55e887f9c1038b7e43585b84389a05fff5424e70557886cad1635b3ce amd64: - v3.8.0: 8408c91e846c5b9ba15eb6b1a5a79fc22dd4d33ac6ea63388e5698d1b2320c8b + v3.8.2: 6cb9a48f72ab9ddfecab88d264c2f6508ab3cd42d9c09666be16a7bf006bed7b ppc64le: - v3.8.0: 5070fa5188e7bc798dd54bc1ea9fc4cda623d9ff45eedb05ec93db234309f391 + v3.8.2: 144fcfface6dc99295c1cfdd39238f188c601b96472e933e17054eddd1acb8fa cri_dockerd_archive_checksums: arm: @@ -689,24 +734,32 @@ kata_containers_binary_checksums: 2.2.2: 0 2.2.3: 0 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 
amd64: 2.0.4: 022a60c2d92a5ab9a5eb83d5a95154a2d06fdc2206b2a473d902ccc86766371a 2.1.1: a83591d968cd0f1adfb5025d7aa33ca1385d4b1165ff10d74602302fc3c0373f 2.2.2: 2e3ac77b8abd4d839cf16780b57aee8f3d6e1f19489edd7d6d8069ea3cc3c18a 2.2.3: e207ab5c8128b50fe61f4f6f98fd34af0fa5ebc0793862be6d13a2674321774f 2.3.0: 430fa55b387b3bafbbabb7e59aa8c809927a22f8d836732a0719fd2e1d131b31 + 2.4.0: fca40fa4e91efc79c75367ffe09ca32ad795d302aacb91992874f40bfc00348f + 2.4.1: e234ffce779d451dc2a170b394b91d35b96e44ea50dc4a3256defa603efdf607 arm64: 2.0.4: 0 2.1.1: 0 2.2.2: 0 2.2.3: 0 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 ppc64le: 2.0.4: 0 2.1.1: 0 2.2.2: 0 2.2.3: 0 2.3.0: 0 + 2.4.0: 0 + 2.4.1: 0 gvisor_runsc_binary_checksums: arm: @@ -730,20 +783,16 @@ gvisor_containerd_shim_binary_checksums: nerdctl_archive_checksums: arm: - 0.18.0: a9832ad756d624d1cb39bd55a328956553890b00e081ccccef4da5c34e4b8671 + 0.19.0: 0dc6eadb9dfdb1579a11d7ce657f81f10d7f0c9ad149f5136d955c3f401e4339 arm64: - 0.18.0: 4f73a20d4872da3771a1f7088cae707bf119d35c2e97ea33c9d31b9d167ad956 + 0.19.0: 9d33eb3dd1b4b8fbe969d0f2e26d3a19e443955bd2d2b5e0911ea0066f79a160 amd64: - 0.18.0: ea44e112d3365c8d86f9189b6fae250767e383dfa7c01a8c05f3de2a7b7c8720 + 0.19.0: 9cf4d1a2b18baf0c713d7746f896fd6a9d18a130ea8f590c6ed11474748b1733 ppc64le: - 0.18.0: 2a3d61e0d2e121c3b86677f3470f18b8bd7aa12a8b04b91a2faeddd95215c7e0 + 0.19.0: 2229f1a68071eb5f60034b72359b4004e80628a8470cd99e2d41dd479c4b90f4 containerd_archive_checksums: arm: - 1.4.9: 0 - 1.4.11: 0 - 1.4.12: 0 - 1.4.13: 0 1.5.5: 0 1.5.7: 0 1.5.8: 0 @@ -753,11 +802,9 @@ containerd_archive_checksums: 1.6.0: 0 1.6.1: 0 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 arm64: - 1.4.9: 0 - 1.4.11: 0 - 1.4.12: 0 - 1.4.13: 0 1.5.5: 0 1.5.7: 0 1.5.8: 0 @@ -767,11 +814,9 @@ containerd_archive_checksums: 1.6.0: 6eff3e16d44c89e1e8480a9ca078f79bab82af602818455cc162be344f64686a 1.6.1: fbeec71f2d37e0e4ceaaac2bdf081295add940a7a5c7a6bcc125e5bbae067791 1.6.2: a4b24b3c38a67852daa80f03ec2bc94e31a0f4393477cd7dc1c1a7c2d3eb2a95 + 1.6.3: 
354e30d52ff94bd6cd7ceb8259bdf28419296b46cf5585e9492a87fdefcfe8b2 + 1.6.4: 0205bd1907154388dc85b1afeeb550cbb44c470ef4a290cb1daf91501c85cae6 amd64: - 1.4.9: 346f88ad5b973960ff81b5539d4177af5941ec2e4703b479ca9a6081ff1d023b - 1.4.11: 80c47ec5ce2cd91a15204b5f5b534892ca653e75f3fba0c451ca326bca45fb00 - 1.4.12: 26bb35ee8a2467029ca450352112ba3a0d2b8bf6b70bf040f62d91f3c501736c - 1.4.13: bc8b3e6abe99143788de5afaaf896cb7f229733f1ebd980eec48e71cc21c0a6a 1.5.5: 8efc527ffb772a82021800f0151374a3113ed2439922497ff08f2596a70f10f1 1.5.7: 109fc95b86382065ea668005c376360ddcd8c4ec413e7abe220ae9f461e0e173 1.5.8: feeda3f563edf0294e33b6c4b89bd7dbe0ee182ca61a2f9b8c3de2766bcbc99b @@ -781,11 +826,9 @@ containerd_archive_checksums: 1.6.0: f77725e4f757523bf1472ec3b9e02b09303a5d99529173be0f11a6d39f5676e9 1.6.1: c1df0a12af2be019ca2d6c157f94e8ce7430484ab29948c9805882df40ec458b 1.6.2: 3d94f887de5f284b0d6ee61fa17ba413a7d60b4bb27d756a402b713a53685c6a + 1.6.3: 306b3c77f0b5e28ed10d527edf3d73f56bf0a1fb296075af4483d8516b6975ed + 1.6.4: f23c8ac914d748f85df94d3e82d11ca89ca9fe19a220ce61b99a05b070044de0 ppc64le: - 1.4.9: 0 - 1.4.11: 0 - 1.4.12: 0 - 1.4.13: 0 1.5.5: 0 1.5.7: 0 1.5.8: 0 @@ -795,9 +838,10 @@ containerd_archive_checksums: 1.6.0: 0 1.6.1: 0 1.6.2: 0 + 1.6.3: 0 + 1.6.4: 0 etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch][etcd_version] }}" -flannel_cni_binary_checksum: "{{ flannel_cni_binary_checksums[image_arch][flannel_cni_version] }}" cni_binary_checksum: "{{ cni_binary_checksums[image_arch][cni_version] }}" kubelet_binary_checksum: "{{ kubelet_checksums[image_arch][kube_version] }}" kubectl_binary_checksum: "{{ kubectl_checksums[image_arch][kube_version] }}" @@ -828,8 +872,10 @@ containerd_archive_checksum: "{{ containerd_archive_checksums[image_arch][contai kube_proxy_image_repo: "{{ kube_image_repo }}/kube-proxy" etcd_image_repo: "{{ quay_image_repo }}/coreos/etcd" etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}" -flannel_image_repo: 
"{{ quay_image_repo }}/coreos/flannel" +flannel_image_repo: "{{ docker_image_repo }}/flannelcni/flannel" flannel_image_tag: "{{ flannel_version }}-{{ image_arch }}" +flannel_init_image_repo: "{{ docker_image_repo }}/flannelcni/flannel-cni-plugin" +flannel_init_image_tag: "{{ flannel_cni_version }}-{{ image_arch }}" calico_node_image_repo: "{{ quay_image_repo }}/calico/node" calico_node_image_tag: "{{ calico_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}" calico_cni_image_repo: "{{ quay_image_repo }}/calico/cni" @@ -954,6 +1000,9 @@ cinder_csi_plugin_image_tag: "v1.22.0" aws_ebs_csi_plugin_image_repo: "{{ docker_image_repo }}/amazon/aws-ebs-csi-driver" aws_ebs_csi_plugin_image_tag: "v0.5.0" +gcp_pd_csi_plugin_image_repo: "{{ kube_image_repo }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver" +gcp_pd_csi_plugin_image_tag: "v1.4.0" + azure_csi_image_repo: "mcr.microsoft.com/oss/kubernetes-csi" azure_csi_provisioner_image_tag: "v2.2.2" azure_csi_attacher_image_tag: "v3.3.0" @@ -978,6 +1027,7 @@ dashboard_metrics_scraper_tag: "v1.0.7" metallb_speaker_image_repo: "{{ quay_image_repo }}/metallb/speaker" metallb_controller_image_repo: "{{ quay_image_repo }}/metallb/controller" +metallb_version: v0.12.1 downloads: netcheck_server: @@ -1016,19 +1066,6 @@ downloads: groups: - etcd - flannel_cni: - enabled: "{{ kube_network_plugin == 'flannel' }}" - file: true - version: "{{ flannel_cni_version }}" - dest: "{{ local_release_dir }}/flannel-{{ flannel_cni_version }}-{{ image_arch }}" - sha256: "{{ flannel_cni_binary_checksum }}" - url: "{{ flannel_cni_download_url }}" - unarchive: false - owner: "root" - mode: "0755" - groups: - - k8s_cluster - cni: enabled: true file: true @@ -1256,6 +1293,15 @@ downloads: groups: - k8s_cluster + flannel_init: + enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + container: true + repo: "{{ flannel_init_image_repo }}" + tag: "{{ flannel_init_image_tag }}" + sha256: "{{ 
flannel_init_digest_checksum|default(None) }}" + groups: + - k8s_cluster + calicoctl: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" file: true @@ -1666,6 +1712,24 @@ downloads: groups: - kube_control_plane + metallb_speaker: + enabled: "{{ metallb_enabled }}" + container: true + repo: "{{ metallb_speaker_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_speaker_digest_checksum|default(None) }}" + groups: + - kube_control_plane + + metallb_controller: + enabled: "{{ metallb_enabled }}" + container: true + repo: "{{ metallb_controller_image_repo }}" + tag: "{{ metallb_version }}" + sha256: "{{ metallb_controller_digest_checksum|default(None) }}" + groups: + - kube_control_plane + download_defaults: container: false file: false diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index c477c2a4198..ddbddba4b11 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -11,53 +11,6 @@ delay: 6 when: inventory_hostname == groups['kube_control_plane'][0] -- name: Kubernetes Apps | Check AppArmor status - command: which apparmor_parser - register: apparmor_status - when: - - podsecuritypolicy_enabled - - inventory_hostname == groups['kube_control_plane'][0] - failed_when: false - -- name: Kubernetes Apps | Set apparmor_enabled - set_fact: - apparmor_enabled: "{{ apparmor_status.rc == 0 }}" - when: - - podsecuritypolicy_enabled - - inventory_hostname == groups['kube_control_plane'][0] - -- name: Kubernetes Apps | Render templates for PodSecurityPolicy - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - mode: 0640 - register: psp_manifests - with_items: - - {file: psp.yml, type: psp, name: psp} - - {file: psp-cr.yml, type: clusterrole, name: psp-cr} - - {file: psp-crb.yml, type: rolebinding, name: psp-crb} - when: - - podsecuritypolicy_enabled - - 
inventory_hostname == groups['kube_control_plane'][0] - -- name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy - kube: - name: "{{ item.item.name }}" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.item.type }}" - filename: "{{ kube_config_dir }}/{{ item.item.file }}" - state: "latest" - register: result - until: result is succeeded - retries: 10 - delay: 6 - with_items: "{{ psp_manifests.results }}" - when: - - inventory_hostname == groups['kube_control_plane'][0] - - not item is skipped - loop_control: - label: "{{ item.item.file }}" - - name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes template: src: "node-crb.yml.j2" diff --git a/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml b/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml index 8d90728d65c..6eacb7902a4 100644 --- a/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml +++ b/roles/kubernetes-apps/container_runtimes/kata_containers/defaults/main.yaml @@ -1,5 +1,5 @@ --- kata_containers_qemu_overhead: true -kata_containers_qemu_overhead_fixed_cpu: 10m -kata_containers_qemu_overhead_fixed_memory: 290Mi +kata_containers_qemu_overhead_fixed_cpu: 250m +kata_containers_qemu_overhead_fixed_memory: 160Mi diff --git a/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml b/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml index a6a447e5541..1ee662ea392 100644 --- a/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml +++ b/roles/kubernetes-apps/csi_driver/gcp_pd/defaults/main.yml @@ -1,3 +1,2 @@ --- gcp_pd_csi_controller_replicas: 1 -gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0" diff --git a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 index c63322c07f1..4762093dc1e 100644 --- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 +++ 
b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-controller.yml.j2 @@ -23,18 +23,19 @@ spec: priorityClassName: csi-gce-pd-controller containers: - name: csi-provisioner - image: {{ gcp_pd_csi_image_repo }}/csi-provisioner:{{ gcp_pd_csi_provisioner_image_tag }} + image: {{ csi_provisioner_image_repo }}:{{ csi_provisioner_image_tag }} args: - "--v=5" - "--csi-address=/csi/csi.sock" - "--feature-gates=Topology=true" + - "--default-fstype=ext4" # - "--run-controller-service=false" # disable the controller service of the CSI driver # - "--run-node-service=false" # disable the node service of the CSI driver volumeMounts: - name: socket-dir mountPath: /csi - name: csi-attacher - image: {{ gcp_pd_csi_image_repo }}/csi-attacher:{{ gcp_pd_csi_attacher_image_tag }} + image: {{ csi_attacher_image_repo }}:{{ csi_attacher_image_tag }} args: - "--v=5" - "--csi-address=/csi/csi.sock" @@ -42,7 +43,7 @@ spec: - name: socket-dir mountPath: /csi - name: csi-resizer - image: {{ gcp_pd_csi_image_repo }}/csi-resizer:{{ gcp_pd_csi_resizer_image_tag }} + image: {{ csi_resizer_image_repo }}:{{ csi_resizer_image_tag }} args: - "--v=5" - "--csi-address=/csi/csi.sock" @@ -52,7 +53,7 @@ spec: - name: gce-pd-driver # Don't change base image without changing pdImagePlaceholder in # test/k8s-integration/main.go - image: {{ gcp_pd_csi_image_repo }}/gcp-compute-persistent-disk-csi-driver:{{ gcp_pd_csi_driver_image_tag }} + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} args: - "--v=5" - "--endpoint=unix:/csi/csi.sock" diff --git a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 index 82c1f6bc216..204ff972e75 100644 --- a/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/gcp_pd/templates/gcp-pd-csi-node.yml.j2 @@ -21,7 +21,7 @@ spec: serviceAccountName: csi-gce-pd-node-sa containers: - name: 
csi-driver-registrar - image: {{ gcp_pd_csi_image_repo }}/csi-node-driver-registrar:{{ gcp_pd_csi_registrar_image_tag }} + image: {{ csi_node_driver_registrar_image_repo }}:{{ csi_node_driver_registrar_image_tag }} args: - "--v=5" - "--csi-address=/csi/csi.sock" @@ -45,7 +45,7 @@ spec: privileged: true # Don't change base image without changing pdImagePlaceholder in # test/k8s-integration/main.go - image: {{ gcp_pd_csi_image_repo }}/gcp-compute-persistent-disk-csi-driver:{{ gcp_pd_csi_driver_image_tag }} + image: {{ gcp_pd_csi_plugin_image_repo }}:{{ gcp_pd_csi_plugin_image_tag }} args: - "--v=5" - "--endpoint=unix:/csi/csi.sock" diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 index b7c33169872..d2f0758ed59 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-rbac.yml.j2 @@ -40,7 +40,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "patch"] + verbs: ["get", "list", "watch", "patch", "update"] {% if external_vsphere_version >= "7.0u1" %} - apiGroups: ["cns.vmware.com"] resources: ["triggercsifullsyncs"] diff --git a/roles/kubernetes-apps/metallb/defaults/main.yml b/roles/kubernetes-apps/metallb/defaults/main.yml index 66a43fa3cbf..cd2162e4761 100644 --- a/roles/kubernetes-apps/metallb/defaults/main.yml +++ b/roles/kubernetes-apps/metallb/defaults/main.yml @@ -1,6 +1,5 @@ --- metallb_enabled: false -metallb_version: v0.12.1 metallb_log_level: info metallb_protocol: "layer2" metallb_port: "7472" diff --git a/roles/kubernetes/control-plane/defaults/main/main.yml b/roles/kubernetes/control-plane/defaults/main/main.yml index ad703702950..42f9c7654f7 100644 --- a/roles/kubernetes/control-plane/defaults/main/main.yml +++ 
b/roles/kubernetes/control-plane/defaults/main/main.yml @@ -18,6 +18,11 @@ kube_apiserver_node_port_range: "30000-32767" # ETCD backend for k8s data kube_apiserver_storage_backend: etcd3 +# CIS 1.2.26 +# Validate that the service account token +# in the request is actually present in etcd. +kube_apiserver_service_account_lookup: true + kube_etcd_cacert_file: ca.pem kube_etcd_cert_file: node-{{ inventory_hostname }}.pem kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem @@ -89,6 +94,19 @@ kube_apiserver_pod_eviction_unreachable_timeout_seconds: "300" # 1.10+ admission plugins kube_apiserver_enable_admission_plugins: [] +# enable admission plugins configuration +kube_apiserver_admission_control_config_file: false + +# data structure to configure EventRateLimit admission plugin +# this should have the following structure: +# kube_apiserver_admission_event_rate_limits: +# : +# type: +# qps: +# burst: +# cache_size: +kube_apiserver_admission_event_rate_limits: {} + # 1.10+ list of disabled admission plugins kube_apiserver_disable_admission_plugins: [] @@ -98,13 +116,17 @@ kube_api_runtime_config: [] ## Enable/Disable Kube API Server Authentication Methods kube_token_auth: false kube_oidc_auth: false + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication kube_webhook_token_auth: false kube_webhook_token_auth_url_skip_tls_verify: false -## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication # kube_webhook_token_auth_url: https://... -kube_webhook_authorization: false +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + ## Variables for webhook token authz https://kubernetes.io/docs/reference/access-authn-authz/webhook/ # kube_webhook_authorization_url: https://... 
+kube_webhook_authorization: false kube_webhook_authorization_url_skip_tls_verify: false diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 23f798d649c..f339989c8a2 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -83,6 +83,30 @@ dest: "{{ kube_config_dir }}/kubeadm-config.yaml" mode: 0640 +- name: kubeadm | Create directory to store admission control configurations + file: + path: "{{ kube_config_dir }}/admission-controls" + state: directory + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config file + template: + src: "admission-controls.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/admission-controls.yaml" + mode: 0640 + when: kube_apiserver_admission_control_config_file + +- name: kubeadm | Push admission control config files + template: + src: "{{ item|lower }}.{{ kubeadmConfig_api_version }}.yaml.j2" + dest: "{{ kube_config_dir }}/admission-controls/{{ item|lower }}.yaml" + mode: 0640 + when: + - kube_apiserver_admission_control_config_file + - item in kube_apiserver_admission_plugins_needs_configuration + loop: "{{ kube_apiserver_enable_admission_plugins[0].split(',') }}" + - name: kubeadm | Check if apiserver.crt contains all needed SANs shell: | set -o pipefail @@ -182,6 +206,12 @@ tags: - kubeadm_token +- name: PodSecurityPolicy | install PodSecurityPolicy + include_tasks: psp-install.yml + when: + - podsecuritypolicy_enabled + - inventory_hostname == first_kube_control_plane + - name: kubeadm | Join other masters include_tasks: kubeadm-secondary.yml diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml index 8459362cdae..711a2e08394 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml +++ 
b/roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml @@ -18,7 +18,7 @@ --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all --allow-experimental-upgrades - --etcd-upgrade={{ etcd_deployment_type == "kubeadm" | bool | lower }} + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} --force register: kubeadm_upgrade # Retry is because upload config sometimes fails @@ -39,7 +39,7 @@ --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all --allow-experimental-upgrades - --etcd-upgrade={{ etcd_deployment_type == "kubeadm" | bool | lower }} + --etcd-upgrade={{ (etcd_deployment_type == "kubeadm") | bool | lower }} --force register: kubeadm_upgrade when: inventory_hostname != first_kube_control_plane diff --git a/roles/kubernetes/control-plane/tasks/psp-install.yml b/roles/kubernetes/control-plane/tasks/psp-install.yml new file mode 100644 index 00000000000..581d1286df2 --- /dev/null +++ b/roles/kubernetes/control-plane/tasks/psp-install.yml @@ -0,0 +1,38 @@ +--- +- name: Check AppArmor status + command: which apparmor_parser + register: apparmor_status + failed_when: false + changed_when: apparmor_status.rc != 0 + +- name: Set apparmor_enabled + set_fact: + apparmor_enabled: "{{ apparmor_status.rc == 0 }}" + +- name: Render templates for PodSecurityPolicy + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + mode: 0640 + register: psp_manifests + with_items: + - {file: psp.yml, type: psp, name: psp} + - {file: psp-cr.yml, type: clusterrole, name: psp-cr} + - {file: psp-crb.yml, type: rolebinding, name: psp-crb} + +- name: Add policies, roles, bindings for PodSecurityPolicy + kube: + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" + state: "latest" + register: result + until: result is succeeded + retries: 10 + delay: 6 + with_items: "{{ 
psp_manifests.results }}" + environment: + KUBECONFIG: "{{ kube_config_dir }}/admin.conf" + loop_control: + label: "{{ item.item.file }}" \ No newline at end of file diff --git a/roles/kubernetes/control-plane/templates/admission-controls.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/admission-controls.v1beta2.yaml.j2 new file mode 100644 index 00000000000..0bb4517c298 --- /dev/null +++ b/roles/kubernetes/control-plane/templates/admission-controls.v1beta2.yaml.j2 @@ -0,0 +1,9 @@ +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +{% for plugin in kube_apiserver_enable_admission_plugins[0].split(',') %} +{% if plugin in kube_apiserver_admission_plugins_needs_configuration %} +- name: {{ plugin }} + path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml +{% endif %} +{% endfor %} diff --git a/roles/kubernetes/control-plane/templates/eventratelimit.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/eventratelimit.v1beta2.yaml.j2 new file mode 100644 index 00000000000..0d7867070e4 --- /dev/null +++ b/roles/kubernetes/control-plane/templates/eventratelimit.v1beta2.yaml.j2 @@ -0,0 +1,11 @@ +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +{% for limit in kube_apiserver_admission_event_rate_limits.values() %} +- type: {{ limit.type }} + qps: {{ limit.qps }} + burst: {{ limit.burst }} +{% if limit.cache_size is defined %} + cacheSize: {{ limit.cache_size }} +{% endif %} +{% endfor %} diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 index bf7868bd8a3..9b2e473981d 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 @@ -126,6 +126,9 @@ apiServer: {% if kube_apiserver_enable_admission_plugins|length > 0 %} enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | 
join(',') }} {% endif %} +{% if kube_apiserver_admission_control_config_file %} + admission-control-config-file: {{ kube_config_dir }}/admission-controls.yaml +{% endif %} {% if kube_apiserver_disable_admission_plugins|length > 0 %} disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }} {% endif %} @@ -143,6 +146,9 @@ apiServer: {% if kube_token_auth|default(true) %} token-auth-file: {{ kube_token_dir }}/known_tokens.csv {% endif %} +{% if kube_apiserver_service_account_lookup %} + service-account-lookup: "{{ kube_apiserver_service_account_lookup }}" +{% endif %} {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: "{{ kube_oidc_url }}" oidc-client-id: "{{ kube_oidc_client_id }}" @@ -249,6 +255,13 @@ apiServer: readOnly: false {% endif %} {% endif %} +{% if kube_apiserver_admission_control_config_file %} + - name: admission-control-configs + hostPath: {{ kube_config_dir }}/admission-controls + mountPath: {{ kube_config_dir }} + readOnly: false + pathType: DirectoryOrCreate +{% endif %} {% for volume in apiserver_extra_volumes %} - name: {{ volume.name }} hostPath: {{ volume.hostPath }} diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp-cr.yml.j2 b/roles/kubernetes/control-plane/templates/psp-cr.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cluster_roles/templates/psp-cr.yml.j2 rename to roles/kubernetes/control-plane/templates/psp-cr.yml.j2 diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp-crb.yml.j2 b/roles/kubernetes/control-plane/templates/psp-crb.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cluster_roles/templates/psp-crb.yml.j2 rename to roles/kubernetes/control-plane/templates/psp-crb.yml.j2 diff --git a/roles/kubernetes-apps/cluster_roles/templates/psp.yml.j2 b/roles/kubernetes/control-plane/templates/psp.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cluster_roles/templates/psp.yml.j2 
rename to roles/kubernetes/control-plane/templates/psp.yml.j2 diff --git a/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 b/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 index 4d0c1eccbc5..f152d11beb4 100644 --- a/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/webhook-token-auth-config.yaml.j2 @@ -4,6 +4,9 @@ clusters: cluster: server: {{ kube_webhook_token_auth_url }} insecure-skip-tls-verify: {{ kube_webhook_token_auth_url_skip_tls_verify }} +{% if kube_webhook_token_auth_ca_data is defined %} + certificate-authority-data: {{ kube_webhook_token_auth_ca_data }} +{% endif %} # users refers to the API server's webhook configuration. users: diff --git a/roles/kubernetes/control-plane/vars/main.yaml b/roles/kubernetes/control-plane/vars/main.yaml new file mode 100644 index 00000000000..57a39f78422 --- /dev/null +++ b/roles/kubernetes/control-plane/vars/main.yaml @@ -0,0 +1,3 @@ +--- +# list of admission plugins that needs to be configured +kube_apiserver_admission_plugins_needs_configuration: [EventRateLimit] diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 index 5bf8b176641..1245e59af04 100644 --- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 +++ b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 @@ -117,3 +117,9 @@ shutdownGracePeriodCriticalPods: {{ kubelet_shutdown_grace_period_critical_pods memorySwap: swapBehavior: {{ kubelet_swap_behavior|default("LimitedSwap") }} {% endif %} +{% if kubelet_streaming_connection_idle_timeout is defined %} +streamingConnectionIdleTimeout: {{ kubelet_streaming_connection_idle_timeout }} +{% endif %} +{% if kubelet_make_iptables_util_chains is defined %} +makeIPTablesUtilChains: {{ kubelet_make_iptables_util_chains | bool }} +{% endif %} diff --git 
a/roles/kubernetes/node/vars/ubuntu-22.yml b/roles/kubernetes/node/vars/ubuntu-22.yml new file mode 100644 index 00000000000..59bc55dda1a --- /dev/null +++ b/roles/kubernetes/node/vars/ubuntu-22.yml @@ -0,0 +1,2 @@ +--- +kube_resolv_conf: "/run/systemd/resolve/resolv.conf" diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index eae32a4c682..a58c2d7de13 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -183,6 +183,13 @@ msg: "kube_pods_subnet cannot be the same network segment as kube_service_addresses" run_once: yes +- name: "Check that IP range is enough for the nodes" + assert: + that: + - 2 ** (kube_network_node_prefix - kube_pods_subnet | ipaddr('prefix')) >= groups['k8s_cluster'] | length + msg: "Not enough IPs are available for the desired node count." + run_once: yes + - name: Stop if unknown dns mode assert: that: dns_mode in ['coredns', 'coredns_dual', 'manual', 'none'] @@ -217,6 +224,12 @@ when: - inventory_hostname in groups.get('etcd',[]) +- name: Stop if container manager is not docker, crio or containerd + assert: + that: container_manager in ['docker', 'crio', 'containerd'] + msg: "The container manager, 'container_manager', must be docker, crio or containerd" + run_once: true + - name: Stop if etcd deployment type is not host or kubeadm when container_manager != docker assert: that: etcd_deployment_type in ['host', 'kubeadm'] diff --git a/roles/kubernetes/preinstall/vars/ubuntu.yml b/roles/kubernetes/preinstall/vars/ubuntu.yml index ed8084ff902..85b3f255a20 100644 --- a/roles/kubernetes/preinstall/vars/ubuntu.yml +++ b/roles/kubernetes/preinstall/vars/ubuntu.yml @@ -1,7 +1,6 @@ --- required_pkgs: - python3-apt - - aufs-tools - apt-transport-https - software-properties-common - conntrack diff --git a/roles/kubespray-defaults/defaults/main.yaml 
b/roles/kubespray-defaults/defaults/main.yaml index e1a35190186..f0ce2598dac 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -15,7 +15,7 @@ is_fedora_coreos: false disable_swap: true ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.23.5 +kube_version: v1.23.6 ## The minimum version working kube_version_min_required: v1.21.0 diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 17f1706c42c..ae1b65dc174 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -10,6 +10,7 @@ calico_ipv4pool_ipip: "Off" calico_ipip_mode: Never # valid values are 'Always', 'Never' and 'CrossSubnet' calico_vxlan_mode: Always # valid values are 'Always', 'Never' and 'CrossSubnet' +# Calico doesn't support ipip tunneling for the IPv6. calico_ipip_mode_ipv6: Never calico_vxlan_mode_ipv6: Never calico_pool_blocksize_ipv6: 116 diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 0b164534a8a..b961bdf40d1 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -173,3 +173,13 @@ - typha_enabled run_once: True delegate_to: "{{ groups['kube_control_plane'][0] }}" + +- name: "Check ipip mode is Never for calico ipv6" + assert: + that: + - "calico_ipip_mode_ipv6 in ['Never']" + msg: "Calico doesn't support ipip tunneling for the IPv6" + when: + - enable_dual_stack_networks + run_once: True + delegate_to: "{{ groups['kube_control_plane'][0] }}" diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index cf2d428dbb4..c8de8cea58b 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -1,16 +1,4 @@ --- -- name: runc | Download flannel binary - include_tasks: 
"../../../download/tasks/download_file.yml" - vars: - download: "{{ download_defaults | combine(downloads.flannel_cni) }}" - -- name: Copy flannel binary from download dir - copy: - src: "{{ downloads.flannel_cni.dest }}" - dest: "/opt/cni/bin/flannel" - mode: 0755 - remote_src: true - - name: Flannel | Create Flannel manifests template: src: "{{ item.file }}.j2" diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index b642ff2984a..3fe3cab841f 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -108,6 +108,17 @@ spec: values: - {{ arch }} initContainers: + - name: install-cni-plugin + image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag }} + command: + - cp + args: + - -f + - /flannel + - /opt/cni/bin/flannel + volumeMounts: + - name: cni-plugin + mountPath: /opt/cni/bin - name: install-cni image: {{ flannel_image_repo }}:{{ flannel_image_tag | regex_replace(image_arch,'') }}{{ arch }} command: @@ -135,7 +146,7 @@ spec: - name: flannel-cfg configMap: name: kube-flannel-cfg - - name: host-cni-bin + - name: cni-plugin hostPath: path: /opt/cni/bin updateStrategy: diff --git a/roles/remove-node/remove-etcd-node/tasks/main.yml b/roles/remove-node/remove-etcd-node/tasks/main.yml index e66d449de5f..7500d6d347e 100644 --- a/roles/remove-node/remove-etcd-node/tasks/main.yml +++ b/roles/remove-node/remove-etcd-node/tasks/main.yml @@ -2,7 +2,7 @@ - name: Lookup node IP in kubernetes command: > {{ kubectl }} get nodes {{ node }} - -o jsonpath={range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end} + -o jsonpath='{range .status.addresses[?(@.type=="InternalIP")]}{@.address}{"\n"}{end}' register: remove_node_ip when: - inventory_hostname in groups['etcd'] diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index fc452848bd0..e63492a721f 100644 --- 
a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -364,7 +364,9 @@ - dns - name: reset | include file with reset tasks specific to the network_plugin if exists - include_tasks: "{{ (role_path,'../network_plugin',kube_network_plugin,'tasks/reset.yml') | community.general.path_join | realpath }}" + include_role: + name: "network_plugin/{{ kube_network_plugin }}" + tasks_from: reset when: - kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico'] tags: diff --git a/tests/common/_docker_hub_registry_mirror.yml b/tests/common/_docker_hub_registry_mirror.yml index d84a7fe2717..2e77c5218bd 100644 --- a/tests/common/_docker_hub_registry_mirror.yml +++ b/tests/common/_docker_hub_registry_mirror.yml @@ -26,6 +26,8 @@ netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-serv nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx" +flannel_image_repo: "{{ quay_image_repo}}/kubespray/flannel" + # Kubespray settings for tests deploy_netchecker: true dns_min_replicas: 1 diff --git a/tests/files/packet_almalinux8-calico-remove-node.yml b/tests/files/packet_almalinux8-calico-remove-node.yml new file mode 100644 index 00000000000..4cb5dfc04e7 --- /dev/null +++ b/tests/files/packet_almalinux8-calico-remove-node.yml @@ -0,0 +1,7 @@ +--- +# Instance settings +cloud_image: almalinux-8 +mode: ha + +# Kubespray settings +auto_renew_certificates: true diff --git a/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml b/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml new file mode 100644 index 00000000000..3eda9034b99 --- /dev/null +++ b/tests/files/packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha.yml @@ -0,0 +1,18 @@ +--- +# Instance settings +cloud_image: ubuntu-2004 +mode: ha + +# use the legacy setting to test the upgrade +etcd_kubeadm_enabled: true + +upgrade_cluster_setup: true + +# Currently ipvs not available on KVM: 
https://packages.ubuntu.com/search?suite=focal&arch=amd64&mode=exactfilename&searchon=contents&keywords=ip_vs_sh.ko +kube_proxy_mode: iptables +enable_nodelocaldns: False + +# Pin disabling ipip mode to ensure proper upgrade +ipip: false +calico_vxlan_mode: Always +calico_network_backend: bird diff --git a/tests/scripts/molecule_run.sh b/tests/scripts/molecule_run.sh index ff2966fd027..9604238e13b 100755 --- a/tests/scripts/molecule_run.sh +++ b/tests/scripts/molecule_run.sh @@ -1,10 +1,32 @@ #!/bin/bash -set -euxo pipefail +set -euxo pipefail -o noglob export LC_ALL=C.UTF-8 export LANG=C.UTF-8 -for d in $(find roles -name molecule -type d) +_PATH='roles' +_EXCLUDE="" + +while [[ $# -gt 0 ]] ; do + case $1 in + -e|--exclude) + _EXCLUDE="${_EXCLUDE} -not -path ${_PATH}/$2/*" + shift + shift + ;; + -i|--include) + _PATH="${_PATH}/$2" + shift + shift + ;; + -h|--help) + echo "Usage: molecule_run.sh [-h|--help] [-e|--exclude] [-i|--include]" + exit 0 + ;; + esac +done + +for d in $(find ${_PATH} ${_EXCLUDE} -name molecule -type d) do pushd $(dirname $d) molecule test --all diff --git a/tests/scripts/testcases_prepare.sh b/tests/scripts/testcases_prepare.sh index bfaf65f1869..de36f492fc9 100755 --- a/tests/scripts/testcases_prepare.sh +++ b/tests/scripts/testcases_prepare.sh @@ -1,7 +1,7 @@ #!/bin/bash set -euxo pipefail -: ${ANSIBLE_MAJOR_VERSION:=2.10} +: ${ANSIBLE_MAJOR_VERSION:=2.12} /usr/bin/python -m pip uninstall -y ansible ansible-base ansible-core /usr/bin/python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt diff --git a/tests/scripts/testcases_run.sh b/tests/scripts/testcases_run.sh index ed16b24d0e8..5947309df18 100755 --- a/tests/scripts/testcases_run.sh +++ b/tests/scripts/testcases_run.sh @@ -109,6 +109,11 @@ if [ "${IDEMPOT_CHECK}" = "true" ]; then fi fi +# Test node removal procedure +if [ "${REMOVE_NODE_CHECK}" = "true" ]; then + ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e 
@${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e skip_confirmation=yes -e node=${REMOVE_NODE_NAME} --limit "all:!fake_hosts" remove-node.yml +fi + # Clean up at the end, this is to allow stage1 tests to include cleanup test if [ "${RESET_CHECK}" = "true" ]; then ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} ${CI_TEST_ADDITIONAL_VARS} -e reset_confirmation=yes --limit "all:!fake_hosts" reset.yml