From 6444e2460a2d7375a4c8df31d1d87417fd60c3f7 Mon Sep 17 00:00:00 2001 From: Tiana Rakoto Arimanana <5566338+b23prodtm@users.noreply.github.com> Date: Sat, 11 May 2019 20:19:39 +0200 Subject: [PATCH] Host Access Point on Raspberry Pi 3 B+ (#17) * Development (#10) * Raspberry Pi configuration: ARM requires rolling kubernetes back to v1.12.5. Note that flannel works on amd64, arm, arm64 and ppc64le. https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/ Create setup_playbook.sh for ansible-architecture armv7l (RasPi) * Trusted Ansible repository * etcdctl must be manually installed on each node from github.com/etcd-io/etcd/tree/release-3.1 * Update README.md * checksums * Bastion PI Readme FAQ * armv7l -> arm64 compatibility mode with Pi3 * Git releases search for architecture binaries * declare PI=pi # replace 'pi' with 'ubuntu' or any other user * SSH permit root login Development convenience script: $ curl -fsSL https://get.docker.com -o get-docker.sh $ sudo sh get-docker.sh * Classic server configuration kubernetes-sigs/kubespray/issues/4293 * Bastion sudoers * Update README.md * - Package preinstall tasks sudo -> become: yes | no - Python 3 sudo pip3 install -r requirements.txt * Ignore APT cache update errors [concurrency lock issue](https://github.com/ansible/ansible/pull/47322) * https://github.com/kubernetes-sigs/kubespray/issues/2767 * Update setup_playbook.sh * Bionic python3-dev Pip3 * Update master (#8) (#9) * fix(contrib/metallb): adds missing become: true in role (#4356) On CoreOS, without this, it fails to kubectl apply MetalLB due to lack of privileges. * Fix #4237: update kube cert path (#4354) * Use sample inventory file in doc (#4052) * Revert "Fix #4237: update kube cert path (#4354)" (#4369) This reverts commit ea7a6f1cf1d95732d8305d0ca8da0da8a0050e3d. This change modified the certs dir for Kubernetes, but did not move the directories for existing clusters.
* Fix support for ansible 2.7.9 (#4375) * Use wide for netchecker debug output (#4383) * Added support of bastion host for reset.yaml (#4359) * Empty commit to trigger CI * Use proxy_env with kubeadm phase commands (#4325) * clarify that kubespray now supports kubeadm (fixes #4089) (#4366) * Reduce jinja2 filters in coredns templates (#4390) * Upgrade to k8s 1.13.5 * Increase CPU flavor for CI (#4389) * Fix CA cert environment variable for etcd v3 (#4381) * Added livenessProbe for local nginx apiserver proxy (#4222) * Added configurable local apiserver proxy liveness probe * Enable API LB healthcheck by default * Fix template spacing and moved healthz location to nginx http section * Fix healthcheck listen address to allow kubelet request healthcheck * Default values for the dns_servers and dns_domain variables are set in two files (#3999): values from inventory in roles/kubespray-defaults/defaults/main.yml, hardcoded values in roles/container-engine/defaults/main.yml; dns_servers set empty in roles/container-engine/defaults/main.yml and skydns_server not set in docker_dns_servers variables; also set a default value for manual_dns_server; other variables in roles/container-engine/defaults need not be set * Fix bootstrap-os role, failing to create remote_tmp (#4384) * use ansible_remote_tmp hostvar * Use static files in KubeDNS templating task (#4379) This commit adapts the "Lay Down KubeDNS Template" task to use the static files moved by pull request [1] [1] https://github.com/kubernetes-sigs/kubespray/pull/4341 * Fix supplementary_addresses rendering error (#4403) * Corrected cloud name (#4316) The correct name is Packet, not Packet Host. * adapt inventory script to python 2.7 version (#4407) * Calico felix - Fix jinja2 boolean condition (#4348) * Fix jinja2 boolean condition * Convert all felix variables to booleans instead. * Set up k8s-cluster DNS configuration * kube-proxy=iptables initial dns setup=coredns * Update to v1.13.5 checksums * create user priv escalate * weave network ansible * --ask-become-pass * fix up item.item dict object error * Let python unversioned cmd * Update 0060-resolvconf.yml * Update install_host.yml * Add PPA repos https://github.com/kubernetes-sigs/cri-o (crio) https://github.com/kubernetes-sigs/cri-tools (crictl) * checksums for Raspberry Pi 3 B+ and A+ * Raspi A: mem config * Help files and scripts * Safe Calico network: get the current version of the calico cluster; async time increased * Quick start scripts Guidelines * WIP Dashboard http://127.0.0.1:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login * Host AP: hostapd service, ISC DHCP service, IP masquerade ufw rules, [Gatewayed] hosts (bastion-ssh-config), internet sharing / bridge * Ubuntu before 18.04: bridge connection, country code selection * Netplan.io manager * Strong encryption keys https://www.ibm.com/developerworks/library/l-wifiencrypthostapd/index.html * Timeouts * Stateful DHCPv6: don't mix dhcpd subnet leases across interfaces. Define a subnet for the eth0 segment so clients retrieve the expected server addresses (see the sketch below).
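To make that last bullet concrete: a minimal sketch of the per-segment dhcpd declarations it describes, assuming ISC dhcpd and the default ranges from library/hap-wiz-env.py (192.168.2.0/24 on the AP side, 192.168.0.0/24 on the eth0 uplink). The addresses, range and file layout are illustrative assumptions, not part of this patch, and the same per-segment split applies to the DHCPv6 configuration.

```sh
# Hypothetical /etc/dhcp/dhcpd.conf fragment: one subnet block per segment,
# so leases for the AP bridge never mix with the eth0 uplink segment.
cat <<'EOF' | sudo tee -a /etc/dhcp/dhcpd.conf
# AP clients (range mirrors the NET_start/NET_end defaults, 15..100)
subnet 192.168.2.0 netmask 255.255.255.0 {
  range 192.168.2.15 192.168.2.100;
  option routers 192.168.2.1;
}
# eth0 segment: declared empty so dhcpd knows the topology but hands out
# no leases there (the ISP router serves that wire).
subnet 192.168.0.0 netmask 255.255.255.0 { }
EOF
sudo systemctl restart isc-dhcp-server
```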
* Python3 script for the bastion host access point * Set up DHCP wi-fi clients, and obtain an IP sub-network lease over the wired internet uplink (dhclient). Script environment variables and rc.local --- README.md | 195 +++++++++--------- SCHEME | 66 ++++++ cluster.yml | 5 +- contrib/dind/roles/dind-host/tasks/main.yaml | 2 +- contrib/inventory_builder/inventory.py | 2 +- .../metallb/roles/provision/tasks/main.yml | 4 +- .../kubernetes-pv/ansible/tasks/main.yaml | 8 +- .../provision/tasks/bootstrap/deploy.yml | 2 +- .../provision/tasks/bootstrap/storage.yml | 2 +- .../roles/provision/tasks/glusterfs.yml | 4 +- .../heketi/roles/provision/tasks/heketi.yml | 2 +- .../heketi/roles/provision/tasks/main.yml | 2 +- .../heketi/roles/provision/tasks/secret.yml | 12 +- .../heketi/roles/provision/tasks/storage.yml | 2 +- .../roles/provision/tasks/storageclass.yml | 2 +- docs/ansible.md | 24 --- inventory/sample/group_vars/gatewayed.yml | 1 + .../group_vars/k8s-cluster/k8s-cluster.yml | 16 +- .../group_vars/k8s-cluster/k8s-net-calico.yml | 6 +- .../k8s-cluster/k8s-net-kube-router.yml | 2 +- inventory/sample/inventory.ini | 45 ++-- library/hap-wiz-env.py | 121 +++++++++++ roles/bootstrap-os/tasks/bootstrap-ubuntu.yml | 9 + .../container-engine/cri-o/defaults/main.yml | 4 + .../cri-o/tasks/install_host.yml | 9 + roles/container-engine/cri-o/tasks/main.yaml | 42 ++++ roles/container-engine/cri-o/vars/debian.yml | 26 +++ roles/container-engine/docker/tasks/main.yml | 17 ++ .../{redhat-aarch64.yml => redhat-arm64.yml} | 0 roles/dnsmasq/tasks/main.yml | 12 +- roles/download/defaults/main.yml | 20 ++ .../ansible/tasks/dashboard.yml | 24 ++- roles/kubernetes-apps/ansible/tasks/main.yml | 8 +- .../ansible/tasks/netchecker.yml | 18 +- .../cluster_roles/tasks/main.yml | 21 +- .../cluster_roles/tasks/oci.yml | 2 +- .../nvidia_gpu/tasks/main.yml | 9 +- .../cephfs_provisioner/tasks/main.yml | 6 +- .../local_path_provisioner/tasks/main.yml | 6 +- .../local_volume_provisioner/tasks/main.yml | 8 +- roles/kubernetes-apps/helm/tasks/main.yml | 14 +- .../cert_manager/tasks/main.yml | 6 +- .../ingress_nginx/tasks/main.yml | 10 +- .../metrics_server/tasks/main.yml | 6 +- .../network_plugin/calico/tasks/main.yml | 13 +- .../network_plugin/canal/tasks/main.yml | 8 +- .../network_plugin/cilium/tasks/main.yml | 10 +- .../network_plugin/contiv/tasks/main.yml | 6 +- .../network_plugin/flannel/tasks/main.yml | 8 +- .../network_plugin/kube-router/tasks/main.yml | 2 +- .../network_plugin/multus/tasks/main.yml | 10 +- .../openstack/tasks/main.yml | 2 +- .../policy_controller/calico/tasks/main.yml | 14 +- roles/kubernetes-apps/registry/tasks/main.yml | 12 +- roles/kubernetes/client/tasks/main.yml | 7 +- roles/kubernetes/preinstall/handlers/main.yml | 1 + .../preinstall/tasks/0020-verify-settings.yml | 8 +- .../preinstall/tasks/0040-set_facts.yml | 2 +- .../tasks/0050-create_directories.yml | 2 +- roles/network_plugin/calico/tasks/check.yml | 2 +- roles/network_plugin/calico/tasks/install.yml | 4 +- roles/network_plugin/canal/tasks/main.yml | 4 +- roles/network_plugin/cilium/tasks/main.yml | 4 +- roles/network_plugin/flannel/tasks/main.yml | 4 +- .../kube-router/tasks/annotate.yml | 6 +- roles/reset/tasks/main.yml | 2 + .../win_nodes/kubernetes_patch/tasks/main.yml | 4 +- scripts/collect-info.yaml | 6 +- scripts/gen_digest_json.sh | 23 ++- scripts/hap-wiz-bionic.sh | 144 +++++++++++++ scripts/init.d/init_dhcp_serv.sh | 74 +++++++ scripts/init.d/init_net_if.sh | 130 ++++++++++++ scripts/init.d/init_ufw.sh | 58 ++++++ scripts/init.d/net_restart.sh | 54 +++++
scripts/my_cluster.sh | 98 +++++++++ scripts/my_firewall.sh | 46 +++++ scripts/my_playbook.sh | 43 ++-- scripts/requirements.txt | 2 + scripts/start_dashboard.sh | 9 + scripts/systemctl-wpa-ssh.sh | 77 +++++++ tests/testcases/020_check-create-pod.yml | 6 +- tests/testcases/030_check-network.yml | 14 +- 82 files changed, 1377 insertions(+), 344 deletions(-) create mode 100644 SCHEME create mode 100644 inventory/sample/group_vars/gatewayed.yml create mode 100644 library/hap-wiz-env.py create mode 100644 roles/container-engine/cri-o/tasks/install_host.yml rename roles/container-engine/docker/vars/{redhat-aarch64.yml => redhat-arm64.yml} (100%) create mode 100755 scripts/hap-wiz-bionic.sh create mode 100755 scripts/init.d/init_dhcp_serv.sh create mode 100755 scripts/init.d/init_net_if.sh create mode 100755 scripts/init.d/init_ufw.sh create mode 100755 scripts/init.d/net_restart.sh create mode 100755 scripts/my_cluster.sh create mode 100755 scripts/my_firewall.sh create mode 100644 scripts/requirements.txt create mode 100755 scripts/start_dashboard.sh create mode 100755 scripts/systemctl-wpa-ssh.sh diff --git a/README.md b/README.md index ec9014d13d2..df311e9b2af 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@  +![Kubernetes Logo](https://raw.githubusercontent.com/kubernetes-sigs/kubespray/master/docs/img/kubernetes-logo.png) Deploy a Production Ready Kubernetes Cluster +============================================ If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**. You can get your invite [here](http://slack.k8s.io/) @@ -33,49 +35,18 @@ To deploy the cluster you can use : # Copy ``inventory/sample`` as ``inventory/mycluster`` cp -rfp inventory/sample inventory/mycluster - # Update Ansible inventory file with inventory builder . Single master IP is possible, see nodes with bastion - declare -a IPS=(192.168.0.16 192.168.0.17) - CONFIG_FILE=inventory/mycluster/hosts.ini python contrib/inventory_builder/inventory.py ${IPS[@]} - cat inventory/mycluster/hosts.ini - # bastion single master looks like `raspberrypi ansible_ssh_host=192.168.0.16 ip=192.168.0.16` ansible_host=192.168.0.16 ansible_user=pi" # replace 'pi' with 'ubuntu' or any other user - # Review and change parameters under ``inventory/mycluster/group_vars`` - cat inventory/mycluster/group_vars/all/all.yml - cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml - - declare PI=pi # replace 'pi' with 'ubuntu' or any other user - for ip in ${IPS[@]}; do - # You can ssh-copy-id to Ansible inventory hosts permanently for the pi user - ssh-copy-id $PI@$ip; - ssh $PI@$ip sudo bash -c "echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config"; - ssh $PI@$ip cat /etc/ssh/sshd_config | grep PermitRootLogin; - # To install etcd on nodes, Go lang is needed - ssh $PI@$ip sudo apt-get install golang -y; - # Ansible is reported as a trusted repository - ssh $PI@$ip sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367; - # deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main - - # The kube user which owns k8s daemons must be added to Ubuntu group. 
- ssh $PI@$pi sudo usermod -a -G ubuntu kube; - - # disable firewall for the setup - ssh $PI@$pi sudo ufw disable; - done - - # Adjust the ansible_memtotal_mb to your Raspberry specs - cat roles/kubernetes/preinstall/tasks/0020-verify-settings.yml | grep -b2 'that: ansible_memtotal_mb' - - # Shortcut to actually set up the playbook on hosts: - scripts/my_playbook.sh cluster.yml - - # Displays help scripts/my_playbook.sh --help - # or you can use the extended version as well - # scripts/my_playbook.sh -i inventory/mycluster/hosts.ini cluster.yml - - for ip in ${IPS[@]}; do - # --setup-firewall opens default kubernetes ports in firewalld - scripts/my_playbook.sh --setup-firewall $PI@$pi - ssh $PI@$pi sudo ufw enable; - done + # Set up the cluster inventory file with the inventory builder. A single-master cluster is possible. + scripts/my_cluster.sh + + # Set up the cluster playbook (running it in two phases keeps long tasks from stalling when the hosts run out of resources) + scripts/my_playbook.sh --timeout=120 cluster.yml --skip-tags=apps,resolvconf + scripts/my_playbook.sh --timeout=120 cluster.yml --tags=apps,resolvconf + + # Start Dashboard and kubernetes controllers + scripts/start_dashboard.sh + +### Accessing the dashboard +Available from the master host (e.g. raspberrypib), through a proxy at localhost:8001 [https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login](https://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login). See [Ansible](docs/ansible.md) documentation. Ansible uses tags to define TASK groups management. @@ -92,18 +63,23 @@ A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` en #### Known issues : See [docs](./docs/ansible.md) +> *PROBLEM* - ModuleNotFoundError: No module named 'ruamel' ```Traceback (most recent call last): File "contrib/inventory_builder/inventory.py", line 36, in from ruamel.yaml import YAML ``` +> *SOLUTION* Please install the inventory builder Python libraries. -> sudo pip install -r contrib/inventory_builder/requirements.txt + sudo pip install -r contrib/inventory_builder/requirements.txt + +> *PROBLEM* - CGROUPS_MEMORY missing to use ```kubeadm init``` [ERROR SystemVerification]: missing cgroups: memory +> *SOLUTION* The Linux kernel must be booted with the required cgroups enabled. Add the following to the kernel parameters: cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1 @@ -113,58 +89,93 @@ E.g. : Raspberry Ubuntu Preinstalled server uses u-boot, then in ssh session run sed "$ s/$/ cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1/" /boot/firmware/cmdline.txt | sudo tee /boot/firmware/cmdline.txt reboot -- I may not be able to build a playbook on Arm, armv7l architectures Issues with systems such as Rasbian 9 and the Raspberries first and second generation. There's [some issue](http://github.com/kubernetes-sigs/kubespray/issues/4261) to obtain 32 bits binary compatibility on those systems. Please post a comment if you find a way to enable 32 bits support for the k8s stack. - -- Kubeadm 1.10.1 known to feature arm64 binary in googlestorage.io +> *PROBLEM* +- I may not be able to build a playbook on ARM/armv7l architectures. Issues occur with systems such as Raspbian 9 and first- and second-generation Raspberry Pi boards. +> *POSSIBLE ANSWER* +There's [an open issue](http://github.com/kubernetes-sigs/kubespray/issues/4261) about obtaining 32-bit binary compatibility on those systems. Please post a comment if you find a way to enable 32-bit support for the k8s stack.
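Before deploying to one of these boards, it can help to check which architecture name the k8s download URLs will resolve to. A minimal sketch mirroring the architecture_groups mapping this patch adds in roles/container-engine/docker/tasks/main.yml:

```sh
# Map the kernel's machine name to the label used for k8s release binaries
# (the same x86_64->amd64 / aarch64->arm64 / armv7l->arm mapping the patch
# sets as the host_architecture fact).
case "$(uname -m)" in
  x86_64)  host_architecture=amd64 ;;
  aarch64) host_architecture=arm64 ;;
  armv7l)  host_architecture=arm ;;  # 32-bit boards affected by issue #4261
  *)       host_architecture="$(uname -m)" ;;
esac
echo "k8s binaries would be fetched for: ${host_architecture}"
```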
+> *PROBLEM* - When you see the Error: no PUBKEY ... could be received from GPG, look at https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#latest-releases-via-apt-debian +> *ANSWER* +Deploy Kubespray with the Ansible playbook to raspberrypi. The -b option is required for, e.g., writing SSL keys in /etc/, installing packages and interacting with various systemd daemons. Without the -b argument the playbook would fail to start! + + ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v --become-user=root --private-key=~/.ssh/id_rsa + +- ```scripts/my_playbook.sh cluster.yml``` + > *PROBLEM* + + TASK [kubernetes/preinstall : Stop if ip var does not match local ips] + + fatal: [raspberrypi]: FAILED! => { + "assertion": "ip in ansible_all_ipv4_addresses", + "changed": false, + "evaluated_to": false, + "msg": "Assertion failed" + } + + > *ANSWER* + The host *ip* set in ```inventory//hosts.ini``` isn't the docker network interface (private). Run ```ifconfig``` in an ssh@... terminal to find the IPv4 address assigned to the eth0/wlan0 iface, e.g. _10.3.0.1_ (public network) + + > *PROBLEM* + + fatal: "cmd": ["timeout", "-k", "600s", "600s", "/usr/local/bin/kubeadm", "init", "--config=/etc/kubernetes/kubeadm-config.yaml" + + TASK [kubernetes/preinstall : Stop if either kube-master, kube-node or etcd is empty] + + ************************************************************************** + Wednesday 03 April 2019 16:07:14 +0200 (0:00:00.203) 0:00:40.395 ******* + ok: [raspberrypi] => (item=kube-master) => { + "changed": false, + "item": "kube-master", + "msg": "All assertions passed" + } + failed: [raspberrypi] (item=kube-node) => { + "assertion": "groups.get('kube-node')", + "changed": false, + "evaluated_to": false, + "item": "kube-node", + "msg": "Assertion failed" + } + ok: [raspberrypi] => (item=etcd) => { + "changed": false, + "item": "etcd", + "msg": "All assertions passed" + } + + > *ANSWER* + The [kube-node] or [kube-master] group in the inventory//hosts.ini file was empty. They cannot be the same. That assertion means that a kubernetes cluster is made of at least one kube-master and one kube-node. + + > *PROBLEM* + + Error: open /etc/ssl/etcd/ssl/admin-.pem: permission denied + > *ANSWER* + The files under /etc/ssl/etcd are owned by a user other than ubuntu and cannot be accessed by Ansible. Please change the file owner:group to ```kube:ubuntu``` as below, or to the *ansible_user* of your choice. + + ssh @ 'sudo chown kube:ubuntu -R /etc/ssl/etcd/' + + > *PROBLEM* + + E: Unable to locate package unzip + + ERROR: Service 'app' failed to build + > *ANSWER* + The command ```bin/sh -c apt-get update -yqq && apt-get install -yqq --no-install-recommends git zip unzip && rm -rf /var/lib/apt/lists' returned a non-zero code: 100``` + The Kubernetes container manager failed to resolve package repository hostnames. That's related to a cluster DNS misconfiguration. Read the [DNS Stack](docs/dns-stack.md) documentation. You may opt in for a Google nameserver; your master host must then have access to the internet. Default Google DNS IPs are 8.8.8.8 and 8.8.4.4. A CoreDNS service must be running; see below about the ```top``` command. + + > *PROBLEM* + + Timeout (12s) waiting for privilege escalation prompt + Try increasing the timeout settings: you may want to run ansible with + ``--timeout=45`` and add ``--ask-become-pass`` (which prompts for the sudo password).
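For instance, through the wrapper script this patch adds (assuming, as the quick-start usage above suggests, that the wrapper forwards extra flags to ansible-playbook):

```sh
# Give privilege escalation more time and prompt for the sudo password.
scripts/my_playbook.sh --timeout=45 cluster.yml --ask-become-pass
```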
+ > *POSSIBLE SOLUTION* + If the error still happens, the role-specific TASK configuration in ansible roles/ should set up the privilege escalation. Please contact the system administrator and [file an issue](https://github.com/kubernetes-sigs/kubespray/issues) about the TASK that must be fixed up. + +> *ISSUE* +- How much memory is left free on my master host? +> *ANSWER* +If you don't know how much memory is available for the master host kubernetes-apps, run the following command to display live memory usage: -- Deploy Kubespray with Ansible Playbook to raspberrypi The option -b is required, as for example writing SSL keys in /etc/, installing packages and interacting with various systemd daemons. Without -b argument the playbook would fall to start ! - -ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v --become-user=root --private-key=~/.ssh/id_rsa - -- ```scripts/my_playbook.sh``` - +TASK [kubernetes/preinstall : Stop if ip var does not match local ips] - - fatal: [raspberrypi]: FAILED! => { - "assertion": "ip in ansible_all_ipv4_addresses", - "changed": false, - "evaluated_to": false, - "msg": "Assertion failed" - } - -The host *ip* set in ```inventory//hosts.ini``` isn't the docker network interface (iface). Run with ssh@... terminal : ```ifconfig docker0``` to find the ipv4 address that's attributed to the docker0 iface. E.g. _172.17.0.1_ - - +fatal: [raspberrypi]: FAILED! => {"changed": true, "cmd": ["timeout", "-k", "600s", "600s", "/usr/local/bin/kubeadm", "init", "--config=/etc/kubernetes/kubeadm-config.yaml" - -That's if you have specified only a single machine-ip in hosts.ini. - - +TASK [kubernetes/preinstall : Stop if either kube-master, kube-node or etcd is empty] ************************************************************************** -Wednesday 03 April 2019 16:07:14 +0200 (0:00:00.203) 0:00:40.395 ******* -ok: [raspberrypi] => (item=kube-master) => { - "changed": false, - "item": "kube-master", - "msg": "All assertions passed" -} -failed: [raspberrypi] (item=kube-node) => { - "assertion": "groups.get('kube-node')", - "changed": false, - "evaluated_to": false, - "item": "kube-node", - "msg": "Assertion failed" -} -ok: [raspberrypi] => (item=etcd) => { - "changed": false, - "item": "etcd", - "msg": "All assertions passed" -} -The inventory//hosts.ini file [kube-node] or [kube-master] was empty. They cannot be the same. That assertion means that a kubernetes cluster is made of at least one kube-master and one kube-node. - -- Error: open /etc/ssl/etcd/ssl/admin-.pem: permission denied + ssh @ top + # Ctrl-C to stop monitoring -The file located at /etc/ssl/etcd's owned by another user than Ubuntu and cannot be accessed by Ansible. Please change the file owner:group to ```ubuntu:ubuntu``` or the *ansible_user* or your choice. +> *ISSUE* +- How to open firewall ports for ? +> *ANSWER* - ssh @ 'sudo chown ubuntu:ubuntu -R /etc/ssl/etcd/' + ./scripts/my_playbook.sh --firewall-setup @ - E: Unable to locate package unzip - ERROR: Service 'app' failed to build @@ -174,7 +185,7 @@ Kubernetes container manager failed to resolve package reposirory hostnames. Tha - How much memory is left free on my master host ?
If you don't know how much memory's available for the master host kubernetes-apps, run the following command that displays live memory usage : - ssh $PI@$pi top + ssh $PI@$ip top # Ctrl-C to stop monitoring - Timeout (12s) waiting for privilege escalation prompt diff --git a/SCHEME b/SCHEME new file mode 100644 index 00000000000..20e80e7c411 --- /dev/null +++ b/SCHEME @@ -0,0 +1,66 @@ +# # +# # # # ##### ###### #### ##### ##### ## # # +# # # # # # # # # # # # # # # # +### # # ##### ##### #### # # # # # # # +# # # # # # # # ##### ##### ###### # +# # # # # # # # # # # # # # # +# # #### ##### ###### #### # # # # # # + + +============== +- ISP ROUTER - + _( )_( )_ +(_ W A N _) + (_) (__) +============== + | + | Home network + | ,--./,-. + | / # / + L---- | : iMac + | \ \ + | `._,._,' + S L Ansible - ssh + S + H + | DMZ IP - Bastion Host + | (eth0) + | .\V/, + | ()_()_) + L ---- (.(_)()_) raspberrypib+ + (_(_).)' + `'"'` + L ufw - netplan - isc-dhcp-server + Private | + Network I + (br0) P + V + 4 + | + L (((( HOSTAPd )))) + + O + o + o Gatewayed Host(s) + O + o + o + + etcd + .\V/, + __v_ Private ()_()_) + K8s (____\/{ docker IP (.(_)()_) raspberrypia+ + (_(_).)' + `'"'` + Calico | (wlan0) + K 8 s L (((( wpa_supplicant )))) + K + 8 + S + | (wlan0) + L (((( wpa_supplicant )))) + .\V/, + Private ()_()_) + IP (.(_)()_) raspberrypia+ + (_(_).)' + `'"'` diff --git a/cluster.yml b/cluster.yml index 3cc88fd530b..00c1d971405 100644 --- a/cluster.yml +++ b/cluster.yml @@ -16,8 +16,8 @@ - hosts: bastion[0] gather_facts: False roles: - - { role: kubespray-defaults} - - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} + - { role: kubespray-defaults } + - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" @@ -48,6 +48,7 @@ roles: - { role: kubespray-defaults} - { role: kubernetes/preinstall, tags: preinstall } + - { role: download, tags: download, when: "not skip_downloads and container_manager == 'crio'" } - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) } - { role: download, tags: download, when: "not skip_downloads" } environment: "{{proxy_env}}" diff --git a/contrib/dind/roles/dind-host/tasks/main.yaml b/contrib/dind/roles/dind-host/tasks/main.yaml index 9efd3420427..b0a694d6c07 100644 --- a/contrib/dind/roles/dind-host/tasks/main.yaml +++ b/contrib/dind/roles/dind-host/tasks/main.yaml @@ -28,7 +28,7 @@ - /lib/modules:/lib/modules - "{{ item }}:/dind/docker" register: containers - with_items: "{{groups.containers}}" + with_items: "{{ groups.containers }}" tags: - addresses diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py index 3c0a136180e..56c5ce08c2c 100644 --- a/contrib/inventory_builder/inventory.py +++ b/contrib/inventory_builder/inventory.py @@ -78,7 +78,7 @@ def __init__(self, changed_hosts=None, config_file=None): try: self.hosts_file = open(config_file, 'r') self.yaml_config = yaml.load(self.hosts_file) - except FileNotFoundError: + except IOError: pass if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS: diff --git a/contrib/metallb/roles/provision/tasks/main.yml b/contrib/metallb/roles/provision/tasks/main.yml index 6ebb293de90..e00d2767495 100644 --- a/contrib/metallb/roles/provision/tasks/main.yml +++ b/contrib/metallb/roles/provision/tasks/main.yml @@ -9,8 +9,8 @@ - name: "Kubernetes Apps | Install and configure MetalLB" kube: name: "MetalLB" - kubectl: "{{bin_dir}}/kubectl" 
- filename: "{{ kube_config_dir }}/{{ item }}" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/{{ item.item }}" state: "{{ item.changed | ternary('latest','present') }}" become: true with_items: "{{ rendering.results }}" diff --git a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml index bdfdfd9e786..65587aa924b 100644 --- a/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml +++ b/contrib/network-storage/glusterfs/roles/kubernetes-pv/ansible/tasks/main.yaml @@ -12,9 +12,9 @@ kube: name: glusterfs namespace: default - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.dest}}" - state: "{{item.changed | ternary('latest','present') }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.dest }}" + state: "{{ item.changed | ternary('latest','present') }}" with_items: "{{ gluster_pv.results }}" when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml index b2ceccf20b4..62039de8943 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/deploy.yml @@ -6,7 +6,7 @@ - name: "Kubernetes Apps | Install and configure Heketi Bootstrap" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-bootstrap.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" - name: "Wait for heketi bootstrap to complete." diff --git a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml index be3c42cafa7..63a475a85cc 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/bootstrap/storage.yml @@ -6,7 +6,7 @@ - name: "Create heketi storage." 
kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json" state: "present" vars: diff --git a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml index 265e917ff05..7d0d0abd608 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/glusterfs.yml @@ -6,7 +6,7 @@ - name: "Kubernetes Apps | Install and configure GlusterFS daemonset" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/glusterfs-daemonset.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" - name: "Kubernetes Apps | Label GlusterFS nodes" @@ -33,6 +33,6 @@ - name: "Kubernetes Apps | Install and configure Heketi Service Account" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-service-account.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml index ef79d95f950..0eb0e706150 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/heketi.yml @@ -6,7 +6,7 @@ - name: "Kubernetes Apps | Install and configure Heketi" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-deployment.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" - name: "Ensure heketi is up and running." diff --git a/contrib/network-storage/heketi/roles/provision/tasks/main.yml b/contrib/network-storage/heketi/roles/provision/tasks/main.yml index 23a2b4f9c72..1feb27d7b5d 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/main.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/main.yml @@ -7,7 +7,7 @@ - name: "Kubernetes Apps | Test Heketi" register: "heketi_service_state" - command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true" + command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true" changed_when: false - name: "Kubernetes Apps | Bootstrap Heketi" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml index 61c176bdaee..2ac3ce930bd 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/secret.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/secret.yml @@ -1,19 +1,19 @@ --- - register: "clusterrolebinding_state" - command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" + command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" changed_when: false - name: "Kubernetes Apps | Deploy cluster role binding." 
when: "clusterrolebinding_state.stdout == \"\"" - command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account" + command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account" - register: "clusterrolebinding_state" - command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" + command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true" changed_when: false - assert: that: "clusterrolebinding_state.stdout != \"\"" msg: "Cluster role binding is not present." - register: "secret_state" - command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" + command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" changed_when: false - name: "Render Heketi secret configuration." become: yes @@ -22,9 +22,9 @@ dest: "{{ kube_config_dir }}/heketi.json" - name: "Deploy Heketi config secret" when: "secret_state.stdout == \"\"" - command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json" + command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json" - register: "secret_state" - command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" + command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true" changed_when: false - assert: that: "secret_state.stdout != \"\"" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml index d25181ca310..dabd5e02a1a 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/storage.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/storage.yml @@ -7,6 +7,6 @@ - name: "Kubernetes Apps | Install and configure Heketi Storage" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/heketi-storage.json" state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml index 858ea401f2d..8fb634940db 100644 --- a/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml +++ b/contrib/network-storage/heketi/roles/provision/tasks/storageclass.yml @@ -20,6 +20,6 @@ - name: "Kubernetes Apps | Install and configure Storace Class" kube: name: "GlusterFS" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/storageclass.yml" state: "{{ rendering.changed | ternary('latest', 'present') }}" diff --git a/docs/ansible.md b/docs/ansible.md index b18eb9a5dfd..7c487efa8e0 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -180,27 +180,3 @@ bastion ansible_ssh_host=x.x.x.x For more information about Ansible and bastion hosts, read [Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/) - -Docker-CE ------------ -Let's install the Community Edition as container-manager in each of your cluster machines. Here's how, to implement a for-loop in bash scripts. 
-``` -# You can ssh-copy-id to Ansible inventory hosts permanently for the pi user -declare PI=pi # replace 'pi' with 'ubuntu' or any other user -# Enable SSH interface and PermitRootLogin over ssh in Raspberry -for ip in ${IPS[@]}; do -# Get docker-ce (Read Ubuntu LTS https://docs.docker.com/install/linux/docker-ce/ubuntu/) - ssh $PI@$pi sudo apt-get remove docker docker-engine docker.io containerd runc -y; -# Install packages to allow apt to use a repository over HTTPS - ssh $PI@$pi sudo apt-get update && sudo apt-get install apt-transport-https ca-certificates curl gnupg-agent software-properties-common -y; -# Add Docker’s official GPG key - ssh $PI@$pi curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -; -# Use the following command to set up the stable repository. - ssh $PI@$pi sudo add-apt-repository "deb [arch=arm64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"; - -# Install Docker Community Edition - ssh $PI@$pi sudo apt-get update && sudo apt-get install docker-ce -y; -# Install the latest version of Docker CE and containerd - ssh $PI@$pi sudo apt-get install docker-ce-cli containerd.io -y; - done - ``` diff --git a/inventory/sample/group_vars/gatewayed.yml b/inventory/sample/group_vars/gatewayed.yml new file mode 100644 index 00000000000..4739c0a4b35 --- /dev/null +++ b/inventory/sample/group_vars/gatewayed.yml @@ -0,0 +1 @@ +ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q ubuntu@bastion.domain.com"' diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml index 7b323be9fb7..fdc82f82c53 100644 --- a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml @@ -72,7 +72,7 @@ kube_users: # Choose network plugin (cilium, calico, contiv, weave or flannel. Use cni for generic cni plugin) # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing -kube_network_plugin: weave +kube_network_plugin: calico # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni kube_network_plugin_multus: false @@ -98,8 +98,8 @@ kube_apiserver_port: 6443 # (https) kube_apiserver_insecure_port: 0 # (disabled) # Kube-proxy proxyMode configuration. -# Can be ipvs, iptables -kube_proxy_mode: iptables +# Can be ipvs (ingress controller), iptables (behind firewall) +kube_proxy_mode: ipvs # A string slice of values which specify the addresses to use for NodePorts. # Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). @@ -136,7 +136,7 @@ enable_nodelocaldns: true nodelocaldns_ip: 169.254.25.10 # Can be docker_dns, host_resolvconf or none -resolvconf_mode: host_resolvconf +resolvconf_mode: docker_dns # Deploy netchecker app to verify DNS resolve as an HTTP service deploy_netchecker: false # Ip address of the kubernetes skydns service @@ -171,10 +171,14 @@ dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kube podsecuritypolicy_enabled: false # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts -# kubeconfig_localhost: false +kubeconfig_localhost: true # Download kubectl onto the host that runs Ansible in {{ bin_dir }} -# kubectl_localhost: false +kubectl_localhost: true +# dnsmasq +# dnsmasq_upstream_dns_servers: +# - /resolvethiszone.with/10.0.4.250 +# - 8.8.8.8 # Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. 
(default true) # kubelet_cgroups_per_qos: true diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml index 9d2654c762a..95109950f31 100644 --- a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml +++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml @@ -6,7 +6,7 @@ # peer_with_router: false # Enables Internet connectivity from containers -# nat_outgoing: true +nat_outgoing: true # add default ippool name # calico_pool_name: "default-pool" @@ -14,8 +14,8 @@ # add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise) # calico_pool_cidr: 1.2.3.4/5 -# Global as_num (/calico/bgp/v1/global/as_num) -# global_as_num: "64512" +# Global as_num (/calico/bgp/v1/global/as_num) (default 64512, a private AS number) +global_as_num: "65400" # You can set MTU value here. If left undefined or empty, it will # not be specified in calico CNI config, so Calico will use built-in diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml b/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml index e36b9c1dcf2..01af1ca66e5 100644 --- a/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml +++ b/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml @@ -8,7 +8,7 @@ # Enables Service Proxy -- sets up IPVS for Kubernetes Services # see docs/kube-router.md "Caveats" section -# kube_router_run_service_proxy: false +kube_router_run_service_proxy: true # Add Cluster IP of the service to the RIB so that it gets advertises to the BGP peers. # kube_router_advertise_cluster_ip: false diff --git a/inventory/sample/inventory.ini b/inventory/sample/inventory.ini index bb6505909d6..63e7218e1d3 100644 --- a/inventory/sample/inventory.ini +++ b/inventory/sample/inventory.ini @@ -1,40 +1,51 @@ # ## Configure 'ip' variable to bind kubernetes services on a # ## different ip than the default iface # ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value.
+# Best Practices at https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html?highlight=children#group-variables-within-inventory [all] -# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1 -# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2 # node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3 # node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4 + +[bastion] +# raspberrypib ansible_ssh_host=192.168.0.36 ansible_user=ubuntu ansible_host=192.168.0.36 ip=95.54.0.21 etcd_member_name=etcd1 + +[gatewayed] # node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5 # node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6 -# raspberrypib ansible_ssh_host=95.54.0.21 ansible_user=$PI ansible_host=95.54.0.21 ip=172.17.0.1 etcd_member_name=etcd1 -# raspberrypia ansible_ssh_host=95.54.0.22 ansible_user=$PI ansible_host=95.54.0.22 ip=172.17.0.2 etcd_member_name=etcd2 +# raspberrypia ansible_ssh_host=192.168.2.15 ansible_user=ubuntu ansible_host=192.168.2.15 ip=192.168.2.15 -# ## configure a bastion host if your nodes are not directly reachable -# [bastion] -# raspberrypib +[all:children] +gatewayed +bastion [kube-master] -# node1 -# node2 +# node3 +# node4 # raspberrypib [etcd] -# node1 -# node2 -# node3 +# node5 +# node6 # raspberrypib -# raspberrypia [kube-node] -# node2 # node3 # node4 -# node5 -# node6 +# raspberrypib + +[calico-rr] +# node3 +# node4 # raspberrypia -[k8s-cluster:children] +[rack0:children] kube-master kube-node +etcd +calico-rr + +[rack0:vars] +cluster_id="1.0.0.1" + +[k8s-cluster:children] +rack0
diff --git a/library/hap-wiz-env.py b/library/hap-wiz-env.py
new file mode 100644
index 00000000000..5084aa03297
--- /dev/null
+++ b/library/hap-wiz-env.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+import sys
+if int(sys.version.partition('.')[0]) < 3:
+    print("This script needs python 3 or later. Try python3.")
+    exit(1)
+import os
+import re
+import ipaddress as ip
+import locale as lc
+defnet = ip.ip_network('192.168.2.0/24')
+defnet6 = ip.ip_network('2a01:db8:0:1::0/64')
+defintnet = ip.ip_network('192.168.0.0/24')
+defintnet6 = ip.ip_network('2a01:e0a:16b:dc30::0/64')
+myenv = dict()
+
+def parse_args(argv):
+    markers = {
+        "MARKER_BEGIN": "# BEGIN GENERATED hostapd",
+        "MARKER_END": "# END GENERATED hostapd",
+        "MARKERS": "${MARKER_BEGIN}\\n.*\\n.*${MARKER_END}"
+    }
+    myenv.update(markers)
+    defaults = {
+        "file": argv[0],
+        "NET": "",
+        "INTNET": defintnet.with_prefixlen,
+        "INT": "eth0",
+        "SSID": "",
+        "PAWD": "",
+        "MODE": "",
+        "CTY_CODE": "",
+        "CHANNEL": "",
+        "NET_start": "15",
+        "NET_end": "100",
+        "NET6": defnet6.with_prefixlen,
+        "INTNET6": defintnet6.with_prefixlen
+    }
+    r_parse_argv(defaults, argv, 1, 'hc', "Usage: {} [-c,--client] [ssid passphrase [mode] [country_code channel] [net-range-start net-range-end] [priv-network-ipv6/mask-length wan-network-ipv6/mask-length]]".format(argv[0]))
+
+def r_parse_argv(defaults, argv, i, options, usage):
+    """Parse script arguments recursively
+    i -- index in both argv and defaults
+    options -- Literals set of options, e.g. afh where -a, -f, -h are valid options as for -afh
+    """
+    if i >= len(defaults): return
+    pf = "-+[" + options + "]*"
+    client = re.compile(pf + "c(lient)?.*")
+    help = re.compile(pf + "h(elp)?.*")
+    any = re.compile(pf)
+    if i < len(argv):
+        if client.match(argv[i]):
+            myenv['CLIENT'] = argv[i]
+            del argv[i]
+        elif help.match(argv[1]):
+            print(usage)
+            sys.exit(0)
+        elif any.match(argv[i]):
+            del argv[i]
+    n = 0
+    for k in defaults.keys():
+        if n == i:
+            var = [ k, argv[i] if len(argv) > i else defaults[k] ]
+            var = format_argv(var)
+            myenv[var[0]] = var[1]
+            break
+        else: n = n + 1
+    return r_parse_argv(defaults, argv, i+1, options, usage)
+
+def format_argv(var):
+    if var[0] == "NET" or var[0] == "INTNET":
+        net = ip.ip_network(var[1]) if var[1] != "" else defnet
+        m = re.match('(\d*\.){2}(\d*)', str(net.network_address))  # trim last .0
+        if m: var[1] = m.group()
+        if var[0] == "NET":
+            myenv["MASK"] = str(net.netmask)
+            myenv["MASKb"] = "%s" % net.prefixlen
+        if var[0] == "INTNET":
+            myenv["INTMASK"] = str(net.netmask)
+            myenv["INTMASKb"] = "%s" % net.prefixlen
+    if var[0] == "NET6" or var[0] == "INTNET6":
+        net6 = ip.ip_network(var[1]) if var[1] != "" else defnet6
+        m = re.match('(\w*:){1,7}', str(net6.network_address))  # trim last :0
+        if m: var[1] = m.group()
+        if var[0] == "NET6":
+            myenv["MASK6"] = str(net6.netmask)
+            myenv["MASKb6"] = "%s" % net6.prefixlen
+        if var[0] == "INTNET6":
+            myenv["INTMASK6"] = str(net6.netmask)
+            myenv["INTMASKb6"] = "%s" % net6.prefixlen
+    return var
+
+def main(argv):
+    parse_args(argv)
+    while myenv["SSID"] == "":
+        myenv["SSID"] = input("Please set a name for the Wifi Network: ")
+    while len(myenv["PAWD"]) < 8 or len(myenv["PAWD"]) > 63:
+        myenv["PAWD"] = input("Please set a passphrase (8..63 characters) for the SSID " + myenv['SSID'] + ": ")
+    while myenv["MODE"] not in ['a','b','g']:
+        myenv["MODE"] = input("Please set a WIFI mode (a = IEEE 802.11ac, g = IEEE 802.11n; b = IEEE 802.11b) [a]: ")
+        if myenv["MODE"] == "": myenv["MODE"] = 'a'
+    while myenv["CTY_CODE"] == "":
+        cty_code = re.match(".*_([A-Z]*)", lc.getlocale()[0]).group(1)
+        if not cty_code: cty_code = re.match("[A-Z]*", lc.getlocale()).group()
+        myenv["CTY_CODE"] = input("Please set the country code to use [%s]: " % cty_code)
+        if myenv["CTY_CODE"] == "": myenv["CTY_CODE"] = cty_code
+    while myenv["CHANNEL"] == "":
+        myenv["CHANNEL"] = input("Please set the WI-FI channel to use with %s mode [0 = automatic channel selection]: " % myenv['MODE'])
+        if myenv["CHANNEL"] == "": myenv["CHANNEL"] = "0"
+    os.environ.update(myenv)
+    write_exports(myenv)
+
+def write_exports(envdict):
+    path = ".hap-wiz-env.sh"
+    f = open(path, "w")
+    f.write("#!/usr/bin/env bash\nexport")
+    for k,v in myenv.items():
+        f.write(" '{}'='{}'".format(k,v))
+    f.close()
+    os.chmod(path, 0o755)
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/roles/bootstrap-os/tasks/bootstrap-ubuntu.yml b/roles/bootstrap-os/tasks/bootstrap-ubuntu.yml
index 5df0420ed2b..0800caae25d 100644
--- a/roles/bootstrap-os/tasks/bootstrap-ubuntu.yml
+++ b/roles/bootstrap-os/tasks/bootstrap-ubuntu.yml
@@ -30,6 +30,15 @@
   when:
     - http_proxy is defined
+
+- name: Check UFW
+  raw: "ufw status | grep '^Status: inactive$'"
+  register: ufw_status
+  failed_when: false
+  changed_when: false
+  # This command should always run, even in check mode
+  check_mode: false
+  environment: {}
+
 - name: Add http_proxy to /etc/apt/apt.conf if http_proxy is defined
   raw: echo 'Acquire::http::proxy "{{ http_proxy }}";' >> /etc/apt/apt.conf
   become: yes
diff --git
a/roles/container-engine/cri-o/defaults/main.yml b/roles/container-engine/cri-o/defaults/main.yml index 3ae39da22ce..3817c52a11c 100644 --- a/roles/container-engine/cri-o/defaults/main.yml +++ b/roles/container-engine/cri-o/defaults/main.yml @@ -1,2 +1,6 @@ --- crio_rhel_repo_base_url: 'https://cbs.centos.org/repos/paas7-openshift-origin311-candidate/x86_64/os/' +crio_debian_repo_base_url: 'https://ppa.launchpad.net/projectatomic/ppa/ubuntu' +crio_debian_repo_gpgkey: 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x8BECF1637AD8C79D' +ostree_debian_repo_base_url: 'https://ppa.launchpad.net/alexlarsson/flatpak/ubuntu' +ostree_debian_repo_gpgkey: 'https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC793BFA2FA577F07' diff --git a/roles/container-engine/cri-o/tasks/install_host.yml b/roles/container-engine/cri-o/tasks/install_host.yml new file mode 100644 index 00000000000..08b9aebbaf3 --- /dev/null +++ b/roles/container-engine/cri-o/tasks/install_host.yml @@ -0,0 +1,9 @@ +--- +- name: install | Copy crictl binary from download dir + copy: + src: "{{ local_release_dir }}/{{ item }}" + dest: "{{ bin_dir }}/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - crictl diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index cd51e043eb7..82f9a8e58b6 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -35,6 +35,48 @@ path: "{{ item }}" state: directory +- name: ensure ostree repository public key is installed + action: "{{ ostree_repo_key_info.pkg_key }}" + args: + id: "{{ item }}" + url: "{{ ostree_repo_key_info.url }}" + state: present + register: keyserver_task_result + until: keyserver_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + with_items: "{{ ostree_repo_key_info.repo_keys }}" + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) + +- name: ensure cri-o repository public key is installed + action: "{{ crio_repo_key_info.pkg_key }}" + args: + id: "{{ item }}" + url: "{{ crio_repo_key_info.url }}" + state: present + register: keyserver_task_result + until: keyserver_task_result is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" + with_items: "{{ crio_repo_key_info.repo_keys }}" + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) + +- name: ensure cri-o repository is enabled + ignore_errors: yes + action: "{{ crio_repo_info.pkg_repo }}" + args: + repo: "{{ item }}" + state: present + with_items: "{{ crio_repo_info.repos }}" + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) and (crio_repo_info.repos|length > 0) + +- include_tasks: "install_host.yml" + args: + apply: + become: yes + tags: + - upgrade + - name: Install cri-o packages become: yes package: diff --git a/roles/container-engine/cri-o/vars/debian.yml b/roles/container-engine/cri-o/vars/debian.yml index 865c6e07d56..b62a398f2b5 100644 --- a/roles/container-engine/cri-o/vars/debian.yml +++ b/roles/container-engine/cri-o/vars/debian.yml @@ -1,5 +1,31 @@ --- crio_packages: + - libostree-dev - cri-o-1.13 + - cri-o-runc crio_service: crio + +crio_repo_key_info: + pkg_key: apt_key + url: '{{ crio_debian_repo_gpgkey }}' + repo_keys: + - 018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D + +ostree_repo_key_info: + pkg_key: apt_key + url: '{{ ostree_debian_repo_gpgkey }}' + 
repo_keys: + - 690951F1A4DE0F905496E8C6C793BFA2FA577F07 + +crio_repo_info: + pkg_repo: apt_repository + repos: + - > + deb {{ crio_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + main + - > + deb {{ ostree_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + main diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index 92792af2687..32c17636b58 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -1,4 +1,19 @@ --- +- set_fact: + architecture_groups: + x86_64: amd64 + aarch64: arm64 + armv7l: arm + +- name: ansible_architecture_rename + set_fact: + host_architecture: >- + {%- if ansible_architecture in architecture_groups -%} + {{architecture_groups[ansible_architecture]}} + {%- else -%} + {{ansible_architecture}} + {% endif %} + - name: check if atomic host stat: path: /run/ostree-booted @@ -164,6 +179,7 @@ ignore_errors: yes - name: Ensure docker packages are installed + become: yes action: "{{ docker_package_info.pkg_mgr }}" args: name: "{{ item.name }}" @@ -178,6 +194,7 @@ when: ansible_os_family in ["ClearLinux"] - name: get available packages on Ubuntu + become: yes command: apt-cache policy docker-ce when: - docker_task_result is failed diff --git a/roles/container-engine/docker/vars/redhat-aarch64.yml b/roles/container-engine/docker/vars/redhat-arm64.yml similarity index 100% rename from roles/container-engine/docker/vars/redhat-aarch64.yml rename to roles/container-engine/docker/vars/redhat-arm64.yml diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index 766e8796c4e..fcba53b5fcd 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -72,8 +72,8 @@ - name: Create dnsmasq manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{kube_config_dir}}/{{ item.file }}" with_items: - {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment} - {name: dnsmasq, file: dnsmasq-svc.yml, type: svc} @@ -84,11 +84,11 @@ - name: Start Resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" delegate_to: "{{ groups['kube-master'][0] }}" diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 349286c00ef..54e8c272ad9 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -54,6 +54,7 @@ flannel_version: "v0.11.0" flannel_cni_version: "v0.3.0" cni_version: "v0.6.0" +crictl_version: "v1.13.0" weave_version: 2.5.1 pod_infra_version: 3.1 @@ -67,6 +68,7 @@ kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/hyperkube" etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz" cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-{{ image_arch }}-{{ cni_version }}.tgz" +crictl_download_url: 
"https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz" # Checksums hyperkube_checksums: @@ -177,9 +179,14 @@ cni_binary_checksums: arm: ffb62021d2fc6e1266dc6ef7f2058125b6e6b44c016291a2b04a15ed9b4be70a #v0.6.0 arm64: 016bbc989877e35e3cd49fafe11415fb2717e52c74fde6b1650411154cb91b81 #v0.6.0 amd64: f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d +crictl_binary_checksums: + arm: 2e478ebed85f9d70d49fd8f1d1089c8fba6e37d3461aeef91813f1ab0f0df586 #v1.13.0 + arm64: 68949c0cb5a37e7604c145d189cf1e109c08c93d9c710ba663db026b9c6f2746 #v1.13.0 + amd64: 9bdbea7a2b382494aff2ff014da328a042c5aba9096a7772e57fdf487e5a1d51 #v1.13.0 etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch] }}" cni_binary_checksum: "{{ cni_binary_checksums[image_arch] }}" +crictl_binary_checksum: "{{ crictl_binary_checksums[image_arch] }}" hyperkube_binary_checksum: "{{ hyperkube_checksums[image_arch][kube_version] }}" kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}" @@ -338,6 +345,19 @@ downloads: groups: - k8s-cluster + crictl: + file: "{{ container_manager == 'crio' }}" + enabled: true + version: "{{ crictl_version }}" + dest: "{{local_release_dir}}/crictl-{{ crictl_version }}-linux-{{ image_arch }}.tar.gz" + sha256: "{{ crictl_binary_checksum }}" + url: "{{ crictl_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s-cluster + kubeadm: enabled: true file: true diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml index 8da279646cc..ddab6ae9716 100644 --- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml +++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -19,13 +19,31 @@ register: manifests when: inventory_hostname == groups['kube-master'][0] +- name: Import artifacts_dir + import_role: + name: kubernetes/client + defaults_from: main + +- name: Copy dashboard template to ansible host + ignore_errors: yes + copy: + src: "{{ kube_config_dir }}/{{ item.item.file }}" + dest: "{{ artifacts_dir }}/{{ item.item.file }}" + remote_src: yes + become: no + run_once: yes + with_items: "{{ manifests.results }}" + when: kubectl_localhost|default(false) + tags: + - client + - name: Kubernetes Apps | Start dashboard kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index e6a6ce2808c..7233af099b8 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -38,11 +38,11 @@ - name: Kubernetes Apps | Start Resources kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: - "{{ coredns_manifests.results | default({}) }}" @@ -60,7 +60,7 @@ - coredns - nodelocaldns loop_control: - label: "{{ item.file }}" + label: "{{ item.item.file }}" - name: 
Kubernetes Apps | Netchecker import_tasks: tasks/netchecker.yml diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index ba31b0cd95a..456d5f55a84 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -13,7 +13,7 @@ name: "netchecker-server" namespace: "{{ netcheck_namespace }}" filename: "{{ netchecker_server_manifest.stat.path }}" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "deploy" state: latest when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists @@ -44,8 +44,8 @@ - name: Kubernetes Apps | Lay Down Netchecker Template template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: "{{ netchecker_templates }}" register: manifests when: @@ -55,18 +55,18 @@ kube: name: "netchecker-server" namespace: "{{ netcheck_namespace }}" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "po" state: absent when: inventory_hostname == groups['kube-master'][0] - name: Kubernetes Apps | Start Netchecker Resources kube: - name: "{{item.name}}" - namespace: "{{netcheck_namespace}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + name: "{{ item.item.name }}" + namespace: "{{ netcheck_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" when: inventory_hostname == groups['kube-master'][0] and not item is skipped diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 880e0fa07b6..4a51c540fc9 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -1,5 +1,6 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver + ignore_errors: yes uri: url: "{{ kube_apiserver_endpoint }}/healthz" validate_certs: no @@ -41,10 +42,10 @@ - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy kube: - name: "{{item.name}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + name: "{{ item.item.name }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" register: result until: result is succeeded @@ -55,7 +56,7 @@ - inventory_hostname == groups['kube-master'][0] - not item is skipped loop_control: - label: "{{ item.file }}" + label: "{{ item.item.file }}" - name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes template: @@ -69,7 +70,7 @@ - name: Apply workaround to allow all nodes with cert O=system:nodes to register kube: name: "kubespray:system:node" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "clusterrolebinding" filename: "{{ kube_config_dir }}/node-crb.yml" state: latest @@ -96,7 +97,7 @@ - name: Apply webhook ClusterRole kube: name: "system:node-webhook" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "clusterrole" filename: "{{ kube_config_dir }}/node-webhook-cr.yml" state: latest @@ -121,7 +122,7 @@ - name: Grant system:nodes the webhook ClusterRole kube: name: "system:node-webhook" - kubectl: 
"{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "clusterrolebinding" filename: "{{ kube_config_dir }}/node-webhook-crb.yml" state: latest @@ -164,7 +165,7 @@ - name: Apply vsphere-cloud-provider ClusterRole kube: name: "system:vsphere-cloud-provider" - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "clusterrolebinding" filename: "{{ kube_config_dir }}/vsphere-rbac.yml" state: latest @@ -194,7 +195,7 @@ - name: PriorityClass | Create k8s-cluster-critical kube: name: k8s-cluster-critical - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: "PriorityClass" filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml" state: latest diff --git a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml index 54ee49d78ea..22b39b3d407 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/oci.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/oci.yml @@ -10,7 +10,7 @@ - name: Apply OCI RBAC kube: - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" filename: "{{ kube_config_dir }}/oci-rbac.yml" when: - cloud_provider is defined diff --git a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml index 604dc92d172..0f70055227f 100644 --- a/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml +++ b/roles/kubernetes-apps/container_engine_accelerator/nvidia_gpu/tasks/main.yml @@ -42,13 +42,12 @@ - name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}" state: "latest" - with_items: - - "{{container_engine_accelerator_manifests.results}}" + with_items: "{{ container_engine_accelerator_manifests.results }}" when: - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml index 4451577c2d0..c93ecfde79b 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml @@ -69,11 +69,11 @@ - name: CephFS Provisioner | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ cephfs_provisioner_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ cephfs_provisioner_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml index 114cb31b7d5..27d52ad7c7a 100644 --- a/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml +++ 
b/roles/kubernetes-apps/external_provisioner/local_path_provisioner/tasks/main.yml @@ -30,11 +30,11 @@ - name: Local Path Provisioner | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ local_path_provisioner_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ local_path_provisioner_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index 38bf0e0e797..6b970317eba 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -50,13 +50,13 @@ - name: Local Volume Provisioner | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ local_volume_provisioner_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}" state: "latest" with_items: "{{ local_volume_provisioner_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] loop_control: - label: "{{ item.file }}" + label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 01c7666c87a..65ed77dcda1 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -7,8 +7,8 @@ - name: Helm | Lay Down Helm Manifests (RBAC) template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: tiller, file: tiller-namespace.yml, type: namespace} - {name: tiller, file: tiller-sa.yml, type: sa} @@ -20,11 +20,11 @@ - name: Helm | Apply Helm Manifests (RBAC) kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "{{ tiller_namespace }}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ manifests.results }}" when: @@ -73,7 +73,7 @@ {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %} {% if tiller_wait %} --wait{% endif %} --output yaml - | {{bin_dir}}/kubectl apply -f - + | {{ bin_dir }}/kubectl apply -f - changed_when: false when: - (tiller_override is defined and tiller_override != "") or (kube_version is version('v1.11.1', '>=')) diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml index faf87822b2c..d8ca7ad1735 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -47,11 +47,11 @@ - name: Cert Manager | Apply 
manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ cert_manager_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" state: "latest" with_items: "{{ cert_manager_manifests.results }}" when: diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml index d965fe7bc16..706bb9ff1a5 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -52,8 +52,8 @@ - name: NGINX Ingress Controller | Create manifests template: src: "{{ item.file }}.j2" dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}" with_items: "{{ ingress_nginx_templates }}" register: ingress_nginx_manifests when: @@ -61,11 +61,11 @@ - name: NGINX Ingress Controller | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ ingress_nginx_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}" state: "latest" with_items: "{{ ingress_nginx_manifests.results }}" when: diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml index 5c9f5b9aae6..d7dc45443a2 100644 --- a/roles/kubernetes-apps/metrics_server/tasks/main.yml +++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -47,10 +47,10 @@ - name: Metrics Server | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/metrics_server/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/metrics_server/{{ item.item.file }}" state: "latest" with_items: "{{ metrics_server_manifests.results }}" when: diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index 35bfede4801..14ce69cf6ba 100644 --- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -1,18 +1,17 @@ --- - name: Start Calico resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" - with_items: - - "{{ calico_node_manifests.results }}" + with_items: "{{ calico_node_manifests.results }}" when: - inventory_hostname == groups['kube-master'][0] and not item is skipped loop_control: - label: "{{ item.file }}" + label: "{{ item.item.file }}" - name: "calico upgrade complete" shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml" diff --git 
a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index de027b87152..ced06f7d567 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: Canal | Start Resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ canal_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml index 750eef87c02..46b43ed3532 100755 --- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml @@ -1,17 +1,17 @@ --- - name: Cilium | Start Resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: "{{ cilium_node_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] and not item is skipped - name: Cilium | Wait for pods to run - command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" + command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" register: pods_not_ready until: pods_not_ready.stdout.find("cilium")==-1 retries: 30 diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml index 404f33f2cac..1bca923294d 100644 --- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml @@ -2,11 +2,11 @@ - name: Contiv | Create Kubernetes resources kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "kube-system" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ contiv_config_dir }}/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ contiv_config_dir }}/{{ item.item.file }}" state: "{{ item.changed | ternary('latest','present') }}" with_items: "{{ contiv_manifests_results.results }}" run_once: true diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml index b4c771cc1d1..3ed49db810d 100644 --- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: Flannel | Start Resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" 
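+ # Illustrative note, not part of the upstream role: the loops in these tasks
+ # iterate the registered results of an earlier template task, so each element
+ # wraps the original loop entry under `.item`, which is why the fields read
+ # `item.item.name` rather than `item.name`. Assumed shape of one result entry,
+ # with hypothetical values:
+ #   flannel_node_manifests.results[0].item == {name: kube-flannel, file: cni-flannel.yml, type: ds}
+ #   flannel_node_manifests.results[0] also carries .changed and .skipped flags,
+ #   which is why these tasks guard with `not item is skipped`.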
with_items: "{{ flannel_node_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] and not item is skipped diff --git a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml index 77f5b8bae8e..03e0433cc44 100644 --- a/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/kube-router/tasks/main.yml @@ -12,7 +12,7 @@ - inventory_hostname == groups['kube-master'][0] - name: kube-router | Wait for kube-router pods to be ready - command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" + command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" register: pods_not_ready until: pods_not_ready.stdout.find("kube-router")==-1 retries: 30 diff --git a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml index 6aee44cc8ba..48d00538ca3 100644 --- a/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/multus/tasks/main.yml @@ -1,11 +1,11 @@ --- - name: Multus | Start resources kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" - with_items: "{{ multus_manifest_1.results }} + {{multus_manifest_2.results }}" + with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}" when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml index 80d5fdd29f0..7421d71832b 100644 --- a/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml +++ b/roles/kubernetes-apps/persistent_volumes/openstack/tasks/main.yml @@ -10,7 +10,7 @@ - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class kube: name: storage-class - kubectl: "{{bin_dir}}/kubectl" + kubectl: "{{ bin_dir }}/kubectl" resource: StorageClass filename: "{{kube_config_dir}}/openstack-storage-class.yml" state: "latest" diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index d4889a991ad..bbd39d63f59 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -10,8 +10,8 @@ - name: Create calico-kube-controllers manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment} - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa} @@ -24,11 +24,11 @@ - name: Start of Calico kube controllers kube: - name: "{{item.name}}" + name: "{{ item.item.name }}" namespace: "kube-system" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.type}}" - filename: "{{kube_config_dir}}/{{item.file}}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ 
item.item.type }}" + filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" with_items: - "{{ calico_kube_manifests.results }}" @@ -36,4 +36,4 @@ - inventory_hostname == groups['kube-master'][0] - not item is skipped loop_control: - label: "{{ item.file }}" \ No newline at end of file + label: "{{ item.item.file }}" diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml index 89e5d51fbb7..05f8f470320 100644 --- a/roles/kubernetes-apps/registry/tasks/main.yml +++ b/roles/kubernetes-apps/registry/tasks/main.yml @@ -42,11 +42,11 @@ - name: Registry | Apply manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ registry_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" state: "latest" with_items: "{{ registry_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] @@ -65,11 +65,11 @@ - name: Registry | Apply PVC manifests kube: - name: "{{ item.name }}" + name: "{{ item.item.name }}" namespace: "{{ registry_namespace }}" kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item.type }}" - filename: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" state: "latest" with_items: "{{ registry_manifests.results }}" when: diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index 71b505a47f2..89c9c9875b3 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -67,11 +67,12 @@ when: kubeconfig_localhost|default(false) - name: Copy kubectl binary to ansible host - fetch: + ignore_errors: yes + copy: src: "{{ bin_dir }}/kubectl" dest: "{{ artifacts_dir }}/kubectl" - flat: yes - validate_checksum: no + remote_src: yes + delegate_to: localhost become: no run_once: yes when: kubectl_localhost|default(false) diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index d1649241b78..896f75e7ed0 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -36,6 +36,7 @@ when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] - name: Preinstall | reload kubelet + ignore_errors: yes service: name: kubelet state: restarted diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index 7e59059994f..f81b7e2f8dd 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -35,7 +35,7 @@ - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")" assert: that: item.value|type_debug == 'bool' - msg: "{{item.value}} isn't a bool" + msg: "{{ item.value }} isn't a bool" run_once: yes with_items: - { name: download_run_once, value: "{{ download_run_once }}" } @@ -52,13 +52,13 @@ - name: Stop if memory is too small for masters assert: - that: ansible_memtotal_mb >= 742 + that: ansible_memtotal_mb >= 700 ignore_errors: "{{ ignore_assert_errors }}" when: inventory_hostname in groups['kube-master'] - name: Stop if memory is too small for nodes assert: - that: ansible_memtotal_mb >= 463 + that: ansible_memtotal_mb 
>= 400 ignore_errors: "{{ ignore_assert_errors }}" when: inventory_hostname in groups['kube-node'] @@ -132,7 +132,7 @@ - name: "Get current version of calico cluster version" shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version:' | awk '{ print $3}'" register: calico_version_on_server - async: 10 + async: 120 poll: 3 run_once: yes delegate_to: "{{ groups['kube-master'][0] }}" diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index f9af22f150a..efd083a951d 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -3,7 +3,7 @@ architecture_groups: x86_64: amd64 aarch64: arm64 - armv7l: arm64 + armv7l: arm - name: ansible_architecture_rename set_fact: diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml index 753e0a1e191..998c67ea595 100644 --- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml +++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml @@ -18,7 +18,7 @@ - master - node with_items: - - "{{bin_dir}}" + - "{{ bin_dir }}" - "{{ kube_config_dir }}" - "{{ kube_cert_dir }}" - "{{ kube_manifest_dir }}" diff --git a/roles/network_plugin/calico/tasks/check.yml b/roles/network_plugin/calico/tasks/check.yml index 88fbe63a2c6..9a78bb397a9 100644 --- a/roles/network_plugin/calico/tasks/check.yml +++ b/roles/network_plugin/calico/tasks/check.yml @@ -14,7 +14,7 @@ register: calico_version_on_server run_once: yes delegate_to: "{{ groups['kube-master'][0] }}" - async: 10 + async: 120 poll: 3 changed_when: false diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index acf3fd76114..93d5160cb5c 100644 --- a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -329,8 +329,8 @@ - name: Calico | Create calico manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: calico-config, file: calico-config.yml, type: cm} - {name: calico-node, file: calico-node.yml, type: ds} diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index 94527493763..d552f0d386f 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -40,8 +40,8 @@ - name: Canal | Create canal node manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: canal-config, file: canal-config.yaml, type: cm} - {name: canal-node, file: canal-node.yaml, type: ds} diff --git a/roles/network_plugin/cilium/tasks/main.yml b/roles/network_plugin/cilium/tasks/main.yml index 6c55be6633d..d80c06c0738 100755 --- a/roles/network_plugin/cilium/tasks/main.yml +++ b/roles/network_plugin/cilium/tasks/main.yml @@ -27,8 +27,8 @@ - name: Cilium | Create Cilium node manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: cilium, file: cilium-config.yml, type: cm} - {name: cilium, file: cilium-crb.yml, type: clusterrolebinding} diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 
c0c3aee3ebd..a0a45ee629f 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -3,8 +3,8 @@ - name: Flannel | Create Flannel manifests template: - src: "{{item.file}}.j2" - dest: "{{kube_config_dir}}/{{item.file}}" + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" with_items: - {name: flannel, file: cni-flannel-rbac.yml, type: sa} - {name: kube-flannel, file: cni-flannel.yml, type: ds} diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml index a6a481e4c2a..029016cbb4f 100644 --- a/roles/network_plugin/kube-router/tasks/annotate.yml +++ b/roles/network_plugin/kube-router/tasks/annotate.yml @@ -1,20 +1,20 @@ --- - name: kube-router | Add annotations on kube-master - command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" + command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_master }}" delegate_to: "{{groups['kube-master'][0]}}" when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master'] - name: kube-router | Add annotations on kube-node - command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" + command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_node }}" delegate_to: "{{groups['kube-master'][0]}}" when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node'] - name: kube-router | Add common annotations on all servers - command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" + command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_all }}" delegate_to: "{{groups['kube-master'][0]}}" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 33c0a8f3e20..f1d3cc62a24 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -128,12 +128,14 @@ - mounts - name: flush iptables + ignore_errors: yes iptables: table: "{{ item }}" flush: yes with_items: - filter - nat + - mangle when: flush_iptables|bool tags: - iptables diff --git a/roles/win_nodes/kubernetes_patch/tasks/main.yml b/roles/win_nodes/kubernetes_patch/tasks/main.yml index b2a3ad89755..e81e5c79fea 100644 --- a/roles/win_nodes/kubernetes_patch/tasks/main.yml +++ b/roles/win_nodes/kubernetes_patch/tasks/main.yml @@ -16,11 +16,11 @@ # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch" - name: Check current nodeselector for kube-proxy daemonset - shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'" + shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'" register: current_kube_proxy_state - name: Apply nodeselector patch for kube-proxy daemonset - shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\"" + shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy 
--namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\"" args: chdir: "{{ kubernetes_user_manifests_path }}" register: patch_kube_proxy_state diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index 70e81828e5a..d1e020beb51 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -32,13 +32,13 @@ - name: etcd_info cmd: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | default('http://127.0.0.1:2379') }} cluster-health" - name: calico_info - cmd: "{{bin_dir}}/calicoctl node status" + cmd: "{{ bin_dir }}/calicoctl node status" when: '{{ kube_network_plugin == "calico" }}' - name: calico_workload_info - cmd: "{{bin_dir}}/calicoctl get workloadEndpoint -o wide" + cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide" when: '{{ kube_network_plugin == "calico" }}' - name: calico_pool_info - cmd: "{{bin_dir}}/calicoctl get ippool -o wide" + cmd: "{{ bin_dir }}/calicoctl get ippool -o wide" when: '{{ kube_network_plugin == "calico" }}' - name: weave_info cmd: weave report diff --git a/scripts/gen_digest_json.sh b/scripts/gen_digest_json.sh index bb7dc8994bc..d06d6f330a8 100755 --- a/scripts/gen_digest_json.sh +++ b/scripts/gen_digest_json.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash declare -a def_list=(v1.11.0 v1.11.1 v1.11.2 v1.11.3 v1.11.5 v1.12.0 v1.12.1 v1.12.2 v1.12.3 v1.12.4 v1.12.5 v1.12.6 v1.13.0 v1.13.1 v1.13.2 v1.13.3 v1.13.4 v1.13.5) -declare -a etcd_def_list=(v3.3.12) +declare -a etcd_def_list=(v3.3.24) declare -a cni_def_list=(v0.6.0) +declare -a crio_def_list=(v1.13.0) image=$1 image_arch=$2 -[[ "$#" -lt 2 ]] && echo 'Usage: $0 --etcd|--cni|kubeadm|hyperkube -Usage: $0 --etcd|--cni|kubeadm|hyperkube version_list' && exit 1; +[[ "$#" -lt 2 ]] && echo 'Usage: $0 --etcd|--cni|--crio|kubeadm|hyperkube -Usage: $0 --etcd|--cni|--crio|kubeadm|hyperkube version_list' && exit 1; function sort_list() { printf '%s ' "${@}" | sort -Vr } @@ -47,6 +48,22 @@ while [ "$#" -gt 0 ]; do case "$1" in rm $tmpfile done break;; + --crio) + image="crictl" + if [[ "$#" -gt 2 ]]; then + shift; shift + version_list=$(sort_list ${@}) + else + version_list=$(sort_list ${crio_def_list[@]}) + fi + printf "%s_binary_checksums:\n" "${image}" + for crio_version in ${version_list[@]}; do + tmpfile="/tmp/${image}-${crio_version}" + curl -sSL https://github.com/kubernetes-sigs/cri-tools/releases/download/${crio_version}/crictl-${crio_version}-linux-${image_arch}.tar.gz > $tmpfile + print_checksums $tmpfile $image_arch $crio_version + rm $tmpfile + done + break;; *) if [[ "$#" -gt 2 ]]; then shift; shift diff --git a/scripts/hap-wiz-bionic.sh b/scripts/hap-wiz-bionic.sh new file mode 100755 index 00000000000..03a2c52fc60 --- /dev/null +++ b/scripts/hap-wiz-bionic.sh @@ -0,0 +1,144 @@ +#!/usr/bin/env bash +if [ $USER != "root" ] +then + echo -e "You need to run this script as root." + exit 1 +fi +if [ ! -f /etc/os-release ] +then + echo -e "This script is made for Linux." 
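+ # Not a Linux host: the next line is a courtesy fallback that prints the macOS
+ # version via sw_vers when that tool is available, before the script exits.
+ # Hypothetical output, shown for illustration only:
+ #   ProductName:    Mac OS X
+ #   ProductVersion: 10.14.4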
+ [ -x "$(which sw_vers)" ] && sw_vers + exit 1 +fi +[ "$#" -lt 2 ] && python3 library/hap-wiz-env.py --help && exit 1 +export work_dir=$(echo $0 | awk -F'/' '{ print $1 }')'/' +python3 ${work_dir}../library/hap-wiz-env.py $* +source .hap-wiz-env.sh +echo "Set Private Network $NET.0/$MASK" +echo "Set Private Network IPv6 ${NET6}0/$MASKb6" +echo "Set WAN Network $INTNET.0/$INTMASK" +echo "Set WAN Network IPv6 ${INTNET6}0/$INTMASKb6" +[ -z $CLIENT ] && rm -f hostapd.log +[ -z $CLIENT ] && touch hostapd.log +[ -z $CLIENT ] && [ -z $(which hostapd) ] && sudo apt-get -y install hostapd +[ -z $CLIENT ] && [ -z $(which brctl) ] && sudo apt-get -y install bridge-utils +[ -z $CLIENT ] && [ -z $(which dhcpd) ] && sudo apt-get -y install isc-dhcp-server +logger -st hostapd "remove bridge (br0) to wlan0" +source ${work_dir}init.d/init_net_if.sh -r +logger -st service "shutdown services" +sudo service wpa_supplicant stop +sudo service hostapd stop +sudo systemctl disable wpa_supplicant.service +source ${work_dir}init.d/init_dhcp_serv.sh -r +source ${work_dir}init.d/init_ufw.sh -r +[ -z $CLIENT ] && echo -e "### HostAPd will configure a public wireless network +IPv4 ${NET}.0/${MASKb} - ${SSID} +Example SSH'ed through bastion 'jump' host: +ssh -J $USER@$(ifconfig ${INT} | grep 'inet ' | awk '{ print $2 }') $USER@${NET}.15 +------------------------------- +" +[ -z $CLIENT ] && sleep 3 +[ -z $CLIENT ] && logger -t hostapd "Configure Access Point $SSID" +PSK_FILE=/etc/hostapd-psk +[ -z $CLIENT ] && echo -e "interface=wlan0 # the interface used by the AP +driver=nl80211 +ssid=${SSID} + +#ieee80211ac=1 # 5Ghz support +#hw_mode=a +#channel=36 +# 2,4-2,5Ghz (HT 20MHz band) +#hw_mode=b +#channel=13 +#ieee80211n=1 # 802.11n (HT 40 MHz) support +#hw_mode=g # 2,4-2,5Ghz (HT 40MHz band) +#channel=6 +hw_mode=${MODE} +channel=${CHANNEL} # 0 means the AP will search for the channel with the least interference +#bridge=br0 +ieee80211d=1 # limit the frequencies used to those allowed in the country +country_code=${CTY_CODE} # the country code +wmm_enabled=1 # QoS support + +#source: IBM https://www.ibm.com/developerworks/library/l-wifiencrypthostapd/index.html +auth_algs=1 +wpa=2 +wpa_psk_file=${PSK_FILE} +#wpa_passphrase= +wpa_key_mgmt=WPA-PSK +# Windows client may use TKIP +wpa_pairwise=CCMP TKIP +rsn_pairwise=CCMP + +# Station MAC address -based authentication (driver=hostap or driver=nl80211) +# 0 = accept unless in deny list +# 1 = deny unless in accept list +# 2 = use external RADIUS server (accept/deny lists are searched first) +macaddr_acl=0 + +# Accept/deny lists are read from separate files +#accept_mac_file=/etc/hostapd/hostapd.accept +deny_mac_file=/etc/hostapd/hostapd.deny + +# Beacon interval in kus (1.024 ms) +beacon_int=100 + +# DTIM (delivery traffic information message) +dtim_period=2 + +# Maximum number of stations allowed in station table +max_num_sta=255 + +# RTS/CTS threshold; 2347 = disabled (default) +rts_threshold=2347 + +# Fragmentation threshold; 2346 = disabled (default) +fragm_threshold=2346 +" | sudo tee /etc/hostapd/hostapd.conf +[ -z $CLIENT ] && sudo touch /etc/hostapd/hostapd.deny +[ -z $CLIENT ] && echo -e "00:00:00:00:00:00 $(wpa_passphrase ${SSID} ${PAWD} | grep 'psk' | awk -F= 'FNR == 2 { print $2 }')" | sudo tee ${PSK_FILE} +[ -z $CLIENT ] && logger -st hostapd "configure Access Point as a Service" +[ -z $CLIENT ] && sudo sed -i -e /DAEMON_CONF=/s/^\#// -e /DAEMON_CONF=/s/=\".*\"/=\"\\/etc\\/hostapd\\/hostapd.conf\"/ /etc/default/hostapd 2> hostapd.log +[ -z $CLIENT ] && [ -s hostapd.log ] && exit 1
+[ -z $CLIENT ] && sudo sed -i -e /DAEMON_OPTS=/s/^\#// -e "/DAEMON_OPTS=/s/=\".*\"/=\"-i wlan0\"/" /etc/default/hostapd 2> hostapd.log +[ -z $CLIENT ] && [ -s hostapd.log ] && exit 1 +[ -z $CLIENT ] && sudo cat /etc/default/hostapd | grep "DAEMON" +[ -z $CLIENT ] && read -p "Do you want to Install Bridged Internet Sharing now [PRESS ENTER TO CANCEL/n/y] ?" SHARE +if [ -z $CLIENT ]; then case $SHARE in +# +# Bridge Mode +# + 'y'|'Y'*) + logger -st brctl "share internet connection from ${INT} to wlan0 over bridge" + sudo sed -i /bridge=br0/s/^\#// /etc/hostapd/hostapd.conf + source ${work_dir}init.d/init_net_if.sh + logger -st dhcpd "configure dynamic dhcp addresses ${NET}.${NET_start}-${NET_end}" + source ${work_dir}init.d/init_dhcp_serv.sh + ;; + 'n'|'N'*) + [ -z $(which dnsmasq) ] && sudo apt-get -y install dnsmasq + logger -st dnsmasq "configure a DNS server as a Service" + echo -e "bogus-priv +filterwin2k +# no-resolv +interface=wlan0 # Use the required wireless interface - usually wlan0 +#no-dhcp-interface=wlan0 +dhcp-range=${NET}.15,${NET}.100,${MASK},${MASKb}h + # " | sudo tee /etc/dnsmasq.conf + logger -st dnsmasq "start DNS server" + sudo dnsmasq -x /var/run/dnsmasq.pid -C /etc/dnsmasq.conf + sleep 3 + logger -st modprobe "enable IP Masquerade" + sudo modprobe ipt_MASQUERADE + sleep 1 + logger -st network "rendering configuration and restarting networks" + source ${work_dir}init.d/init_net_if.sh --client + sudo systemctl unmask dnsmasq.service + sudo systemctl enable dnsmasq.service + sudo service dnsmasq start + ;; + *);; +esac; else + source ${work_dir}init.d/init_net_if.sh $CLIENT +fi +source ${work_dir}init.d/net_restart.sh $CLIENT diff --git a/scripts/init.d/init_dhcp_serv.sh b/scripts/init.d/init_dhcp_serv.sh new file mode 100755 index 00000000000..dcf7256971b --- /dev/null +++ b/scripts/init.d/init_dhcp_serv.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +export work_dir=$(echo $0 | awk -F'/' '{ print $1 }')'/' +[ ! 
-f .hap-wiz-env.sh ] && python3 ${work_dir}../library/hap-wiz-env.py $* +source .hap-wiz-env.sh +while [ "$#" -gt 0 ]; do case $1 in + -r*|-R*) + sudo systemctl disable dnsmasq.service + sudo service dnsmasq stop + sudo service isc-dhcp-server stop + sudo service isc-dhcp-server6 stop + sudo systemctl disable isc-dhcp-server.service + sudo systemctl disable isc-dhcp-server6.service + return;; + -h*|--help) + echo "Usage: $0 [-r] + Initializes DHCP services (without dnsmasq) + -r + Disable all dhcp (also with dnsmasq) services" + exit 1;; + *);; +esac; shift; done +echo -e "option domain-name-servers ${NET}.1; + +default-lease-time 600; +max-lease-time 7200; + +authoritative; + +log-facility local7; + +subnet ${INTNET}.0 netmask ${INTMASK} {} +subnet ${NET}.0 netmask ${MASK} { +#option domain-name "wifi.localhost"; +option routers ${NET}.1; #hostapd wlan0 +option subnet-mask ${MASK}; +option broadcast-address ${NET}.255; # dhcpd br0 +range ${NET}.${NET_start} ${NET}.${NET_end}; +# Example for a fixed host address +# host specialclient { +# host-identifier option client-id 00:01:00:01:4a:1f:ba:e3:60:b9:1f:01:23:45; +# fixed-address ${NET}.51; } +} +" | sudo tee /etc/dhcp/dhcpd.conf +sudo cat /etc/dhcp/dhcpd.conf +echo -e "option dhcp6.name-servers ${NET6}1; + +default-lease-time 600; +max-lease-time 7200; + +authoritative; + +log-facility local7; + +subnet6 ${INTNET6}0/${INTMASKb6} {} +subnet6 ${NET6}0/${MASKb6} { +#option dhcp6.domain-name "wifi.localhost"; +range6 ${NET6}${NET_start} ${NET6}${NET_end}; +# Example for a fixed host address +# host specialclient { +# host-identifier option dhcp6.client-id 00:01:00:01:4a:1f:ba:e3:60:b9:1f:01:23:45; +# fixed-address6 ${NET6}:127; } +} +" | sudo tee /etc/dhcp/dhcpd6.conf +sudo sed -i -e "s/INTERFACESv4=\".*\"/INTERFACESv4=\"wlan0\"/" /etc/default/isc-dhcp-server +sudo sed -i -e "s/INTERFACESv6=\".*\"/INTERFACESv6=\"wlan0\"/" /etc/default/isc-dhcp-server +sudo cat /etc/default/isc-dhcp-server +sleep 1 +logger -st dhcpd "start DHCP server" +sudo systemctl unmask isc-dhcp-server.service +sudo systemctl enable isc-dhcp-server.service +sudo service isc-dhcp-server start +sudo systemctl unmask isc-dhcp-server6.service +sudo systemctl enable isc-dhcp-server6.service +sudo service isc-dhcp-server6 start diff --git a/scripts/init.d/init_net_if.sh b/scripts/init.d/init_net_if.sh new file mode 100755 index 00000000000..f4f49abb71b --- /dev/null +++ b/scripts/init.d/init_net_if.sh @@ -0,0 +1,130 @@ +#!/usr/bin/env bash +export work_dir=$(echo $0 | awk -F'/' '{ print $1 }')'/' +[ ! 
-f .hap-wiz-env.sh ] && python3 ${work_dir}../library/hap-wiz-env.py $* +source .hap-wiz-env.sh +yaml='01-hostap.yaml' +clientyaml='01-cliwpa.yaml' +NP_ORIG=${work_dir}../../.netplan-store && sudo mkdir -p $NP_ORIG +logger -st netplan "disable cloud-init" +sudo mv -fv /etc/netplan/50-cloud-init.yaml $NP_ORIG +echo -e "network: { config: disabled }" | sudo tee /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg +while [ "$#" -gt 0 ]; do case $1 in + -r*|-R*) + sudo sed -i /bridge=br0/s/^/\#/ /etc/hostapd/hostapd.conf + if [ -f /etc/init.d/networking ]; then + sudo sed -i s/"${MARKERS}"//g /etc/network/interfaces + else + # ubuntu server + logger -st netplan "move configuration to $NP_ORIG" + sudo mv -fv /etc/netplan/* $NP_ORIG + logger -st netplan "reset configuration to cloud-init" + sudo mv -fv $NP_ORIG/50-cloud-init.yaml /etc/netplan + sudo rm -fv /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg + fi + return;; + -c*|--client) + if [ -f /etc/init.d/networking ]; then + echo -e "${MARKER_BEGIN} +auto lo br0 +iface lo inet loopback + +allow-hotplug ${INT} +iface ${INT} inet manual + +allow-hotplug wlan0 +iface wlan0 inet manual +${MARKER_END}" | sudo tee /etc/network/interfaces + sudo /etc/init.d/networking restart + else + logger -st netplan "/etc/netplan/$clientyaml was created" + echo -e "network: + version: 2 + renderer: networkd + ethernets: + ${INT}: + dhcp4: yes + wifis: + wlan0: + dhcp4: yes + dhcp6: yes + access-points: + \"${SSID}\": + password: \"${PAWD}\"" | sudo tee /etc/netplan/$clientyaml + logger -st netplan "apply $clientyaml" + sudo netplan try --timeout 12 + fi + return;; + -h*|--help) + echo "Usage: $0 [-r] [-c,--client] + Initializes netplan.io networks plans and eventually restart them. + -r + Removes bridge interface + --client + Render as Wifi client to netplan" + exit 1;; + *);; +esac; shift; done +if [ -f /etc/init.d/networking ]; then +# ubuntu < 18.04 + echo -e "${MARKER_BEGIN} +auto lo br0 +iface lo inet loopback + +allow-hotplug ${INT} +iface ${INT} inet dhcp + network ${INTNET}.0 + +allow-hotplug wlan0 +iface wlan0 inet static + address ${NET}.1 + network ${NET}.0 + netmask ${MASK} +# Bridge setup +auto br0 +iface br0 inet static + address 10.33.0.1 + network 10.33.0.0 + netmask 255.255.255.0 + dns-nameservers 10.33.0.1 8.8.8.8 8.8.4.4 +bridge_ports wlan0 ${INT} +${MARKER_END}" | sudo tee /etc/network/interfaces + logger -st brctl "share the internet wireless over bridge" + sudo brctl addbr br0 + sudo brctl addif br0 ${INT} wlan0 +else +# new 18.04 netplan server (DHCPd set to bridge) +logger -st netplan "/etc/netplan/$yaml was created" + echo -e "network: + version: 2 + renderer: networkd + ethernets: + ${INT}: + dhcp4: yes + dhcp6: no + wifis: + wlan0: + dhcp4: yes + dhcp6: yes + access-points: + \"\": + password: + addresses: [${NET}.1/${MASKb}, '${NET6}1/${MASKb6}'] + bridges: + br0: + addresses: [10.33.0.1/24, '2001:db8:1:46::1/64'] + nameservers: + addresses: [10.33.0.1, '2001:db8:1:46::1', 8.8.8.8, 8.8.4.4] + interfaces: + - wlan0 + - eth0" | sudo tee /etc/netplan/$yaml +fi +logger -st network "rendering configuration and restarting networks" +if [ -f /etc/init.d/networking ]; then + sudo /etc/init.d/networking restart +else + sudo netplan try --timeout 12 2> /dev/null || exit 1 +fi +logger -st dhclient "redeem ip address ${INT}" +sudo dhclient ${INT} +logger -st ip "wakeup wlan0" +sudo ip link set dev wlan0 up diff --git a/scripts/init.d/init_ufw.sh b/scripts/init.d/init_ufw.sh new file mode 100755 index 00000000000..0b1f6a253c4 --- /dev/null +++ 
b/scripts/init.d/init_ufw.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +export work_dir=$(echo $0 | awk -F'/' '{ print $1 }')'/' +[ ! -f .hap-wiz-env.sh ] && python3 ${work_dir}../library/hap-wiz-env.py $* +source .hap-wiz-env.sh +while [ "$#" -gt 0 ]; do case $1 in + -r*|-R*) + sudo sed -i -e s/"${MARKERS}"//g /etc/ufw/before.rules + sudo ufw disable + return;; + -c*|--client) + return;; + -h*|--help) + echo "Usage: $0 [-r] + Configure the firewall rules + -r + Removes all rules, disable firewall" + exit 1;; + *);; +esac; shift; done + +logger -st ipv4 "enable ip forwarding v4" +sudo sed -i /net.ipv4.ip_forward/s/^\#// /etc/sysctl.conf /etc/ufw/sysctl.conf 2> hostapd.log +[ -s hostapd.log ] && exit 1 +logger -st ufw "configure firewall" +sudo sed -i /DEFAULT_FORWARD_POLICY/s/DROP/ACCEPT/g /etc/default/ufw 2> hostapd.log +[ -s hostapd.log ] && exit 1 +sleep 1 +logger -st ufw "add ip masquerading rules" +echo -e "${MARKER_BEGIN} +# nat Table rules +*nat +:POSTROUTING ACCEPT [0:0] + +# Forward traffic from wlan0 through eth0. +-A POSTROUTING -s ${NET}.0/${MASKb} -o ${INT} -j MASQUERADE +-A POSTROUTING -s ${NET6}0/${MASKb6} -o ${INT} -j MASQUERADE + +# don't delete the 'COMMIT' line or these nat table rules won't be processed +COMMIT +${MARKER_END}" | sudo tee /tmp/input.rules +sudo sh -c 'cat /tmp/input.rules /etc/ufw/before.rules > /tmp/before.rules && mv /tmp/before.rules /etc/ufw/before.rules' 2> hostapd.log +[ -s hostapd.log ] && exit 1 +sudo rm /tmp/input.rules +sleep 1 +logger -st ufw "add packet ip forwarding" +echo -e "${MARKER_BEGIN} +-A ufw-before-forward -m state --state RELATED,ESTABLISHED -j ACCEPT +-A ufw-before-forward -i wlan0 -s ${NET}.0/${MASKb} -o ${INT} -m state --state NEW -j ACCEPT +-A ufw-before-forward -i wlan0 -s ${NET6}0/${MASKb6} -o ${INT} -m state --state NEW -j ACCEPT +${MARKER_END} +" | sudo tee /tmp/input.rules +sudo sed -i -e "/^# End required lines/r /tmp/input.rules" /etc/ufw/before.rules 2> hostapd.log +[ -s hostapd.log ] && exit 1 +sudo rm /tmp/input.rules +sleep 1 +logger -st ufw "allow ${NET}.0" +sudo ufw allow from ${NET}.0/${MASKb} +sudo ufw allow from ${NET6}0/${MASKb6} +sudo ufw enable diff --git a/scripts/init.d/net_restart.sh b/scripts/init.d/net_restart.sh new file mode 100755 index 00000000000..a106cab3298 --- /dev/null +++ b/scripts/init.d/net_restart.sh @@ -0,0 +1,54 @@ +#!/bin/bash +export work_dir=$(echo $0 | awk -F'/' '{ print $1 }')'/' +[ ! -f .hap-wiz-env.sh ] && python3 ${work_dir}../library/hap-wiz-env.py $* +source .hap-wiz-env.sh +logger -st reboot "to complete the Access Point installation, reboot the Raspberry PI" +read -p "Do you want to reboot now [y/N] ?" REBOOT +if [ -f /etc/init.d/networking ]; then + sudo /etc/init.d/networking restart +else + logger -st 'rc.local' 'Work around fix netplan apply on reboot' + if [ ! 
-f /etc/rc.local ]; then + printf '%s\n' "#!/bin/bash" "exit 0" | sudo tee /etc/rc.local + sudo chmod +x /etc/rc.local + fi + sudo cp -f /lib/systemd/system/rc-local.service /etc/systemd/system + printf '%s\n' "[Install]" "WantedBy=multi-user.target" | sudo tee -a /etc/systemd/system/rc-local.service + sudo systemctl enable rc-local + # apply once and disable + sed -i -e s/"${MARKERS}"//g -e /"^exit"/s/"^"/"${MARKER_BEGIN}\n\ +netplan apply\n\ +systemctl restart hostapd\n\ +netplan apply\n\ +dhclient ${INT}\n\ +ip link set dev wlan0 up\n\ +dhclient br0\n\ +systemctl restart isc-dhcp-server\n\ +systemctl restart isc-dhcp-server6\n\ +${MARKER_END}\n"/ /etc/rc.local + logger -st sed "/etc/rc.local added command lines" + cat /etc/rc.local +fi +source ${work_dir}init.d/init_ufw.sh +case $REBOOT in + 'y'|'Y'*) sudo reboot;; + *) + logger -st sysctl "restarting Access Point" + sudo systemctl unmask hostapd.service + sudo systemctl enable hostapd.service + # FIX driver AP_DISABLED error : first start up interface + sudo netplan apply + sudo service hostapd start + sleep 1 + logger -st dhcpd "restart DHCP server" + # Restart up interface + sudo dhclient ${INT} + sudo ip link set dev wlan0 up + sudo dhclient br0 + sudo service isc-dhcp-server restart + sudo service isc-dhcp-server6 restart + systemctl status hostapd.service + systemctl status isc-dhcp-server.service + systemctl status isc-dhcp-server6.service + exit 0;; +esac diff --git a/scripts/my_cluster.sh b/scripts/my_cluster.sh new file mode 100755 index 00000000000..4082e8bdb3e --- /dev/null +++ b/scripts/my_cluster.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash +# Usage: $ scripts/my_cluster.sh +# Update Ansible inventory file with inventory builder . Single master cluster is possible. +# How to use 2 nodes with Calico and Route Reflectors, in docs/calico.md. +# +function cfrm_act () { +def_go=$2 +y='y' +n='n' +[ "$def_go" == "$y" ] && y='Y' +[ "$def_go" == "$n" ] && n='N' +while true; do case $go in + [nN]*) break;; + [yY]*) echo $go; break;; + *) + read -p "Confirm $1 [${y}/${n}] ? 
" go + [ -z $go ] && go=$def_go;; +esac; done +} +IFS=' ' # Read prompt Field Separator +if [[ "$#" -gt 1 ]]; then IPS=$@; else while [[ -z $IPS ]]; do + read -p "Please type in up to 6 local network ip${IFS}2°ip${IFS}3°ip...: (CTRL-C to exit) " -a ips + echo -e "\n" + if [[ ${#ips[@]} -ge 1 ]]; then + if [[ $(cfrm_act "you've entered the correct ips addresses ${ips[0]} ${ips[1]} ${ips[2]} ${ips[3]} ${ips[4]} ${ips[5]}" 'n') > /dev/null ]]; then + IPS=${ips[@]} + fi + else + echo -e "Enter one or more valid IP addresses of the form X.X.X.X : X€[0;255] \n" + fi +done; fi +logger -st kubespray "IPS=(${IPS[@]})\n" +YAML=inventory/mycluster/hosts.yaml +INI=inventory/mycluster/hosts.ini +logger -st kubespray "****** K8s ansible : Generate $INI and $YAML ******" +python contrib/inventory_builder/inventory.py ${IPS[@]} print_cfg +if [ $(cfrm_act "Regenerate the $INI file, are the machines up and running" 'n') > /dev/null ]; then + CONFIG_FILE=$INI python contrib/inventory_builder/inventory.py ${IPS[@]} print_cfg > $YAML +fi +cat inventory/mycluster/group_vars/all/all.yml +[ $(cfrm_act "the options" 'y') > /dev/null ] || exit 0 +cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml +[ $(cfrm_act "the kubernetes configuration" 'y') > /dev/null ] || exit 0 + +declare PI=ubuntu # replace 'pi' with 'ubuntu' or any other user +for ip in ${IPS[@]}; do + logger -st kubespray-$ip "****** K8s ip : $PI@$ip ******"; + ssh-copy-id $PI@$ip; + logger -st kubespray-$ip "configure sshd"; + ssh $PI@$ip "sudo echo 'PermitRootLogin yes' | sudo tee -a /etc/ssh/sshd_config"; + ssh $PI@$ip sudo cat /etc/ssh/sshd_config | grep PermitRootLogin; + logger -st kubespray-$ip "install go"; + ssh $PI@$ip sudo apt-get install golang -y; + logger -st kubespray-$ip "trusted repository"; + ssh $PI@$ip sudo add-apt-repository \ + "deb [arch=arm64] http://ppa.launchpad.net/ansible/ansible/ubuntu bionic main"; + ssh $PI@$ip sudo add-apt-repository \ + "deb [arch=arm64] http://ppa.launchpad.net/projectatomic/ppa/ubuntu bionic main"; + ssh $PI@$ip sudo add-apt-repository \ + "deb [arch=arm64] http://ppa.launchpad.net/alexlarsson/flatpak/ubuntu bionic main"; + logger -st kubespray-$ip "add kube user into ubuntu group"; + ssh $PI@$ip sudo usermod -a -G ubuntu kube; + logger -st kubespray-$ip "disable ubuntu firewall"; + ssh $PI@$ip sudo ufw disable; + # logger -st kubespray-$ip "Launchpad PPA repository keys" + # ssh $PI@$ip sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 & + # ssh $PI@$ip sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8becf1637ad8c79d & + # ssh $PI@$ip sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys c793bfa2fa577f07 & + logger -st docker-$ip "allow https repository"; + ssh $PI@$ipsudo apt-get install \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common; + logger -st docker-$ip "add docker repository packages"; + ssh $PI@$ip sudo add-apt-repository \ + "deb [arch=arm64] https://download.docker.com/linux/ubuntu \ + bionic \ + stable"; + logger -st docker-$ip "add docker repository key"; + ssh $PI@$ipsudo "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -"; + logger -st docker-$ip "remove old docker-ce"; + ssh $PI@$ip sudo apt-get remove docker docker-engine docker.io containerd runc; + logger -st docker-$ip "get docker-ce for ubuntu bionic"; + ssh $PI@$ip "sudo apt-get update && sudo apt-get install docker-ce -y"; + ssh $PI@$ip sudo apt-get install docker-ce-cli containerd.io; 
+done cat roles/kubernetes/preinstall/tasks/0020-verify-settings.yml | grep -b2 'that: ansible_memtotal_mb' logger -st kubespray "****** K8s ansible : Run Playbook cluster.yml ******" scripts/my_playbook.sh -i $INI cluster.yml --timeout=60 #scripts/my_playbook.sh -i $YAML cluster.yml --timeout=60 #for ip in ${IPS[@]}; do # logger -st kubespray-$ip "****** K8s ip : $PI@$ip ******" # logger -st kubespray-$ip "enable firewall" # scripts/my_playbook.sh --firewall-setup $PI@$ip enable # scripts/my_playbook.sh --firewall-setup $PI@$ip status #done diff --git a/scripts/my_firewall.sh b/scripts/my_firewall.sh new file mode 100755 index 00000000000..fc6d1b63fdf --- /dev/null +++ b/scripts/my_firewall.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +usage="Usage: $0 [-n[ode]] user@host status|enable|disable|.." +node=0 +host='' +iptables_reset='sudo iptables -t nat -F +sudo iptables -t mangle -F +sudo iptables -F +sudo iptables -X' +ufw_rules='sudo ufw allow OpenSSH; +sudo ufw allow 8443/tcp; +sudo ufw allow 6443/tcp; +sudo ufw allow 2379/tcp; +sudo ufw allow 2380/tcp; +sudo ufw allow 10250/tcp; +sudo ufw allow 10251/tcp; +sudo ufw allow 10252/tcp; +sudo ufw allow 10255/tcp;' +ufw_action='' +ufw_log='sudo ufw logging on; +sudo ufw logging medium' +while [ "$#" -gt 0 ]; do case $1 in + -n*) # cluster-node + shift + node=1 + host=$1 + ufw_rules='sudo ufw allow OpenSSH; + sudo ufw allow 30000:32767/tcp; + sudo ufw allow 10250/tcp; + sudo ufw allow 10255/tcp; + sudo ufw allow 6783/tcp;' + ;; + enable) + ssh $host "${iptables_reset}" + ssh $host "${ufw_log}" + ssh $host "${ufw_rules}" + ssh $host 'sudo ufw --force enable' + ;; + disable) + ssh $host "${iptables_reset}" + ssh $host 'sudo ufw disable' + ;; + *@*) host=$1 + ;; + *) + ssh $host 'sudo ufw '$* || echo $usage + break;; +esac; shift; done diff --git a/scripts/my_playbook.sh b/scripts/my_playbook.sh index bb4d7a624bb..0e4182a741a 100755 --- a/scripts/my_playbook.sh +++ b/scripts/my_playbook.sh @@ -1,41 +1,23 @@ +#!/usr/bin/env bash function setup_crio() { + logger -st kubespray "CRI-O's currently unstable in Kubespray" ssh $1 ' + sudo add-apt-repository ppa:alexlarsson/flatpak; sudo add-apt-repository ppa:projectatomic/ppa; sudo apt-get update; - sudo apt install cri-o-1.13; + sudo apt install libostree-dev cri-o-1.13 cri-o-runc; sudo chmod 0755 /etc/crio; sudo chown ubuntu:ubuntu -R /etc/crio; sudo chmod 0755 /etc/containers; sudo chown ubuntu:ubuntu -R /etc/containers; ' || echo "Usage: $0 --crio-setup user@host" } function setup_firewall() { - while [ "$#" -gt 0 ]; do case $1 in - -n*) - shift - ssh $* 'sudo apt install firewalld; - sudo firewall-cmd --permanent --add-port=30000-32767/tcp; - sudo firewall-cmd --permanent --add-port=10250/tcp; - sudo firewall-cmd --permanent --add-port=10255/tcp; - sudo firewall-cmd --permanent --add-port=6783/tcp; - sudo firewall-cmd --reload' $* || echo "Usage: $0 --firewall-setup -n user@host" - break;; - *) - ssh $* 'sudo apt install firewalld; - sudo firewall-cmd --permanent --add-port=6443/tcp; - sudo firewall-cmd --permanent --add-port=2379/tcp; - sudo firewall-cmd --permanent --add-port=2380/tcp; - sudo firewall-cmd --permanent --add-port=10250/tcp; - sudo firewall-cmd --permanent --add-port=10251/tcp; - sudo firewall-cmd --permanent --add-port=10252/tcp; - sudo firewall-cmd --permanent --add-port=10255/tcp; - sudo firewall-cmd --reload' || echo "Usage: $0 --firewall-setup user@host" - break;; - esac; shift; done + source scripts/my_firewall.sh $* } inventory='inventory/mycluster/hosts.ini' -defaults='-b --private-key=~/.ssh/id_rsa 
--ask-become-pass'
+defaults='-b --private-key=~/.ssh/id_rsa'
 options=""
 usage="Usage: $0 [-i,--inventory <file>] [ansible-playbook options]"
-usage2="Usage: $0 --crio-setup|--firewall-setup <user>@<host>"
+usage2="Usage: $0 --crio-setup|--firewall-setup <user>@<host> status|enable|disable|..."
 [ "$#" -lt 1 ] && echo "
 ${usage}
 ${usage2}
@@ -43,11 +25,11 @@ ${usage2}
 while [ "$#" -gt 0 ]; do case $1 in
   --crio-setup)
     shift
-    setup_crio $* -i ~/.ssh/id_rsa
+    setup_crio $@ -i ~/.ssh/id_rsa
     exit 0;;
   --firewall-setup)
     shift
-    setup_firewall $* -i ~/.ssh/id_rsa
+    setup_firewall $@ -i ~/.ssh/id_rsa
     exit 0;;
   -i*|--inventory)
     shift
@@ -60,5 +42,8 @@ while [ "$#" -gt 0 ]; do case $1 in
     defaults="";;
   *) options="${options} $1";;
 esac; shift; done
-ansible-playbook -i $inventory $defaults $options && echo "Next call must be a firewall-cmd :
-${usage2}"
+logger -s "Disable the cluster firewall if you can..."
+logger -s "If a TASK fails on timeouts, reboot the kubernetes cluster before retrying..."
+logger -s "Known TASKs that take more time: [Start of Calico kube controllers], [Kubernetes Apps | Start Resources]..."
+logger -s "Expect about half an hour per host to complete the cluster boot process..."
+logger -s "Please don't shut anything down until it finishes..." && ansible-playbook -i ${inventory} $defaults $options && echo "Next call must be scripts/start_dashboard.sh --timeout=60"
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
new file mode 100644
index 00000000000..39142c5f869
--- /dev/null
+++ b/scripts/requirements.txt
@@ -0,0 +1,2 @@
+python-nacl>=1.1.2
+libffi-dev>=3.2.1
diff --git a/scripts/start_dashboard.sh b/scripts/start_dashboard.sh
new file mode 100755
index 00000000000..c0c55ccb904
--- /dev/null
+++ b/scripts/start_dashboard.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+logfile=inventory/mycluster/artifacts/log.txt
+# Ansible already ran with all task tags, so this command is unused:
+# scripts/my_playbook.sh --tags=client,netchecker,dashboard cluster.yml $* 2>> $logfile
+# Check the kube-api cluster health with the kubectl.sh wrapper generated by ansible.
+inventory/mycluster/artifacts/kubectl.sh --namespace=kube-system apply --force --filename=inventory/mycluster/artifacts/dashboard.yml 2>> $logfile
+inventory/mycluster/artifacts/kubectl.sh proxy &
+sleep 10
+curl -SL 127.0.0.1:8001/healthz
diff --git a/scripts/systemctl-wpa-ssh.sh b/scripts/systemctl-wpa-ssh.sh
new file mode 100755
index 00000000000..2336e1b1069
--- /dev/null
+++ b/scripts/systemctl-wpa-ssh.sh
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+# Usage: $ scripts/systemctl-wpa-ssh.sh
+# source: https://www.linuxbabe.com/command-line/ubuntu-server-16-04-wifi-wpa-supplicant
+# https://git.io/fjYQI
+function cfrm_act () {
+def_go=$2
+y='y'
+n='n'
+[ "$def_go" == "$y" ] && y='Y'
+[ "$def_go" == "$n" ] && n='N'
+while true; do case $go in
+  [nN]*) break;;
+  [yY]*) echo $go; break;;
+  *)
+    read -p "
Confirm $1 [${y}/${n}] ? " go
+    [ -z "$go" ] && go=$def_go;;
+esac; done
+#Usage: $0 yY|nN
+}
+function prompt_arrgs () {
+IFS=' ' # Read prompt Field Separator
+if [[ "$#" -gt 3 ]]; then
+  shift; shift; shift; ARRGS=$@;
+else
+  size=$1
+  desc=$2
+  desc_precise=$3
+  while [[ -z "$ARRGS" ]]; do
+    read -p "
Please type in $desc...: (CTRL-C to exit) " -a arrgs
+    if [[ ${#arrgs[@]} -ge $size ]]; then
+      if [ -n "$(cfrm_act "you've entered $desc ${arrgs[*]}" 'n')" ]; then
+        ARRGS=${arrgs[@]}
+      fi
+    else
+      echo "
Enter at least $size value(s): $desc $desc_precise"
+    fi
+  done
+fi
+echo $ARRGS
+#Usage: $0 <size> <desc> <desc_precise> [array values]
+}
+logger -st wpa_passphrase "Add Wifi password access"
+WPASS=$(prompt_arrgs 2 'your ESSID and your passphrase' 'e.g. MyWifiNetwork myWip+Swod')
+[ -z "$WPASS" ] && exit 1
+wpa_passphrase $WPASS | sudo tee /etc/wpa_supplicant.conf
+iface=$(prompt_arrgs 1 "WLAN interface name" "e.g. wlp3s0" "wlan0")
+[ -z "$iface" ] && exit 1
+logger -st wpa_supplicant "Start Wifi client"
+# foreground test (blocks; run manually): sudo wpa_supplicant -c /etc/wpa_supplicant.conf -i $iface
+iwconfig
+sudo wpa_supplicant -B -c /etc/wpa_supplicant.conf -i $iface
+logger -st dhclient "Obtain an IP address"
+sudo dhclient $iface
+ifconfig $iface
+logger -st systemd "Setup Wifi client service"
+sudo cp /lib/systemd/system/wpa_supplicant.service /etc/systemd/system/wpa_supplicant.service || exit 1
+sudo sed -i -e "/ExecStart/s/-O \/run\/wpa_supplicant/-c \/etc\/wpa_supplicant.conf -i ${iface}/g" /etc/systemd/system/wpa_supplicant.service || exit 1
+# cat /etc/systemd/system/wpa_supplicant.service
+sudo systemctl enable wpa_supplicant.service
+logger -st systemd "Setup DHCP client service"
+echo -e "[Unit]
+Description=DHCP Client
+Before=network.target
+
+[Service]
+Type=simple
+ExecStart=/sbin/dhclient ${iface}
+
+[Install]
+WantedBy=multi-user.target
+" | sudo tee /etc/systemd/system/dhclient.service
+# cat /etc/systemd/system/dhclient.service
+sudo systemctl enable dhclient.service
+logger -st "$0" "Wifi configuration done."
diff --git a/tests/testcases/020_check-create-pod.yml b/tests/testcases/020_check-create-pod.yml
index ba76067f723..a4ff7bd3eee 100644
--- a/tests/testcases/020_check-create-pod.yml
+++ b/tests/testcases/020_check-create-pod.yml
@@ -17,13 +17,13 @@
     when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

   - name: Create test namespace
-    shell: "{{bin_dir}}/kubectl create namespace test"
+    shell: "{{ bin_dir }}/kubectl create namespace test"

   - name: Run a replica controller composed of 2 pods in test ns
-    shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --namespace test --replicas=2 --command -- tail -f /dev/null"
+    shell: "{{ bin_dir }}/kubectl run test --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --replicas=2 --command -- tail -f /dev/null"

   - name: Pods are running
-    shell: "{{bin_dir}}/kubectl get pods --namespace test --no-headers -o json"
+    shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o json"
     register: run_pods_log
     until: [ '(run_pods_log.stdout | from_json)["items"] | map(attribute = "status.phase") | join(",") == "Running,Running"' ]
     retries: 18
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index c4e8c105b38..3faff28f68c 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -14,7 +14,7 @@

   - name: Wait for pods to be ready
-    shell: "{{bin_dir}}/kubectl get pods -n test"
+    shell: "{{ bin_dir }}/kubectl get pods -n test"
     register: pods
     until:
       - '"ContainerCreating" not in pods.stdout'
@@ -25,24 +25,24 @@
     no_log: true

   - name: Get pod names
-    shell: "{{bin_dir}}/kubectl get pods -n test -o json"
+    shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
     register: pods
     no_log: true

   - name: Get hostnet pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
       jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name}
      {.status.podIP} {.status.containerStatuses} {end}'"
     register: hostnet_pods
     no_log: true

   - name: Get running pods
-    command: "{{bin_dir}}/kubectl get pods -n test -o
+    command: "{{ bin_dir }}/kubectl get pods -n test -o
       jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name}
       {.status.podIP} {.status.containerStatuses} {end}'"
     register: running_pods
     no_log: true

   - name: Check kubectl output
-    shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+    shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
     register: get_pods
     no_log: true
@@ -66,14 +66,14 @@
     with_items: "{{pod_ips}}"

   - name: Ping between pods is working
-    shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
     with_nested:
       - "{{pod_names}}"
       - "{{pod_ips}}"

   - name: Ping between hostnet pods is working
-    shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+    shell: "{{ bin_dir }}/kubectl exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
     when: item[0] in pods_hostnet and item[1] in pods_hostnet
     with_nested:
       - "{{pod_names}}"
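+      # Editor's note: with_nested expands to the cartesian product of
+      # pod_names and pod_ips, so every listed pod pings every pod IP; the
+      # when: guards above split the pairs into hostnet-to-hostnet and
+      # pod-network-to-pod-network checks.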