diff --git a/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/post.yaml b/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/post.yaml index 512afcbdb2fa9..1169f794ec9e3 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/post.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/post.yaml @@ -6,10 +6,9 @@ cmd: | set -e set -x - kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" - "$kubectl" config use-context local - "$kubectl" delete -f examples/cinder-csi-plugin/nginx.yaml - "$kubectl" delete -f manifests/cinder-csi-plugin + '{{ kubectl }}' config use-context local + '{{ kubectl }}' delete -f examples/cinder-csi-plugin/nginx.yaml + '{{ kubectl }}' delete -f manifests/cinder-csi-plugin executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/run.yaml index 64676a649b09c..e20830bd1de84 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-csi-cinder/run.yaml @@ -1,6 +1,8 @@ - hosts: all roles: - - export-vexxhost-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' become: yes tasks: - name: Run csi cinder acceptance tests with cloud-provider-openstack @@ -46,8 +48,8 @@ export EXTERNAL_CLOUD_PROVIDER_BINARY="$PWD/openstack-cloud-controller-manager" # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -61,23 +63,22 @@ # -E 
preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers 
kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin # Where csi provisioner reads instance id from INSTANCE_UUID=$(curl http://169.254.169.254/openstack/latest/meta_data.json | python -c "import sys, json; print json.load(sys.stdin)['uuid']") @@ -117,45 +118,45 @@ } > /dev/null 2>&1 # Enable services - "$kubectl" create -f manifests/cinder-csi-plugin + '{{ kubectl }}' create -f manifests/cinder-csi-plugin sleep 5 # If services up if timeout 300 bash -c ' while : do - "$kubectl" get pod | sed "1d" | awk '\''$2 != "2/2" || $3 != "Running" {err = 1} END {exit err}'\'' && break + {{ kubectl }} get pod | sed "1d" | awk '\''$2 != "2/2" || $3 != "Running" {err = 1} END {exit err}'\'' && break sleep 1 done ' then echo 'Run services successful' - "$kubectl" get pod + '{{ kubectl }}' get pod else echo 'Run services failed' - "$kubectl" get pod + '{{ kubectl }}' get pod exit 1 fi # Make test - "$kubectl" create -f 
examples/cinder-csi-plugin/nginx.yaml + '{{ kubectl }}' create -f examples/cinder-csi-plugin/nginx.yaml # If test passed if timeout 300 bash -c ' while : do - [[ $("$kubectl" describe pod nginx | awk "/^Status:/ {print \$2}") == Running ]] && break + [[ $({{ kubectl }} describe pod nginx | awk "/^Status:/ {print \$2}") == Running ]] && break sleep 1 done ' then echo 'Run test successful' - "$kubectl" get pod + '{{ kubectl }}' get pod else echo 'Run test failed' - "$kubectl" get pod - "$kubectl" describe pod nginx - "$kubectl" describe pvc csi-pvc-cinderplugin + '{{ kubectl }}' get pod + '{{ kubectl }}' describe pod nginx + '{{ kubectl }}' describe pvc csi-pvc-cinderplugin exit 1 fi executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(vexxhost_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/post.yaml b/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/post.yaml index ec37a0124a5bc..f132769475f24 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/post.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/post.yaml @@ -12,7 +12,7 @@ curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add apt-get update && sudo apt-get install google-cloud-sdk --yes apt-get install google-cloud-sdk-app-engine-java --yes - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + export LOG_DIR='{{ k8s_log_dir }}' # get upload_e2e.py wget https://raw.githubusercontent.com/kubernetes/test-infra/master/testgrid/conformance/upload_e2e.py # TODO(RuiChen): Add timestamp for e2e.log in order to workaround upload_e2e.py bug diff --git a/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/run.yaml index cb2a94a047d48..688d32b83026f 100644 --- 
a/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-e2e-conformance/run.yaml @@ -1,6 +1,8 @@ - hosts: all roles: - - export-vexxhost-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' become: yes tasks: - name: Run kubernetes E2E conformance tests with cloud-provider-openstack @@ -48,8 +50,8 @@ export EXTERNAL_CLOUD_PROVIDER_BINARY="$PWD/openstack-cloud-controller-manager" # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -73,23 +75,22 @@ # -E preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config 
set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole 
cluster-admin # Sleep to wait for creating serviceaccount default/default complete sleep 5 @@ -105,7 +106,7 @@ # Run e2e test using local deployment/provider # Must run kubetest under kubernetes root directory - cd "$GOPATH/src/k8s.io/kubernetes" + cd '{{ k8s_src_dir }}' kubetest --dump=$LOG_DIR \ --test \ --build=quick \ @@ -115,5 +116,5 @@ --test_args="--ginkgo.focus=\\[Conformance\\] --ginkgo.noColor=true" \ --timeout=120m | tee $LOG_DIR/e2e.log executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(vexxhost_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-flexvolume-cinder/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-flexvolume-cinder/run.yaml index 5b2729fbcaa8b..df4c0175880cf 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-flexvolume-cinder/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-flexvolume-cinder/run.yaml @@ -15,6 +15,7 @@ environment: OVERRIDE_ENABLED_SERVICES: 'key,c-sch,c-api,c-vol,rabbit,mysql' PROJECTS: 'openstack/devstack-plugin-ceph' + - role: export-devstack-openrc tasks: - name: Run flexvolume cinder conformance tests with cloud-provider-openstack shell: @@ -27,7 +28,6 @@ make build # Create cloud-config - source /opt/stack/new/devstack/openrc admin admin mkdir -p /etc/kubernetes/ cat << EOF >> /etc/kubernetes/cloud-config [Global] @@ -55,8 +55,8 @@ export CLOUD_CONFIG=/etc/kubernetes/cloud-config # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -69,23 +69,22 @@ # -E preserves the current env 
vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole 
cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin # Avoid missing feature 400000000000000 error in ceph # http://lists.ceph.com/pipermail/ceph-users-ceph.com/2017-July/019383.html @@ -95,25 +94,25 @@ sleep 5 # Run test export VOLUME_ID=$(cinder create 1 | awk '/ id / {print $4}') - "$kubectl" create -f examples/cinder-flexvolume/nginx.yaml + '{{ kubectl }}' create -f examples/cinder-flexvolume/nginx.yaml # If test passed if timeout 300 bash -c ' while : do - [[ $("$kubectl" describe pod nginx | awk "/^Status:/ {print \$2}") == Running ]] && break + [[ $({{ kubectl }} describe pod nginx | awk "/^Status:/ {print \$2}") == Running ]] && break sleep 1 done ' then echo 'Run test successfully' - "$kubectl" get pod nginx + '{{ kubectl }}' get pod nginx else echo 'Run test failed' - "$kubectl" get pod nginx - "$kubectl" describe pod nginx + '{{ kubectl }}' get pod nginx + '{{ kubectl }}' describe pod nginx exit 1 fi executable: /bin/bash - chdir: '{{ 
k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/post.yaml b/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/post.yaml index ca552e39751b6..b1964c345d2fe 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/post.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/post.yaml @@ -6,9 +6,8 @@ cmd: | set -e set -x - kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" - "$kubectl" config use-context local - "$kubectl" delete -f examples/persistent-volume-provisioning/cinder/cinder-in-tree-full.yaml + '{{ kubectl }}' config use-context local + '{{ kubectl }}' delete -f examples/persistent-volume-provisioning/cinder/cinder-in-tree-full.yaml executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/run.yaml index 0fb25c5b23259..1f112830ab380 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-k8s-cinder/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-vexxhost-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' tasks: - name: Run kubernetes cinder acceptance tests with cloud-provider-openstack shell: @@ -47,8 +49,8 @@ export EXTERNAL_CLOUD_PROVIDER_BINARY="$PWD/openstack-cloud-controller-manager" # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all 
the processes and drop down to the command line @@ -58,37 +60,36 @@ # -E preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd nohup ./cinder-provisioner --cloud-config "$CLOUD_CONFIG" --kubeconfig /var/run/kubernetes/admin.kubeconfig --id cinder > "$LOG_DIR/cinder-provisioner.log" 2>&1 & - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding 
--user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin # Sleep to wait for creating serviceaccount default/default complete sleep 5 # Remove node taint - "$kubectl" taint nodes "$HOSTNAME_OVERRIDE" node.cloudprovider.kubernetes.io/uninitialized- || true + '{{ kubectl }}' taint nodes "$HOSTNAME_OVERRIDE" node.cloudprovider.kubernetes.io/uninitialized- || true # Cinder test - "$kubectl" apply -f examples/persistent-volume-provisioning/cinder/cinder-in-tree-full.yaml + '{{ kubectl }}' apply -f examples/persistent-volume-provisioning/cinder/cinder-in-tree-full.yaml # If test passed if timeout 300 bash -c ' while : do - [[ $("$kubectl" describe pods web | awk "/^Status:/ {print \$2}") == Running ]] && break + [[ 
$({{ kubectl }} describe pods web | awk "/^Status:/ {print \$2}") == Running ]] && break sleep 1 done ' @@ -96,9 +97,9 @@ echo 'Run test successful' else echo 'Run test failed' - "$kubectl" describe pods web + '{{ kubectl }}' describe pods web exit 1 fi executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(vexxhost_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-keystone-authentication-authorization/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-keystone-authentication-authorization/run.yaml index b3e24dcb826b5..6e83b637f28bb 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-keystone-authentication-authorization/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-keystone-authentication-authorization/run.yaml @@ -4,6 +4,7 @@ - role: install-devstack environment: OVERRIDE_ENABLED_SERVICES: 'key,mysql' + - role: export-devstack-openrc become: yes tasks: - name: Run keystone authentication and authorization acceptance tests with cloud-provider-openstack @@ -20,7 +21,6 @@ pip install -U python-openstackclient # Create cloud-config - source /opt/stack/new/devstack/openrc admin admin mkdir -p /etc/kubernetes/ cat << EOF >> /etc/kubernetes/cloud-config [Global] @@ -78,8 +78,8 @@ export EXTERNAL_CLOUD_PROVIDER_BINARY="$PWD/openstack-cloud-controller-manager" # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -88,14 +88,14 @@ export MAX_TIME_FOR_URL_API_SERVER=5 export AUTHORIZATION_MODE="Node,Webhook,RBAC" - 
K8S_INSTALL_SCRIPT="$GOPATH/src/k8s.io/kubernetes/hack/local-up-cluster.sh" + K8S_INSTALL_SCRIPT='{{ k8s_src_dir }}/hack/local-up-cluster.sh' sed -i -e "/kube::util::wait_for_url.*$/,+1d" "$K8S_INSTALL_SCRIPT" sed -i -e '/hyperkube\" apiserver.*$/a \ --authentication-token-webhook-config-file=/etc/kubernetes/webhook.kubeconfig \\' "$K8S_INSTALL_SCRIPT" sed -i -e '/hyperkube\" apiserver.*$/a \ --authorization-webhook-config-file=/etc/kubernetes/webhook.kubeconfig \\' "$K8S_INSTALL_SCRIPT" # -E preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd [[ "$OS_AUTH_URL" =~ "v3" ]] && keystone_url=${OS_AUTH_URL} || keystone_url=${OS_AUTH_URL}/v3 @@ -107,20 +107,19 @@ --v=10 \ --keystone-url ${keystone_url} >"${LOG_DIR}/keystone-auth.log" 2>&1 & - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local sleep 10 # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create 
clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin { authenticated_info=$(cat <<< ' @@ -167,13 +166,13 @@ # NOTE: k8s+keystone webhook test case need the OS_DOMAIN_NAME env export OS_DOMAIN_NAME=Default - "$kubectl" config set-credentials 
openstackuser --auth-provider=openstack - "$kubectl" config set-context --cluster=local --user=openstackuser openstackuser@local - "$kubectl" config use-context openstackuser@local - if ! "$kubectl" get pods; then + '{{ kubectl }}' config set-credentials openstackuser --auth-provider=openstack + '{{ kubectl }}' config set-context --cluster=local --user=openstackuser openstackuser@local + '{{ kubectl }}' config use-context openstackuser@local + if ! '{{ kubectl }}' get pods; then echo "Testing kubernetes+keystone authentication and authorization failed!" exit 1 fi executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/post.yaml b/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/post.yaml index d360e6c5ce5f1..ad8f57a5c8b92 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/post.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/post.yaml @@ -1,20 +1,21 @@ - hosts: all become: yes roles: - - export-vexxhost-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' tasks: - name: Clean up resources for lbass octavia acceptance tests shell: cmd: | set -e set -x - kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" - "$kubectl" config use-context local - ext_lb_svc_uid=$("$kubectl" get services external-http-nginx-service -o=jsonpath='{.metadata.uid}') || true - int_lb_svc_uid=$("$kubectl" get services internal-http-nginx-service -o=jsonpath='{.metadata.uid}') || true + '{{ kubectl }}' config use-context local + ext_lb_svc_uid=$('{{ kubectl }}' get services external-http-nginx-service -o=jsonpath='{.metadata.uid}') || true + int_lb_svc_uid=$('{{ kubectl }}' get services internal-http-nginx-service -o=jsonpath='{.metadata.uid}') || true - "$kubectl" delete -f examples/loadbalancers/internal-http-nginx.yaml || true - "$kubectl" delete -f 
examples/loadbalancers/external-http-nginx.yaml || true + '{{ kubectl }}' delete -f examples/loadbalancers/internal-http-nginx.yaml || true + '{{ kubectl }}' delete -f examples/loadbalancers/external-http-nginx.yaml || true for lb_svc_uid in $ext_lb_svc_uid $int_lb_svc_uid; do lb_name=$(echo $lb_svc_uid | tr -d '-' | sed 's/^/a/' | cut -c -32) @@ -32,5 +33,5 @@ } || true done executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(vexxhost_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/run.yaml index b0731fd2e4e6b..f59b92f4baa2d 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-lb-octavia/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-vexxhost-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' tasks: - name: Run lbass octavia acceptance tests with cloud-provider-openstack shell: @@ -53,8 +55,8 @@ export EXTERNAL_CLOUD_PROVIDER_BINARY="$PWD/openstack-cloud-controller-manager" # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -64,23 +66,22 @@ # -E preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd - export 
kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default 
kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin # Run test for test_case in internal external @@ -89,23 +90,23 @@ service_name="${test_case}-http-nginx-service" # Delete fake floating-network-id to use the default one in cloud config sed -i '/loadbalancer.openstack.org/d' "$test_file" - "$kubectl" create -f "$test_file" + '{{ kubectl }}' create -f "$test_file" if ! service_name="$service_name" timeout 600 bash -c ' while : do - [[ -n $("$kubectl" describe service "$service_name" | awk "/LoadBalancer Ingress/ {print \$3}") ]] && break + [[ -n $({{ kubectl }} describe service "$service_name" | awk "/LoadBalancer Ingress/ {print \$3}") ]] && break sleep 1 done ' then echo "Timed out to wait for $test_case loadbalancer services deployment!" 
- "$kubectl" describe pods - "$kubectl" describe services + '{{ kubectl }}' describe pods + '{{ kubectl }}' describe services exit 1 fi - ingress_ip=$("$kubectl" describe service "$service_name" | awk "/LoadBalancer Ingress/ {print \$3}") + ingress_ip=$('{{ kubectl }}' describe service "$service_name" | awk "/LoadBalancer Ingress/ {print \$3}") if curl --retry 5 --retry-max-time 30 "http://$ingress_ip" | grep 'Welcome to nginx' then echo "$test_case lb services launched sucessfully!" @@ -115,5 +116,5 @@ fi done executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(vexxhost_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/post.yaml b/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/post.yaml index c370f02e24dea..aab1789ae9061 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/post.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/post.yaml @@ -6,9 +6,8 @@ cmd: | set -e set -x - kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" - "$kubectl" config use-context local - "$kubectl" delete -f examples/persistent-volume-provisioning/cinder/cinder-full.yaml + '{{ kubectl }}' config use-context local + '{{ kubectl }}' delete -f examples/persistent-volume-provisioning/cinder/cinder-full.yaml executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/run.yaml b/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/run.yaml index 7c1a27e34c9f8..44406457fc5db 100644 --- a/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/run.yaml +++ b/playbooks/cloud-provider-openstack-acceptance-test-standalone-cinder/run.yaml @@ -2,10 +2,10 @@ become: yes 
roles: - clone-devstack-gate-to-workspace - - create-devstack-local-conf - role: install-devstack environment: OVERRIDE_ENABLED_SERVICES: 'key,c-sch,c-api,c-vol,rabbit,mysql' + - role: export-devstack-openrc tasks: - name: Run cinder standalone acceptance tests with cloud-provider-openstack shell: @@ -18,7 +18,6 @@ make build # Create cloud-config - source /opt/stack/new/devstack/openrc admin admin mkdir -p /etc/kubernetes/ cat << EOF >> /etc/kubernetes/cloud-config [Global] @@ -46,8 +45,8 @@ export CLOUD_CONFIG=/etc/kubernetes/cloud-config # location of where the kubernetes processes log their output - mkdir -p '{{ ansible_user_dir }}/workspace/logs/kubernetes' - export LOG_DIR='{{ ansible_user_dir }}/workspace/logs/kubernetes' + mkdir -p '{{ k8s_log_dir }}' + export LOG_DIR='{{ k8s_log_dir }}' # We need this for one of the conformance tests export ALLOW_PRIVILEGED=true # Just kick off all the processes and drop down to the command line @@ -57,37 +56,36 @@ # -E preserves the current env vars, but we need to special case PATH # Must run local-up-cluster.sh under kubernetes root directory - pushd "$GOPATH/src/k8s.io/kubernetes" + pushd '{{ k8s_src_dir }}' sudo -E PATH=$PATH SHELLOPTS=$SHELLOPTS ./hack/local-up-cluster.sh -O popd nohup ./cinder-provisioner --cloud-config "$CLOUD_CONFIG" --kubeconfig /var/run/kubernetes/admin.kubeconfig --id cinder > "$LOG_DIR/cinder-provisioner.log" 2>&1 & - export kubectl="$GOPATH/src/k8s.io/kubernetes/cluster/kubectl.sh" # set up the config we need for kubectl to work - "$kubectl" config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt - "$kubectl" config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt - "$kubectl" config set-context local --cluster=local --user=myself - "$kubectl" config use-context local + '{{ kubectl }}' config set-cluster local --server=https://localhost:6443 
--certificate-authority=/var/run/kubernetes/server-ca.crt + '{{ kubectl }}' config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt + '{{ kubectl }}' config set-context local --cluster=local --user=myself + '{{ kubectl }}' config use-context local # Hack for RBAC for all for the new cloud-controller process, we need to do better than this - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin - "$kubectl" create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin-1 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:pvl-controller kube-system-cluster-admin-2 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-node-controller kube-system-cluster-admin-3 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:serviceaccount:kube-system:cloud-controller-manager kube-system-cluster-admin-4 --clusterrole cluster-admin + '{{ kubectl }}' create 
clusterrolebinding --user system:serviceaccount:kube-system:shared-informers kube-system-cluster-admin-5 --clusterrole cluster-admin + '{{ kubectl }}' create clusterrolebinding --user system:kube-controller-manager kube-system-cluster-admin-6 --clusterrole cluster-admin # Sleep to wait for creating serviceaccount default/default complete sleep 5 # Delete default storageclass - "$kubectl" delete storageclass standard + '{{ kubectl }}' delete storageclass standard # Cinder test - "$kubectl" apply -f examples/persistent-volume-provisioning/cinder/cinder-full.yaml + '{{ kubectl }}' apply -f examples/persistent-volume-provisioning/cinder/cinder-full.yaml # If test passed if timeout 300 bash -c ' while : do - [[ $("$kubectl" describe pods web | awk "/^Status:/ {print \$2}") == Running ]] && break + [[ $({{ kubectl }} describe pods web | awk "/^Status:/ {print \$2}") == Running ]] && break sleep 1 done ' @@ -95,9 +93,9 @@ echo 'Run test successful' else echo 'Run test failed' - "$kubectl" describe pods web + '{{ kubectl }}' describe pods web exit 1 fi executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' + chdir: '{{ k8s_os_provider_src_dir }}' environment: '{{ golang_env }}' diff --git a/playbooks/cloud-provider-openstack-test/pre.yaml b/playbooks/cloud-provider-openstack-test/pre.yaml index 8ad359194616a..dc9ebb3afab2e 100644 --- a/playbooks/cloud-provider-openstack-test/pre.yaml +++ b/playbooks/cloud-provider-openstack-test/pre.yaml @@ -5,6 +5,6 @@ shell: cmd: | set -x - mkdir -p $(dirname '{{ k8s_specific_src_dir }}') - mv '{{ zuul.project.src_dir }}' '{{ k8s_specific_src_dir }}' + mkdir -p $(dirname '{{ k8s_os_provider_src_dir }}') + mv '{{ zuul.project.src_dir }}' '{{ k8s_os_provider_src_dir }}' executable: /bin/bash diff --git a/playbooks/cloud-provider-openstack-unittest/run.yaml b/playbooks/cloud-provider-openstack-unittest/run.yaml index f3a01b812b3ec..b345babdcd92f 100644 --- a/playbooks/cloud-provider-openstack-unittest/run.yaml +++ 
b/playbooks/cloud-provider-openstack-unittest/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-opentelekomcloud-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'vexxhost' tasks: - name: Run unit tests with cloud-provider-openstack shell: @@ -13,5 +15,5 @@ go get -u github.com/Masterminds/glide TESTARGS='-v' make test 2>&1 | tee $TEST_RESULTS_TXT executable: /bin/bash - chdir: '{{ k8s_specific_src_dir }}' - environment: '{{ golang_env | combine(opentelekomcloud_openrc) }}' + chdir: '{{ k8s_os_provider_src_dir }}' + environment: '{{ golang_env }}' diff --git a/playbooks/gophercloud-acceptance-test-telefonica/run.yaml b/playbooks/gophercloud-acceptance-test-telefonica/run.yaml index 0d85ef3bd2e59..6eafd246f38ea 100644 --- a/playbooks/gophercloud-acceptance-test-telefonica/run.yaml +++ b/playbooks/gophercloud-acceptance-test-telefonica/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-telefonica-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'telefonica' tasks: - name: Run acceptance tests with gophercloud against telefonica public cloud shell: @@ -58,4 +60,4 @@ } 2>&1 | tee $TEST_RESULTS_TXTT executable: /bin/bash chdir: '{{ zuul.project.src_dir }}' - environment: '{{ golang_env | combine(telefonica_openrc) }}' + environment: '{{ golang_env }}' diff --git a/playbooks/terraform-provider-flexibleengine-acceptance-test-orange/run.yaml b/playbooks/terraform-provider-flexibleengine-acceptance-test-orange/run.yaml index 260c8d34b82b5..b875660192862 100644 --- a/playbooks/terraform-provider-flexibleengine-acceptance-test-orange/run.yaml +++ b/playbooks/terraform-provider-flexibleengine-acceptance-test-orange/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-orange-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'orange' tasks: - name: Run acceptance tests with terraform-provider-openstack against orange public cloud shell: @@ -51,4 +53,4 @@ make testacc 2>&1 | tee $TEST_RESULTS_TXT 
executable: /bin/bash chdir: '{{ zuul.project.src_dir }}' - environment: '{{ golang_env | combine(orange_openrc) }}' + environment: '{{ golang_env }}' diff --git a/playbooks/terraform-provider-opentelekomcloud-acceptance-test-opentelekomcloud/run.yaml b/playbooks/terraform-provider-opentelekomcloud-acceptance-test-opentelekomcloud/run.yaml index 05805682b74df..075a0a2c714c0 100644 --- a/playbooks/terraform-provider-opentelekomcloud-acceptance-test-opentelekomcloud/run.yaml +++ b/playbooks/terraform-provider-opentelekomcloud-acceptance-test-opentelekomcloud/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-opentelekomcloud-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'opentelekomcloud' tasks: - name: Run acceptance tests with terraform-provider-openstack against opentelekom public cloud shell: @@ -39,4 +41,4 @@ executable: /bin/bash chdir: '{{ zuul.project.src_dir }}' - environment: '{{ golang_env | combine(opentelekomcloud_openrc) }}' + environment: '{{ golang_env }}' diff --git a/playbooks/terraform-provider-telefonicaopencloud-acceptance-test-telefonica/run.yaml b/playbooks/terraform-provider-telefonicaopencloud-acceptance-test-telefonica/run.yaml index f93eaa2ba977f..11d6c9a9ffea1 100644 --- a/playbooks/terraform-provider-telefonicaopencloud-acceptance-test-telefonica/run.yaml +++ b/playbooks/terraform-provider-telefonicaopencloud-acceptance-test-telefonica/run.yaml @@ -1,7 +1,9 @@ - hosts: all become: yes roles: - - export-telefonica-openrc + - role: export-cloud-openrc + vars: + cloud_name: 'telefonica' tasks: - name: Run acceptance tests with terraform-provider-openstack against telefonica public cloud shell: @@ -52,4 +54,4 @@ executable: /bin/bash chdir: '{{ zuul.project.src_dir }}' - environment: '{{ golang_env | combine(telefonica_openrc) }}' + environment: '{{ golang_env }}' diff --git a/roles/export-cloud-openrc/tasks/main.yaml b/roles/export-cloud-openrc/tasks/main.yaml new file mode 100644 index 
0000000000000..f70fcc13baa50 --- /dev/null +++ b/roles/export-cloud-openrc/tasks/main.yaml @@ -0,0 +1,76 @@ +- name: Set fact for opentelekomcloud openrc + set_fact: + openrc: + OS_PASSWORD: '{{ opentelekomcloud_credentials.password }}' + OS_AUTH_TYPE: '{{ opentelekomcloud_credentials.auth_type }}' + OS_AUTH_URL: '{{ opentelekomcloud_credentials.auth_url }}' + OS_IDENTITY_API_VERSION: '{{ opentelekomcloud_credentials.identity_api_version }}' + OS_DOMAIN_NAME: '{{ opentelekomcloud_credentials.domain_name }}' + OS_USER_DOMAIN_NAME: '{{ opentelekomcloud_credentials.user_domain_name }}' + OS_PROJECT_DOMAIN_NAME: '{{ opentelekomcloud_credentials.project_domain_name }}' + OS_PROJECT_NAME: '{{ opentelekomcloud_credentials.project_name}}' + OS_REGION_NAME: '{{ opentelekomcloud_credentials.region_name}}' + OS_TENANT_NAME: '{{ opentelekomcloud_credentials.project_name }}' + OS_USERNAME: '{{ opentelekomcloud_credentials.user_name }}' + OS_ACCESS_KEY: '{{ opentelekomcloud_credentials.access_key }}' + OS_SECRET_KEY: '{{ opentelekomcloud_credentials.secret_key }}' + OS_AVAILABILITY_ZONE: '{{ opentelekomcloud_credentials.availability_zone }}' + no_log: yes + when: cloud_name == 'opentelekomcloud' + +- name: Set fact for orange openrc + set_fact: + openrc: + OS_PASSWORD: '{{ orange_credentials.password }}' + OS_AUTH_TYPE: '{{ orange_credentials.auth_type }}' + OS_AUTH_URL: '{{ orange_credentials.auth_url }}' + OS_IDENTITY_API_VERSION: '{{ orange_credentials.identity_api_version }}' + OS_DOMAIN_NAME: '{{ orange_credentials.domain_name }}' + OS_PROJECT_NAME: '{{ orange_credentials.project_name}}' + OS_REGION_NAME: '{{ orange_credentials.region_name}}' + OS_TENANT_NAME: '{{ orange_credentials.project_name }}' + OS_USERNAME: '{{ orange_credentials.user_name }}' + OS_ACCESS_KEY: '{{ orange_credentials.access_key }}' + OS_SECRET_KEY: '{{ orange_credentials.secret_key }}' + OS_AVAILABILITY_ZONE: '{{ orange_credentials.availability_zone }}' + no_log: yes + when: cloud_name == 'orange' + 
+- name: Set fact for telefonica openrc + set_fact: + openrc: + OS_PASSWORD: '{{ telefonica_credentials.password }}' + OS_AUTH_TYPE: '{{ telefonica_credentials.auth_type }}' + OS_AUTH_URL: '{{ telefonica_credentials.auth_url }}' + OS_IDENTITY_API_VERSION: '{{ telefonica_credentials.identity_api_version }}' + OS_DOMAIN_NAME: '{{ telefonica_credentials.domain_name }}' + OS_PROJECT_NAME: '{{ telefonica_credentials.project_name}}' + OS_REGION_NAME: '{{ telefonica_credentials.region_name}}' + OS_TENANT_NAME: '{{ telefonica_credentials.project_name }}' + OS_USERNAME: '{{ telefonica_credentials.user_name }}' + no_log: yes + when: cloud_name == 'telefonica' + +- name: Set fact for vexxhost openrc + set_fact: + openrc: + OS_AUTH_TYPE: '{{ vexxhost_credentials.auth_type }}' + OS_IDENTITY_API_VERSION: '{{ vexxhost_credentials.identity_api_version }}' + OS_VOLUME_API_VERSION: '{{ vexxhost_credentials.volume_api_version }}' + OS_INTERFACE: '{{ vexxhost_credentials.interface }}' + OS_AUTH_URL: '{{ vexxhost_credentials.auth_url }}' + OS_PROJECT_ID: '{{ vexxhost_credentials.project_id }}' + OS_PROJECT_NAME: '{{ vexxhost_credentials.project_name }}' + OS_USER_DOMAIN_NAME: '{{ vexxhost_credentials.user_domain_name }}' + OS_PROJECT_DOMAIN_ID: '{{ vexxhost_credentials.project_domain_id }}' + OS_USERNAME: '{{ vexxhost_credentials.username }}' + OS_PASSWORD: '{{ vexxhost_credentials.password }}' + OS_REGION_NAME: '{{ vexxhost_credentials.region_name }}' + OS_DOMAIN_NAME: '{{ vexxhost_credentials.user_domain_name }}' + no_log: yes + when: cloud_name == 'vexxhost' + +- name: Merge openrc into golang env + set_fact: + golang_env: '{{ golang_env | combine(openrc) }}' + no_log: yes diff --git a/roles/export-devstack-openrc/defaults/main.yaml b/roles/export-devstack-openrc/defaults/main.yaml new file mode 100644 index 0000000000000..90fbc12c83f18 --- /dev/null +++ b/roles/export-devstack-openrc/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +openrc_file: '/opt/stack/new/devstack/openrc' diff --git 
a/roles/export-devstack-openrc/tasks/main.yaml b/roles/export-devstack-openrc/tasks/main.yaml new file mode 100644 index 0000000000000..d4c3dde732c64 --- /dev/null +++ b/roles/export-devstack-openrc/tasks/main.yaml @@ -0,0 +1,24 @@ +# Convert multiple key=value pairs to json format +# 's/^/"/' - add " at the beginning of each line +# 's/=/": "/' - change = to ": " in each line +# 's/$/",/' - add ", at the end of each line +# '$ s/,$//' - delete comma at the end of the last line + +- name: Register devstack openrc + shell: + cmd: | + source '{{ openrc_file }}' admin admin > /dev/null + openrc=$(env | grep '^OS_' | sed -e 's/^/"/' -e 's/=/": "/' -e 's/$/",/' | sed '$ s/,$//') + echo "{$openrc}" + executable: /bin/bash + register: output + +- name: Set fact for devstack openrc + set_fact: + openrc: '{{ output.stdout | from_json }}' + no_log: yes + +- name: Merge openrc into golang env + set_fact: + golang_env: '{{ golang_env | combine(openrc) }}' + no_log: yes diff --git a/roles/export-opentelekomcloud-openrc/tasks/main.yaml b/roles/export-opentelekomcloud-openrc/tasks/main.yaml deleted file mode 100644 index e86c6f4586622..0000000000000 --- a/roles/export-opentelekomcloud-openrc/tasks/main.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Set fact for opentelekomcloud openrc - set_fact: - opentelekomcloud_openrc: - OS_PASSWORD: '{{ opentelekomcloud_credentials.password }}' - OS_AUTH_TYPE: '{{ opentelekomcloud_credentials.auth_type }}' - OS_AUTH_URL: '{{ opentelekomcloud_credentials.auth_url }}' - OS_IDENTITY_API_VERSION: '{{ opentelekomcloud_credentials.identity_api_version }}' - OS_DOMAIN_NAME: '{{ opentelekomcloud_credentials.domain_name }}' - OS_PROJECT_NAME: '{{ opentelekomcloud_credentials.project_name}}' - OS_REGION_NAME: '{{ opentelekomcloud_credentials.region_name}}' - OS_TENANT_NAME: '{{ opentelekomcloud_credentials.project_name }}' - OS_USERNAME: '{{ opentelekomcloud_credentials.user_name }}' - OS_ACCESS_KEY: '{{ opentelekomcloud_credentials.access_key }}' - 
OS_SECRET_KEY: '{{ opentelekomcloud_credentials.secret_key }}' - OS_AVAILABILITY_ZONE: '{{ opentelekomcloud_credentials.availability_zone }}' - no_log: yes diff --git a/roles/export-orange-openrc/tasks/main.yaml b/roles/export-orange-openrc/tasks/main.yaml deleted file mode 100644 index 4e21671f3d022..0000000000000 --- a/roles/export-orange-openrc/tasks/main.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Set fact for orange openrc - set_fact: - orange_openrc: - OS_PASSWORD: '{{ orange_credentials.password }}' - OS_AUTH_TYPE: '{{ orange_credentials.auth_type }}' - OS_AUTH_URL: '{{ orange_credentials.auth_url }}' - OS_IDENTITY_API_VERSION: '{{ orange_credentials.identity_api_version }}' - OS_DOMAIN_NAME: '{{ orange_credentials.domain_name }}' - OS_PROJECT_NAME: '{{ orange_credentials.project_name}}' - OS_REGION_NAME: '{{ orange_credentials.region_name}}' - OS_TENANT_NAME: '{{ orange_credentials.project_name }}' - OS_USERNAME: '{{ orange_credentials.user_name }}' - OS_ACCESS_KEY: '{{ orange_credentials.access_key }}' - OS_SECRET_KEY: '{{ orange_credentials.secret_key }}' - OS_AVAILABILITY_ZONE: '{{ orange_credentials.availability_zone }}' - no_log: yes diff --git a/roles/export-telefonica-openrc/tasks/main.yaml b/roles/export-telefonica-openrc/tasks/main.yaml deleted file mode 100644 index d1d9136b4e23c..0000000000000 --- a/roles/export-telefonica-openrc/tasks/main.yaml +++ /dev/null @@ -1,13 +0,0 @@ -- name: Set fact for telefonica openrc - set_fact: - telefonica_openrc: - OS_PASSWORD: '{{ telefonica_credentials.password }}' - OS_AUTH_TYPE: '{{ telefonica_credentials.auth_type }}' - OS_AUTH_URL: '{{ telefonica_credentials.auth_url }}' - OS_IDENTITY_API_VERSION: '{{ telefonica_credentials.identity_api_version }}' - OS_DOMAIN_NAME: '{{ telefonica_credentials.domain_name }}' - OS_PROJECT_NAME: '{{ telefonica_credentials.project_name}}' - OS_REGION_NAME: '{{ telefonica_credentials.region_name}}' - OS_TENANT_NAME: '{{ telefonica_credentials.project_name }}' - OS_USERNAME: 
'{{ telefonica_credentials.user_name }}' - no_log: yes diff --git a/roles/export-vexxhost-openrc/tasks/main.yaml b/roles/export-vexxhost-openrc/tasks/main.yaml deleted file mode 100644 index a3d0b67438052..0000000000000 --- a/roles/export-vexxhost-openrc/tasks/main.yaml +++ /dev/null @@ -1,17 +0,0 @@ -- name: Set fact for vexxhost openrc - set_fact: - vexxhost_openrc: - OS_AUTH_TYPE: '{{ vexxhost_credentials.auth_type }}' - OS_IDENTITY_API_VERSION: '{{ vexxhost_credentials.identity_api_version }}' - OS_VOLUME_API_VERSION: '{{ vexxhost_credentials.volume_api_version }}' - OS_INTERFACE: '{{ vexxhost_credentials.interface }}' - OS_AUTH_URL: '{{ vexxhost_credentials.auth_url }}' - OS_PROJECT_ID: '{{ vexxhost_credentials.project_id }}' - OS_PROJECT_NAME: '{{ vexxhost_credentials.project_name }}' - OS_USER_DOMAIN_NAME: '{{ vexxhost_credentials.user_domain_name }}' - OS_PROJECT_DOMAIN_ID: '{{ vexxhost_credentials.project_domain_id }}' - OS_USERNAME: '{{ vexxhost_credentials.username }}' - OS_PASSWORD: '{{ vexxhost_credentials.password }}' - OS_REGION_NAME: '{{ vexxhost_credentials.region_name }}' - OS_DOMAIN_NAME: '{{ vexxhost_credentials.user_domain_name }}' - no_log: yes diff --git a/roles/install-k8s/tasks/main.yaml b/roles/install-k8s/tasks/main.yaml index 1b798d0ab0369..fc354c73150dc 100644 --- a/roles/install-k8s/tasks/main.yaml +++ b/roles/install-k8s/tasks/main.yaml @@ -1,4 +1,4 @@ -- name: Install docker, kubernetes {{ k8s_version }} and etcd {{ etcd_version }}, disable iptables +- name: Disable iptables, install docker, etcd {{ etcd_version }} and kubernetes {{ k8s_version }} shell: cmd: | set -x @@ -25,7 +25,7 @@ cp etcd-{{ etcd_version }}-linux-amd64/etcd{,ctl} /usr/local/bin/ # Build k8s cmd - git clone https://github.com/kubernetes/kubernetes ${GOPATH}/src/k8s.io/kubernetes -b '{{ k8s_version }}' - make -C ${GOPATH}/src/k8s.io/kubernetes WHAT="cmd/kubectl cmd/hyperkube" + git clone https://github.com/kubernetes/kubernetes '{{ k8s_src_dir }}' -b '{{ 
k8s_version }}' + make -C '{{ k8s_src_dir }}' WHAT="cmd/kubectl cmd/hyperkube" executable: /bin/bash environment: '{{ golang_env }}' diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index 1c87613dea6b0..1574e1b430ae0 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -238,11 +238,14 @@ name: cloud-provider-openstack-test parent: golang-test description: | - Run unit test of cloud-provider-openstack repo against vexxhost cloud + Base job for all types of cloud-provider-openstack test jobs pre-run: playbooks/cloud-provider-openstack-test/pre.yaml nodeset: ubuntu-xenial-vexxhost vars: - k8s_specific_src_dir: '{{ ansible_user_dir }}/src/k8s.io/cloud-provider-openstack' + k8s_os_provider_src_dir: '{{ ansible_user_dir }}/src/k8s.io/cloud-provider-openstack' + k8s_src_dir: '{{ ansible_user_dir }}/src/k8s.io/kubernetes' + k8s_log_dir: '{{ ansible_user_dir }}/workspace/logs/kubernetes' + kubectl: '{{ ansible_user_dir }}/src/k8s.io/kubernetes/cluster/kubectl.sh' - job: name: cloud-provider-openstack-unittest @@ -251,8 +254,7 @@ Run unit test of cloud-provider-openstack run: playbooks/cloud-provider-openstack-unittest/run.yaml secrets: - - opentelekomcloud_credentials - nodeset: ubuntu-xenial-otc + - vexxhost_credentials - job: name: cloud-provider-openstack-acceptance-test