Skip to content
This repository has been archived by the owner on Jun 13, 2024. It is now read-only.

Commit

Permalink
Improve k8s Deployment and Daemonset wait conditions
Browse files Browse the repository at this point in the history
Ensure that Deployments and DaemonSets properly wait for
all replicas to become available. Also correctly handles the
subtle edge case where the referenced service account no longer exists.

Note that this will dramatically slow DaemonSet updates,
since the wait now covers every scheduled pod.
  • Loading branch information
willthames committed Feb 21, 2020
1 parent 7159520 commit bce5c48
Show file tree
Hide file tree
Showing 5 changed files with 86 additions and 13 deletions.
2 changes: 2 additions & 0 deletions changelogs/fragments/k8s_deploy_wait.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
bugfixes:
- k8s now correctly fails if a new ReplicaSet fails to create any new pods
2 changes: 2 additions & 0 deletions changelogs/fragments/k8s_scale.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
bugfixes:
- k8s_scale has been fixed to work with recent versions of the openshift Python module
85 changes: 76 additions & 9 deletions molecule/default/tasks/apply.yml
Original file line number Diff line number Diff line change
Expand Up @@ -265,10 +265,19 @@
- k8s_service_5.result.spec.ports | length == 1
- k8s_service_5.result.spec.ports[0].port == 8081

- name: add a serviceaccount
k8s:
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: scale-deploy
namespace: "{{ apply_namespace }}"

- name: add a deployment
k8s:
definition:
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: scale-deploy
Expand All @@ -291,7 +300,7 @@

- name: scale the deployment
k8s_scale:
api_version: extensions/v1beta1
api_version: apps/v1
kind: Deployment
name: scale-deploy
namespace: "{{ apply_namespace }}"
Expand All @@ -314,13 +323,30 @@
that:
- scale_down_deploy_pods.resources | length == 0

- name: scale the deployment
k8s_scale:
api_version: extensions/v1beta1
kind: Deployment
name: scale-deploy
namespace: "{{ apply_namespace }}"
replicas: 1
- name: reapply the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: scale-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: scale-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-green
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: reapply_after_scale

- name: get pods in scale-deploy
k8s_info:
Expand All @@ -336,6 +362,47 @@
- reapply_after_scale is changed
- scale_up_deploy_pods.resources | length == 1

- name: remove the serviceaccount
k8s:
state: absent
definition:
apiVersion: v1
kind: ServiceAccount
metadata:
name: scale-deploy
namespace: "{{ apply_namespace }}"

- name: update the earlier deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: scale-deploy
namespace: "{{ apply_namespace }}"
spec:
replicas: 1
selector:
matchLabels:
app: "{{ k8s_pod_name }}"
template: "{{ k8s_pod_template }}"
wait: yes
apply: yes
vars:
k8s_pod_name: scale-deploy
k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:v0.10.0-purple
k8s_pod_ports:
- containerPort: 8080
name: http
protocol: TCP
register: deploy_after_serviceaccount_removal
ignore_errors: yes

- name: ensure that updating deployment after service account removal failed
assert:
that:
- deploy_after_serviceaccount_removal is failed

always:
- name: Remove namespace
k8s:
Expand Down
6 changes: 4 additions & 2 deletions plugins/module_utils/raw.py
Original file line number Diff line number Diff line change
Expand Up @@ -464,7 +464,8 @@ def _deployment_ready(deployment):
# Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
return (deployment.status and deployment.status.replicas is not None and
deployment.status.availableReplicas == deployment.status.replicas and
deployment.status.observedGeneration == deployment.metadata.generation)
deployment.status.observedGeneration == deployment.metadata.generation and
not deployment.status.unavailableReplicas)

def _pod_ready(pod):
return (pod.status and pod.status.containerStatuses is not None and
Expand All @@ -473,7 +474,8 @@ def _pod_ready(pod):
def _daemonset_ready(daemonset):
return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
daemonset.status.observedGeneration == daemonset.metadata.generation)
daemonset.status.observedGeneration == daemonset.metadata.generation and
not daemonset.status.numberUnavailable)

def _custom_condition(resource):
if not resource.status or not resource.status.conditions:
Expand Down
4 changes: 2 additions & 2 deletions tests/integration/targets/kubernetes/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

- pip:
name:
- openshift>=0.9.2
- openshift>=0.10.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
Expand All @@ -35,7 +35,7 @@
- pip:
name:
- kubernetes-validate==1.12.0
- openshift>=0.9.2
- openshift>=0.10.2
- coverage
virtualenv: "{{ virtualenv }}"
virtualenv_command: "{{ virtualenv_command }}"
Expand Down

0 comments on commit bce5c48

Please sign in to comment.