From d6fc815c2848da070939e0e6731baa4fac0c5684 Mon Sep 17 00:00:00 2001
From: Michal Fojtik
Date: Tue, 27 Feb 2018 10:07:27 +0100
Subject: [PATCH] UPSTREAM: 60301: Fix Deployment with Recreate strategy not
 to wait on Pods in terminal phase

---
 .../pkg/controller/deployment/recreate.go     |  15 +-
 .../controller/deployment/recreate_test.go    | 137 ++++++++++++++++--
 2 files changed, 134 insertions(+), 18 deletions(-)

diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
index 4d2f2337d7a7..b6f01ef54608 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
@@ -104,8 +104,19 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe
 		if newRS != nil && newRS.UID == rsUID {
 			continue
 		}
-		if len(podList.Items) > 0 {
-			return true
+		for _, pod := range podList.Items {
+			switch pod.Status.Phase {
+			case v1.PodFailed, v1.PodSucceeded:
+				// Don't count pods in terminal state.
+				continue
+			case v1.PodUnknown:
+				// This happens in situations where the node is temporarily disconnected from the cluster.
+				// If we can't be sure that the pod is not running, we have to count it.
+				return true
+			default:
+				// Pod is not in terminal phase.
+				return true
+			}
 		}
 	}
 	return false
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
index 2cf8661780a2..6b7c5e0f44c5 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
@@ -90,37 +90,142 @@ func TestOldPodsRunning(t *testing.T) {
 		oldRSs []*extensions.ReplicaSet
 		podMap map[types.UID]*v1.PodList
 
-		expected bool
+		hasOldPodsRunning bool
 	}{
 		{
-			name:     "no old RSs",
-			expected: false,
+			name:              "no old RSs",
+			hasOldPodsRunning: false,
 		},
 		{
-			name:     "old RSs with running pods",
-			oldRSs:   []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
-			podMap:   podMapWithUIDs([]string{"some-uid", "other-uid"}),
-			expected: true,
+			name:              "old RSs with running pods",
+			oldRSs:            []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
+			podMap:            podMapWithUIDs([]string{"some-uid", "other-uid"}),
+			hasOldPodsRunning: true,
 		},
 		{
-			name:     "old RSs without pods but with non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 1, nil)},
-			expected: true,
+			name:              "old RSs without pods but with non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
+			hasOldPodsRunning: true,
 		},
 		{
-			name:     "old RSs without pods or non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 0, nil)},
-			expected: false,
+			name:              "old RSs without pods or non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			hasOldPodsRunning: false,
+		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: false,
+		},
+		{
+			name:   "old RSs with zero status replicas but pod in unknown phase present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodUnknown,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with pending pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with running pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodRunning,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state and pending are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+				"uid-2": {
+					Items: []v1.Pod{},
+				},
+				"uid-3": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
 		},
 	}
 
 	for _, test := range tests {
-		if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
-			t.Errorf("%s: expected %t, got %t", test.name, expected, got)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
+				t.Errorf("%s: expected %t, got %t", test.name, expected, got)
+			}
+		})
 	}
 }
+
 func rsWithUID(uid string) *extensions.ReplicaSet {
 	d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
 	rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0)
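For reference, the per-pod decision that the patched oldPodsRunning loop makes can be distilled into a standalone sketch. The snippet below is illustrative only and is not part of the patch: the PodPhase type is stubbed locally and the helper name anyOldPodBlocking is invented for this example, but its switch mirrors the one added to recreate.go, where Failed and Succeeded pods no longer block a Recreate rollout while Unknown is conservatively treated as possibly running.

package main

import "fmt"

// PodPhase stands in for k8s.io/api/core/v1.PodPhase in this sketch.
type PodPhase string

const (
	PodPending   PodPhase = "Pending"
	PodRunning   PodPhase = "Running"
	PodSucceeded PodPhase = "Succeeded"
	PodFailed    PodPhase = "Failed"
	PodUnknown   PodPhase = "Unknown"
)

// anyOldPodBlocking mirrors the per-pod switch added to oldPodsRunning:
// pods in a terminal phase are skipped; every other phase (including
// Unknown, where the pod may still be running) blocks the rollout.
func anyOldPodBlocking(phases []PodPhase) bool {
	for _, phase := range phases {
		switch phase {
		case PodFailed, PodSucceeded:
			// Terminal: this pod will never run again, so ignore it.
			continue
		default:
			// Pending, Running, Unknown: must assume it may be running.
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(anyOldPodBlocking([]PodPhase{PodFailed, PodSucceeded})) // false: rollout proceeds
	fmt.Println(anyOldPodBlocking([]PodPhase{PodFailed, PodPending}))   // true: still waiting
	fmt.Println(anyOldPodBlocking([]PodPhase{PodUnknown}))              // true: waits conservatively
}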