diff --git a/pkg/controllers/provisioning/scheduling/existingnode.go b/pkg/controllers/provisioning/scheduling/existingnode.go
index 7a94a497bd..2deba7ca27 100644
--- a/pkg/controllers/provisioning/scheduling/existingnode.go
+++ b/pkg/controllers/provisioning/scheduling/existingnode.go
@@ -31,6 +31,7 @@ import (
 type ExistingNode struct {
 	*state.StateNode
 	cachedAvailable v1.ResourceList // Cache so we don't have to re-subtract resources on the StateNode every time
+	cachedTaints    []v1.Taint      // Cache so we don't have to re-construct the taints each time we attempt to schedule a pod
 
 	Pods     []*v1.Pod
 	topology *Topology
@@ -38,7 +39,7 @@ type ExistingNode struct {
 	requirements scheduling.Requirements
 }
 
-func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
+func NewExistingNode(n *state.StateNode, topology *Topology, taints []v1.Taint, daemonResources v1.ResourceList) *ExistingNode {
 	// The state node passed in here must be a deep copy from cluster state as we modify it
 	// the remaining daemonResources to schedule are the total daemonResources minus what has already scheduled
 	remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
@@ -54,6 +55,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
 	node := &ExistingNode{
 		StateNode:       n,
 		cachedAvailable: n.Available(),
+		cachedTaints:    taints,
 		topology:        topology,
 		requests:        remainingDaemonResources,
 		requirements:    scheduling.NewLabelRequirements(n.Labels()),
@@ -65,7 +67,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
 
 func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod, podRequests v1.ResourceList) error {
 	// Check Taints
-	if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
+	if err := scheduling.Taints(n.cachedTaints).Tolerates(pod); err != nil {
 		return err
 	}
 	// determine the volumes that will be mounted if the pod schedules
diff --git a/pkg/controllers/provisioning/scheduling/scheduler.go b/pkg/controllers/provisioning/scheduling/scheduler.go
index 7f6d27aa86..dfacff6936 100644
--- a/pkg/controllers/provisioning/scheduling/scheduler.go
+++ b/pkg/controllers/provisioning/scheduling/scheduler.go
@@ -318,9 +318,10 @@ func (s *Scheduler) calculateExistingNodeClaims(stateNodes []*state.StateNode, d
 	// create our existing nodes
 	for _, node := range stateNodes {
 		// Calculate any daemonsets that should schedule to the inflight node
+		taints := node.Taints()
 		var daemons []*corev1.Pod
 		for _, p := range daemonSetPods {
-			if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
+			if err := scheduling.Taints(taints).Tolerates(p); err != nil {
 				continue
 			}
 			if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
@@ -328,7 +329,7 @@ func (s *Scheduler) calculateExistingNodeClaims(stateNodes []*state.StateNode, d
 			}
 			daemons = append(daemons, p)
 		}
-		s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, resources.RequestsForPods(daemons...)))
+		s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, taints, resources.RequestsForPods(daemons...)))
 		// We don't use the status field and instead recompute the remaining resources to ensure we have a consistent view
 		// of the cluster during scheduling. Depending on how node creation falls out, this will also work for cases where