From fca98900bb12d06a8e048fcd1b423ccf49b4d72a Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Thu, 12 Dec 2024 10:03:17 +0200
Subject: [PATCH] Fix issues with high availability detection

* Add the missing "watch" permission for nodes.
* Make the status field a pointer to bool, to distinguish between false and
  "field not set".
* In the mutation webhook, fix the eviction strategy if an SNO cluster became
  highly available.
* Fix an issue where the status field is not set when no node is added or
  deleted.
* Fix a unit test (assert the old HCO instead of the HCO read from the client).
* Fix an issue in the node controller: ignore the error if the HCO CR is not
  present.

Signed-off-by: Nahshon Unna-Tsameret
---
 api/v1beta1/hyperconverged_types.go           |  2 +-
 api/v1beta1/zz_generated.deepcopy.go          |  5 ++
 .../hyperconverged_controller.go              |  4 ++
 controllers/nodes/nodes_controller.go         | 23 +++++---
 controllers/nodes/nodes_controller_test.go    | 45 +++++++++++-----
 deploy/cluster_role.yaml                      |  1 +
 ...perator.v1.14.0.clusterserviceversion.yaml |  1 +
 ...perator.v1.14.0.clusterserviceversion.yaml |  3 +-
 docs/api.md                                   |  2 +-
 pkg/components/components.go                  |  2 +-
 pkg/webhooks/mutator/hyperConvergedMutator.go | 45 +++++++++-------
 .../mutator/hyperConvergedMutator_test.go     | 41 +++++++-------
 .../cluster_eviction_strategy_test.go         | 54 +++++++++++--------
 tests/func-tests/utils.go                     |  8 ++-
 14 files changed, 148 insertions(+), 88 deletions(-)
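Reviewer note (not part of the applied patch): the heart of this change is the tri-state *bool. A plain bool with "omitempty" cannot distinguish "explicitly false" from "never set". Below is a minimal, compilable sketch of the pattern, with illustrative names that are not taken from the repository:

package sketch

import "k8s.io/utils/ptr"

// status mimics HyperConvergedStatus: nil means "not yet observed",
// while ptr.To(false) is an explicit "single worker node" answer.
type status struct {
	infrastructureHighlyAvailable *bool
}

// needsStatusUpdate is true when the field was never set, or when the
// observed topology disagrees with the stored value.
func needsStatusUpdate(s status, observedHA bool) bool {
	return s.infrastructureHighlyAvailable == nil ||
		*s.infrastructureHighlyAvailable != observedHA
}

// needsStatusUpdate(status{}, false)              -> true: unset, written once at init
// needsStatusUpdate(status{ptr.To(false)}, false) -> false: already in sync
// needsStatusUpdate(status{ptr.To(false)}, true)  -> true: SNO became highly available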
diff --git a/api/v1beta1/hyperconverged_types.go b/api/v1beta1/hyperconverged_types.go
index 4c1481e622..e30ed6adf9 100644
--- a/api/v1beta1/hyperconverged_types.go
+++ b/api/v1beta1/hyperconverged_types.go
@@ -736,7 +736,7 @@ type HyperConvergedStatus struct {
 	// InfrastructureHighlyAvailable describes whether the cluster has only one worker node
 	// (false) or more (true).
 	// +optional
-	InfrastructureHighlyAvailable bool `json:"infrastructureHighlyAvailable,omitempty"`
+	InfrastructureHighlyAvailable *bool `json:"infrastructureHighlyAvailable,omitempty"`
 }
 
 type Version struct {
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index e6e0ab7050..088d96ec6a 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -573,6 +573,11 @@ func (in *HyperConvergedStatus) DeepCopyInto(out *HyperConvergedStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.InfrastructureHighlyAvailable != nil {
+		in, out := &in.InfrastructureHighlyAvailable, &out.InfrastructureHighlyAvailable
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
diff --git a/controllers/hyperconverged/hyperconverged_controller.go b/controllers/hyperconverged/hyperconverged_controller.go
index 3b9107fd39..cf25852f8e 100644
--- a/controllers/hyperconverged/hyperconverged_controller.go
+++ b/controllers/hyperconverged/hyperconverged_controller.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -423,6 +424,9 @@ func (r *ReconcileHyperConverged) doReconcile(req *common.HcoRequest) (reconcile
 	if init {
 		r.eventEmitter.EmitEvent(req.Instance, corev1.EventTypeNormal, "InitHCO", "Initiating the HyperConverged")
 		r.setInitialConditions(req)
+
+		req.Instance.Status.InfrastructureHighlyAvailable = ptr.To(hcoutil.GetClusterInfo().IsInfrastructureHighlyAvailable())
+		req.StatusDirty = true
 	}
 
 	r.setLabels(req)
diff --git a/controllers/nodes/nodes_controller.go b/controllers/nodes/nodes_controller.go
index 8373871ec1..47f527c757 100644
--- a/controllers/nodes/nodes_controller.go
+++ b/controllers/nodes/nodes_controller.go
@@ -3,22 +3,22 @@ package nodes
 import (
 	"context"
 
+	operatorhandler "github.com/operator-framework/operator-lib/handler"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
-	"sigs.k8s.io/controller-runtime/pkg/event"
-
-	hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1"
-
-	operatorhandler "github.com/operator-framework/operator-lib/handler"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/event"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
+	hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1"
 	hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
 )
@@ -113,11 +113,20 @@ func (r *ReconcileNodeCounter) Reconcile(ctx context.Context, _ reconcile.Reques
 	}
 	err = r.client.Get(ctx, hcoKey, hco)
 	if err != nil {
+		if errors.IsNotFound(err) {
+			return reconcile.Result{}, nil
+		}
 		return reconcile.Result{}, err
 	}
 
-	if hco.Status.InfrastructureHighlyAvailable != clusterInfo.IsInfrastructureHighlyAvailable() {
-		hco.Status.InfrastructureHighlyAvailable = clusterInfo.IsInfrastructureHighlyAvailable()
+	if !hco.ObjectMeta.DeletionTimestamp.IsZero() {
+		return reconcile.Result{}, nil
+	}
+
+	if hco.Status.InfrastructureHighlyAvailable == nil ||
+		*hco.Status.InfrastructureHighlyAvailable != clusterInfo.IsInfrastructureHighlyAvailable() {
+
+		hco.Status.InfrastructureHighlyAvailable = ptr.To(clusterInfo.IsInfrastructureHighlyAvailable())
 		err = r.client.Status().Update(ctx, hco)
 		if err != nil {
 			return reconcile.Result{}, err
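Reviewer note (not part of the applied patch): returning success when the CR is absent is the usual controller-runtime idiom, since requeueing a NotFound error would retry forever for an object that may simply not exist yet. A compilable sketch under that assumption, with illustrative names:

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getOrSkip reports done=true with no error when the object is absent,
// so the reconciler can return cleanly instead of surfacing a spurious
// error and being requeued with backoff.
func getOrSkip(ctx context.Context, c client.Client, key types.NamespacedName, obj client.Object) (done bool, err error) {
	if err := c.Get(ctx, key, obj); err != nil {
		if errors.IsNotFound(err) {
			return true, nil // CR not created yet, or already deleted
		}
		return false, err // real failure: requeue with backoff
	}
	return false, nil
}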
diff --git a/controllers/nodes/nodes_controller_test.go b/controllers/nodes/nodes_controller_test.go
index 3106d84dd4..f0d0aa413e 100644
--- a/controllers/nodes/nodes_controller_test.go
+++ b/controllers/nodes/nodes_controller_test.go
@@ -37,9 +37,6 @@ var _ = Describe("NodesController", func() {
 
 	Describe("Reconcile NodesController", func() {
 		BeforeEach(func() {
-			hcoutil.GetClusterInfo = func() hcoutil.ClusterInfo {
-				return commontestutils.ClusterInfoMock{}
-			}
 			_ = os.Setenv(hcoutil.OperatorNamespaceEnv, commontestutils.Namespace)
 		})
 
@@ -49,6 +46,9 @@ var _ = Describe("NodesController", func() {
 
 		Context("Node Count Change", func() {
 			It("Should update InfrastructureHighlyAvailable to true if there are two or more worker nodes", func() {
+				hcoutil.GetClusterInfo = func() hcoutil.ClusterInfo {
+					return commontestutils.ClusterInfoMock{}
+				}
 				hco := commontestutils.NewHco()
 				numWorkerNodes := 3
 				var nodesArray []client.Object
@@ -66,8 +66,6 @@ var _ = Describe("NodesController", func() {
 				resources := []client.Object{hco, nodesArray[0], nodesArray[1], nodesArray[2]}
 				cl := commontestutils.InitClient(resources)
-				logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)).WithName("nodes_controller_test")
-				Expect(hcoutil.GetClusterInfo().Init(context.TODO(), cl, logger)).To(Succeed())
 				r := &ReconcileNodeCounter{
 					client: cl,
 				}
@@ -82,12 +80,14 @@ var _ = Describe("NodesController", func() {
 					cl.Get(context.TODO(),
 						types.NamespacedName{Name: commontestutils.Name, Namespace: commontestutils.Namespace},
 						latestHCO),
-				).ToNot(HaveOccurred())
+				).To(Succeed())
 
-				Expect(latestHCO.Status.InfrastructureHighlyAvailable).To(BeTrue())
-				Expect(res).To(Equal(reconcile.Result{}))
+				Expect(latestHCO.Status.InfrastructureHighlyAvailable).To(HaveValue(BeTrue()))
 			})
 
 			It("Should update InfrastructureHighlyAvailable to false if there is only one worker node", func() {
+				hcoutil.GetClusterInfo = func() hcoutil.ClusterInfo {
+					return commontestutils.ClusterInfoSNOMock{}
+				}
 				hco := commontestutils.NewHco()
 				workerNode := &corev1.Node{
 					ObjectMeta: metav1.ObjectMeta{
@@ -99,8 +99,6 @@ var _ = Describe("NodesController", func() {
 				}
 				resources := []client.Object{hco, workerNode}
 				cl := commontestutils.InitClient(resources)
-				logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)).WithName("nodes_controller_test")
-				Expect(hcoutil.GetClusterInfo().Init(context.TODO(), cl, logger)).To(Succeed())
 				r := &ReconcileNodeCounter{
 					client: cl,
 				}
@@ -115,11 +113,34 @@ var _ = Describe("NodesController", func() {
 					cl.Get(context.TODO(),
 						types.NamespacedName{Name: commontestutils.Name, Namespace: commontestutils.Namespace},
 						latestHCO),
-				).ToNot(HaveOccurred())
+				).To(Succeed())
 
-				Expect(hco.Status.InfrastructureHighlyAvailable).To(BeFalse())
+				Expect(latestHCO.Status.InfrastructureHighlyAvailable).To(HaveValue(BeFalse()))
 				Expect(res).To(Equal(reconcile.Result{}))
 			})
+
+			It("Should not return an error if the HyperConverged CR does not exist", func() {
+				workerNode := &corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "worker",
+						Labels: map[string]string{
+							"node-role.kubernetes.io/worker": "",
+						},
+					},
+				}
+				resources := []client.Object{workerNode}
+				cl := commontestutils.InitClient(resources)
+				logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)).WithName("nodes_controller_test")
+				Expect(hcoutil.GetClusterInfo().Init(context.TODO(), cl, logger)).To(Succeed())
+				r := &ReconcileNodeCounter{
+					client: cl,
+				}
+
+				// Reconcile; the missing HyperConverged CR must not be surfaced as an error
+				res, err := r.Reconcile(context.TODO(), request)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(res.Requeue).To(BeFalse())
+			})
 		})
 	})
diff --git a/deploy/cluster_role.yaml b/deploy/cluster_role.yaml
index 6362b2421d..728c7669a2 100644
--- a/deploy/cluster_role.yaml
+++ b/deploy/cluster_role.yaml
@@ -857,6 +857,7 @@ rules:
   verbs:
   - get
   - list
+  - watch
 - apiGroups:
   - ""
   resources:
diff --git a/deploy/index-image/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml b/deploy/index-image/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
index 44cc608eb7..7a90ff1ca3 100644
--- a/deploy/index-image/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
+++ b/deploy/index-image/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
@@ -327,6 +327,7 @@ spec:
           verbs:
           - get
           - list
+          - watch
         - apiGroups:
           - ""
           resources:
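Reviewer note (not part of the applied patch): the added "watch" verb here, and in the two CSV manifests, is what lets the new node controller receive node add/delete events; controller-runtime caches are informer-based, and informers require list and watch, not just get and list.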
diff --git a/deploy/olm-catalog/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml b/deploy/olm-catalog/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
index 0934533635..c3918b1e92 100644
--- a/deploy/olm-catalog/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
+++ b/deploy/olm-catalog/community-kubevirt-hyperconverged/1.14.0/manifests/kubevirt-hyperconverged-operator.v1.14.0.clusterserviceversion.yaml
@@ -9,7 +9,7 @@ metadata:
     certified: "false"
     console.openshift.io/disable-operand-delete: "true"
     containerImage: quay.io/kubevirt/hyperconverged-cluster-operator:1.14.0-unstable
-    createdAt: "2024-12-13 05:05:26"
+    createdAt: "2024-12-15 07:54:32"
     description: A unified operator deploying and controlling KubeVirt and its supporting
       operators with opinionated defaults
     features.operators.openshift.io/cnf: "false"
@@ -327,6 +327,7 @@ spec:
           verbs:
           - get
          - list
+          - watch
         - apiGroups:
           - ""
           resources:
diff --git a/docs/api.md b/docs/api.md
index ea93567af7..b36cdf3baf 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -249,7 +249,7 @@ HyperConvergedStatus defines the observed state of HyperConverged
 | dataImportSchedule | DataImportSchedule is the cron expression that is used in for the hard-coded data import cron templates. HCO generates the value of this field once and stored in the status field, so will survive restart. | string | | false |
 | dataImportCronTemplates | DataImportCronTemplates is a list of the actual DataImportCronTemplates as HCO update in the SSP CR. The list contains both the common and the custom templates, including any modification done by HCO. | [][DataImportCronTemplateStatus](#dataimportcrontemplatestatus) | | false |
 | systemHealthStatus | SystemHealthStatus reflects the health of HCO and its secondary resources, based on the aggregated conditions. | string | | false |
-| infrastructureHighlyAvailable | InfrastructureHighlyAvailable describes whether the cluster has only one worker node (false) or more (true). | bool | | false |
+| infrastructureHighlyAvailable | InfrastructureHighlyAvailable describes whether the cluster has only one worker node (false) or more (true). | *bool | | false |
 
 [Back to TOC](#table-of-contents)
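Reviewer note (not part of the applied patch): the docs row changes to *bool for the same reason as the API type. With "omitempty", a plain Go bool can never serialize an explicit false, so consumers of the status could not tell "single worker node" apart from "not yet reported".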
diff --git a/pkg/components/components.go b/pkg/components/components.go
index 16044015e9..4af89ccd36 100644
--- a/pkg/components/components.go
+++ b/pkg/components/components.go
@@ -516,7 +516,7 @@ func GetClusterPermissions() []rbacv1.PolicyRule {
 		{
 			APIGroups: emptyAPIGroup,
 			Resources: stringListToSlice("nodes"),
-			Verbs:     stringListToSlice("get", "list"),
+			Verbs:     stringListToSlice("get", "list", "watch"),
 		},
 		{
 			APIGroups: emptyAPIGroup,
diff --git a/pkg/webhooks/mutator/hyperConvergedMutator.go b/pkg/webhooks/mutator/hyperConvergedMutator.go
index 4f415a0500..252d0bfc8e 100644
--- a/pkg/webhooks/mutator/hyperConvergedMutator.go
+++ b/pkg/webhooks/mutator/hyperConvergedMutator.go
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"net/http"
 
-	hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
-
 	"gomodules.xyz/jsonpatch/v2"
 	admissionv1 "k8s.io/api/admission/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -79,23 +77,7 @@ func (hcm *HyperConvergedMutator) mutateHyperConverged(_ context.Context, req ad
 		}
 	}
 
-	if hc.Spec.EvictionStrategy == nil {
-		ci := hcoutil.GetClusterInfo()
-		if ci.IsInfrastructureHighlyAvailable() {
-			patches = append(patches, jsonpatch.JsonPatchOperation{
-				Operation: "add",
-				Path:      "/spec/evictionStrategy",
-				Value:     kubevirtcorev1.EvictionStrategyLiveMigrate,
-			})
-		} else {
-			patches = append(patches, jsonpatch.JsonPatchOperation{
-				Operation: "add",
-				Path:      "/spec/evictionStrategy",
-				Value:     kubevirtcorev1.EvictionStrategyNone,
-			})
-		}
-
-	}
+	patches = mutateEvictionStrategy(hc, patches)
 
 	if hc.Spec.MediatedDevicesConfiguration != nil {
 		if len(hc.Spec.MediatedDevicesConfiguration.MediatedDevicesTypes) > 0 && len(hc.Spec.MediatedDevicesConfiguration.MediatedDeviceTypes) == 0 { //nolint SA1019
@@ -122,3 +104,28 @@ func (hcm *HyperConvergedMutator) mutateHyperConverged(_ context.Context, req ad
 
 	return admission.Allowed("")
 }
+
+func mutateEvictionStrategy(hc *hcov1beta1.HyperConverged, patches []jsonpatch.JsonPatchOperation) []jsonpatch.JsonPatchOperation {
+	if hc.Status.InfrastructureHighlyAvailable == nil { // New HyperConverged CR
+		return patches
+	}
+
+	var value = kubevirtcorev1.EvictionStrategyNone
+	if hc.Spec.EvictionStrategy == nil {
+		if *hc.Status.InfrastructureHighlyAvailable {
+			value = kubevirtcorev1.EvictionStrategyLiveMigrate
+		}
+	} else if *hc.Status.InfrastructureHighlyAvailable && *hc.Spec.EvictionStrategy == kubevirtcorev1.EvictionStrategyNone {
+		value = kubevirtcorev1.EvictionStrategyLiveMigrate
+	} else {
+		return patches
+	}
+
+	patches = append(patches, jsonpatch.JsonPatchOperation{
+		Operation: "replace",
+		Path:      "/spec/evictionStrategy",
+		Value:     value,
+	})
+
+	return patches
+}
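Reviewer note (not part of the applied patch): mutateEvictionStrategy now derives the default from the CR's own status instead of live cluster info, covering four cases. If the status is still nil (a brand-new CR), no patch is emitted and the reconciler writes the initial value. If the spec is unset, it patches to LiveMigrate on highly available clusters and to None on SNO. If the spec is explicitly None but the status says the cluster became highly available, it patches to LiveMigrate. Any other explicit value is left untouched. One point worth double-checking: RFC 6902 defines "replace" as requiring the target location to exist, yet the spec-unset branch also emits "replace"; an "add" operation would cover both the present and absent cases.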
EvictionStrategy default", func(SNO bool, strategy *kubevirtcorev1.EvictionStrategy, patches []jsonpatch.JsonPatchOperation) { if SNO { - hcoutil.GetClusterInfo = func() hcoutil.ClusterInfo { - return &commontestutils.ClusterInfoSNOMock{} - } + cr.Status.InfrastructureHighlyAvailable = ptr.To(false) } else { - hcoutil.GetClusterInfo = func() hcoutil.ClusterInfo { - return &commontestutils.ClusterInfoMock{} - } + cr.Status.InfrastructureHighlyAvailable = ptr.To(true) } cr.Spec.EvictionStrategy = strategy @@ -289,48 +287,52 @@ var _ = Describe("test HyperConverged mutator", func() { true, nil, []jsonpatch.JsonPatchOperation{jsonpatch.JsonPatchOperation{ - Operation: "add", + Operation: "replace", Path: "/spec/evictionStrategy", Value: kubevirtcorev1.EvictionStrategyNone, }}, ), Entry("should not override EvictionStrategy if set and on SNO - 1", true, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyNone), + ptr.To(kubevirtcorev1.EvictionStrategyNone), nil, ), Entry("should not override EvictionStrategy if set and on SNO - 2", true, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyLiveMigrate), + ptr.To(kubevirtcorev1.EvictionStrategyLiveMigrate), nil, ), Entry("should not override EvictionStrategy if set and on SNO - 3", true, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyExternal), + ptr.To(kubevirtcorev1.EvictionStrategyExternal), nil, ), Entry("should set EvictionStrategyLiveMigrate if not set and not on SNO", false, nil, []jsonpatch.JsonPatchOperation{jsonpatch.JsonPatchOperation{ - Operation: "add", + Operation: "replace", Path: "/spec/evictionStrategy", Value: kubevirtcorev1.EvictionStrategyLiveMigrate, }}, ), Entry("should not override EvictionStrategy if set and not on SNO - 1", false, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyNone), - nil, + ptr.To(kubevirtcorev1.EvictionStrategyNone), + []jsonpatch.JsonPatchOperation{jsonpatch.JsonPatchOperation{ + Operation: "replace", + Path: "/spec/evictionStrategy", + Value: kubevirtcorev1.EvictionStrategyLiveMigrate, + }}, ), Entry("should not override EvictionStrategy if set and not on SNO - 2", false, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyLiveMigrate), + ptr.To(kubevirtcorev1.EvictionStrategyLiveMigrate), nil, ), Entry("should not override EvictionStrategy if set and not on SNO - 3", false, - pointerEvictionStrategy(kubevirtcorev1.EvictionStrategyExternal), + ptr.To(kubevirtcorev1.EvictionStrategyExternal), nil, ), ) @@ -468,8 +470,3 @@ func initHCMutator(s *runtime.Scheme, testClient client.Client) *HyperConvergedM return mutator } - -func pointerEvictionStrategy(strategy kubevirtcorev1.EvictionStrategy) *kubevirtcorev1.EvictionStrategy { - str := strategy - return &str -} diff --git a/tests/func-tests/cluster_eviction_strategy_test.go b/tests/func-tests/cluster_eviction_strategy_test.go index 535d35a297..99c84051b9 100644 --- a/tests/func-tests/cluster_eviction_strategy_test.go +++ b/tests/func-tests/cluster_eviction_strategy_test.go @@ -2,6 +2,7 @@ package tests_test import ( "context" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -10,10 +11,15 @@ import ( v1 "kubevirt.io/api/core/v1" + "github.com/kubevirt/hyperconverged-cluster-operator/api/v1beta1" tests "github.com/kubevirt/hyperconverged-cluster-operator/tests/func-tests" ) -var _ = Describe("Cluster level evictionStrategy default value", Serial, Ordered, Label("evictionStrategy"), func() { +var ( + rmEvictionStrategyPatch = []byte(`[{"op": "remove", "path": "/spec/evictionStrategy"}]`) +) + +var _ = Describe("Cluster level evictionStrategy default value", Label("evictionStrategy"), func() { tests.FlagParse() var ( cli client.Client @@ -40,27 +46,31 @@ var _ = Describe("Cluster level evictionStrategy default value", Serial, Ordered tests.UpdateHCORetry(ctx, cli, hc) }) - It("Should set spec.evictionStrategy = None by default on single worker clusters", Label(tests.SingleNodeLabel), func(ctx context.Context) { - Expect(singleWorkerCluster).To(BeTrue(), "this test requires single worker cluster; use the %q label to skip this test", tests.SingleNodeLabel) - - hco := tests.GetHCO(ctx, cli) - hco.Spec.EvictionStrategy = nil - hco = tests.UpdateHCORetry(ctx, cli, hco) - noneEvictionStrategy := v1.EvictionStrategyNone - Expect(hco.Spec.EvictionStrategy).To(Not(BeNil())) - Expect(hco.Spec.EvictionStrategy).To(Equal(&noneEvictionStrategy)) - }) - - It("Should set spec.evictionStrategy = LiveMigrate by default with multiple worker node", Label(tests.HighlyAvailableClusterLabel), func(ctx context.Context) { - tests.FailIfSingleNode(singleWorkerCluster) - hco := tests.GetHCO(ctx, cli) - hco.Spec.EvictionStrategy = nil - hco = tests.UpdateHCORetry(ctx, cli, hco) - lmEvictionStrategy := v1.EvictionStrategyLiveMigrate - Expect(hco.Spec.EvictionStrategy).To(Not(BeNil())) - Expect(hco.Spec.EvictionStrategy).To(Equal(&lmEvictionStrategy)) - }) - + DescribeTable("test spec.evictionStrategy", func(ctx context.Context, clusterValidationFn func(bool), expectedValue v1.EvictionStrategy) { + clusterValidationFn(singleWorkerCluster) + + Expect(tests.PatchHCO(ctx, cli, rmEvictionStrategyPatch)).To(Succeed()) + + var hc *v1beta1.HyperConverged + Eventually(func(g Gomega, ctx context.Context) { + hc = tests.GetHCO(ctx, cli) + g.Expect(hc).NotTo(BeNil()) + g.Expect(hc.Spec.EvictionStrategy).To(HaveValue(Equal(expectedValue))) + }).WithContext(ctx).WithTimeout(5 * time.Second).WithPolling(100 * time.Millisecond).Should(Succeed()) + }, + Entry( + "Should set spec.evictionStrategy = None by default on single worker clusters", + Label(tests.SingleNodeLabel), + tests.FailIfHighAvailableCluster, + v1.EvictionStrategyNone, + ), + Entry( + "Should set spec.evictionStrategy = LiveMigrate by default with multiple worker node", + Label(tests.HighlyAvailableClusterLabel), + tests.FailIfSingleNodeCluster, + v1.EvictionStrategyLiveMigrate, + ), + ) }) func isSingleWorkerCluster(ctx context.Context, cli client.Client) (bool, error) { diff --git a/tests/func-tests/utils.go b/tests/func-tests/utils.go index aa67480fa5..0f695cef42 100644 --- a/tests/func-tests/utils.go +++ b/tests/func-tests/utils.go @@ -65,8 +65,12 @@ func FailIfNotOpenShift(ctx context.Context, cli client.Client, testName string) ExpectWithOffset(1, isOpenShift).To(BeTrue(), `the %q test must run on openshift cluster. 
Use the "!%s" label filter in order to skip this test`, testName, OpenshiftLabel) } -func FailIfSingleNode(singleWorkerCluster bool) { - ExpectWithOffset(1, singleWorkerCluster).To(BeFalse(), `this test requires a single worker cluster; use the "!%s" label filter to skip this test`, SingleNodeLabel) +func FailIfSingleNodeCluster(singleWorkerCluster bool) { + ExpectWithOffset(1, singleWorkerCluster).To(BeFalse(), `this test requires a highly available cluster; use the "!%s" label filter to skip this test`, HighlyAvailableClusterLabel) +} + +func FailIfHighAvailableCluster(singleWorkerCluster bool) { + ExpectWithOffset(1, singleWorkerCluster).To(BeTrue(), `this test requires a single worker cluster; use the "!%s" label filter to skip this test`, SingleNodeLabel) } type cacheIsOpenShift struct {