From 1c1c31a740d9b184998305516d15ab3975ced4be Mon Sep 17 00:00:00 2001 From: Nahshon Unna-Tsameret Date: Thu, 22 Oct 2020 11:56:55 +0300 Subject: [PATCH] move KV and CDI to the new operand pkg Signed-off-by: Nahshon Unna-Tsameret --- pkg/controller/common/consts.go | 6 + pkg/controller/common/hco_request.go | 7 + pkg/controller/commonTestUtils/testClient.go | 118 ++ pkg/controller/commonTestUtils/testUtils.go | 112 ++ .../hyperconverged_controller.go | 523 +-------- ...perconverged_controller_components_test.go | 1014 ++--------------- .../hyperconverged_controller_test.go | 46 +- .../hyperconverged/testClient_test.go | 118 -- .../hyperconverged/testUtils_test.go | 238 ++-- pkg/controller/operands/cdi.go | 268 +++++ pkg/controller/operands/cdi_test.go | 350 ++++++ .../ensure_result.go | 2 +- .../ensure_result_test.go | 2 +- pkg/controller/operands/kubevirt.go | 270 +++++ pkg/controller/operands/kubevirt_test.go | 512 +++++++++ pkg/controller/operands/operand.go | 118 ++ .../operands/operands_suite_test.go | 13 + pkg/controller/operands/testUtils_test.go | 47 + 18 files changed, 2072 insertions(+), 1692 deletions(-) create mode 100644 pkg/controller/common/consts.go create mode 100644 pkg/controller/commonTestUtils/testClient.go create mode 100644 pkg/controller/commonTestUtils/testUtils.go delete mode 100644 pkg/controller/hyperconverged/testClient_test.go create mode 100644 pkg/controller/operands/cdi.go create mode 100644 pkg/controller/operands/cdi_test.go rename pkg/controller/{hyperconverged => operands}/ensure_result.go (97%) rename pkg/controller/{hyperconverged => operands}/ensure_result_test.go (99%) create mode 100644 pkg/controller/operands/kubevirt.go create mode 100644 pkg/controller/operands/kubevirt_test.go create mode 100644 pkg/controller/operands/operand.go create mode 100644 pkg/controller/operands/operands_suite_test.go create mode 100644 pkg/controller/operands/testUtils_test.go diff --git a/pkg/controller/common/consts.go b/pkg/controller/common/consts.go new file mode 100644 index 0000000000..945d2c270b --- /dev/null +++ b/pkg/controller/common/consts.go @@ -0,0 +1,6 @@ +package common + +const ( + ReconcileCompleted = "ReconcileCompleted" + ReconcileCompletedMessage = "Reconcile completed successfully" +) diff --git a/pkg/controller/common/hco_request.go b/pkg/controller/common/hco_request.go index b5f6aa0f9d..82556f9653 100644 --- a/pkg/controller/common/hco_request.go +++ b/pkg/controller/common/hco_request.go @@ -14,6 +14,7 @@ type HcoRequest struct { Conditions HcoConditions // in-memory conditions Ctx context.Context // context of this request, to be use for any other call Instance *hcov1beta1.HyperConverged // the current state of the CR, as read from K8s + UpgradeMode bool // copy of the reconciler upgrade mode ComponentUpgradeInProgress bool // if in upgrade mode, accumulate the component upgrade status Dirty bool // is something was changed in the CR StatusDirty bool // is something was changed in the CR's Status @@ -25,8 +26,14 @@ func NewHcoRequest(request reconcile.Request, log logr.Logger, upgradeMode bool) Logger: log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name), Conditions: NewHcoConditions(), Ctx: context.TODO(), + UpgradeMode: upgradeMode, ComponentUpgradeInProgress: upgradeMode, Dirty: false, StatusDirty: false, } } + +func (req *HcoRequest) SetUpgradeMode(upgradeMode bool) { + req.UpgradeMode = upgradeMode + req.ComponentUpgradeInProgress = upgradeMode +} diff --git a/pkg/controller/commonTestUtils/testClient.go 
b/pkg/controller/commonTestUtils/testClient.go
new file mode 100644
index 0000000000..be8b62dd34
--- /dev/null
+++ b/pkg/controller/commonTestUtils/testClient.go
@@ -0,0 +1,118 @@
+package commonTestUtils
+
+import (
+	"context"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+type HcoTestClient struct {
+	client      client.Client
+	sw          *HcoTestStatusWriter
+	readErrors  TestErrors
+	writeErrors TestErrors
+}
+
+func (c *HcoTestClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error {
+	if ok, err := c.readErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.Get(ctx, key, obj)
+}
+
+func (c *HcoTestClient) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error {
+	if ok, err := c.readErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.List(ctx, list, opts...)
+}
+
+func (c *HcoTestClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error {
+	if ok, err := c.writeErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.Create(ctx, obj, opts...)
+}
+
+func (c *HcoTestClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error {
+	if ok, err := c.writeErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.Delete(ctx, obj, opts...)
+}
+
+func (c *HcoTestClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {
+	if ok, err := c.writeErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.Update(ctx, obj, opts...)
+}
+
+func (c *HcoTestClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error {
+	if ok, err := c.writeErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.Patch(ctx, obj, patch, opts...)
+}
+
+func (c *HcoTestClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error {
+	if ok, err := c.writeErrors.GetNextError(); ok {
+		return err
+	}
+	return c.client.DeleteAllOf(ctx, obj, opts...)
+}
+
+func (c *HcoTestClient) Status() client.StatusWriter {
+	return c.sw
+}
+
+func (c *HcoTestClient) InitiateReadErrors(errs ...error) {
+	c.readErrors = errs
+}
+
+func (c *HcoTestClient) InitiateWriteErrors(errs ...error) {
+	c.writeErrors = errs
+}
+
+type HcoTestStatusWriter struct {
+	client client.Client
+	errors TestErrors
+}
+
+func (sw *HcoTestStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error {
+	if ok, err := sw.errors.GetNextError(); ok {
+		return err
+	}
+	return sw.client.Update(ctx, obj, opts...)
+}
+
+func (sw *HcoTestStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error {
+	if ok, err := sw.errors.GetNextError(); ok {
+		return err
+	}
+	return sw.client.Patch(ctx, obj, patch, opts...)
+}
+
+func (sw *HcoTestStatusWriter) InitiateErrors(errs ...error) {
+	sw.errors = errs
+}
+
+type TestErrors []error
+
+func (errs *TestErrors) GetNextError() (bool, error) {
+	if len(*errs) == 0 {
+		return false, nil
+	}
+
+	err := (*errs)[0]
+	*errs = (*errs)[1:]
+
+	return true, err
+}
+
+func InitClient(clientObjects []runtime.Object) *HcoTestClient {
+	// Create a fake client to mock API calls
+	cl := fake.NewFakeClient(clientObjects...)
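+	// Editorial note (not part of the original patch): the wrapper returned
+	// below lets tests script API failures ahead of time. Reads (Get/List)
+	// consume the readErrors queue; mutations consume writeErrors, and each
+	// injected error is returned exactly once, in order. Hypothetical usage:
+	//   cl := InitClient(nil)
+	//   cl.InitiateReadErrors(errors.New("injected"))
+	//   // the next Get or List call returns the injected error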
+ return &HcoTestClient{client: cl, sw: &HcoTestStatusWriter{client: cl}} +} diff --git a/pkg/controller/commonTestUtils/testUtils.go b/pkg/controller/commonTestUtils/testUtils.go new file mode 100644 index 0000000000..380ff2a07a --- /dev/null +++ b/pkg/controller/commonTestUtils/testUtils.go @@ -0,0 +1,112 @@ +package commonTestUtils + +import ( + "context" + networkaddons "github.com/kubevirt/cluster-network-addons-operator/pkg/apis" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis" + sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" + vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" + consolev1 "github.com/openshift/api/console/v1" + "k8s.io/apimachinery/pkg/runtime" + cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" + + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + . "github.com/onsi/gomega" +) + +// Name and Namespace of our primary resource +const ( + Name = "kubevirt-hyperconverged" + Namespace = "kubevirt-hyperconverged" + Conversion_image = "quay.io/kubevirt/kubevirt-v2v-conversion:v2.0.0" + Vmware_image = "quay.io/kubevirt/kubevirt-vmware:v2.0.0" +) + +var ( + TestLogger = logf.Log.WithName("controller_hyperconverged") + TestRequest = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: Name, + Namespace: Namespace, + }, + } +) + +func NewHco() *hcov1beta1.HyperConverged { + return &hcov1beta1.HyperConverged{ + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + Namespace: Namespace, + }, + Spec: hcov1beta1.HyperConvergedSpec{}, + } +} + +func NewReq(inst *hcov1beta1.HyperConverged) *common.HcoRequest { + return &common.HcoRequest{ + Request: TestRequest, + Logger: TestLogger, + Conditions: common.NewHcoConditions(), + Ctx: context.TODO(), + Instance: inst, + } +} + +func NewHyperConvergedConfig() *sdkapi.NodePlacement { + seconds1, seconds2 := int64(1), int64(2) + return &sdkapi.NodePlacement{ + NodeSelector: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + {Key: "key1", Operator: "operator1", Values: []string{"value11, value12"}}, + {Key: "key2", Operator: "operator2", Values: []string{"value21, value22"}}, + }, + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "key1", Operator: "operator1", Values: []string{"value11, value12"}}, + {Key: "key2", Operator: "operator2", Values: []string{"value21, value22"}}, + }, + }, + }, + }, + }, + }, + Tolerations: []corev1.Toleration{ + {Key: "key1", Operator: "operator1", Value: "value1", Effect: "effect1", TolerationSeconds: &seconds1}, + {Key: "key2", Operator: "operator2", Value: "value2", Effect: "effect2", TolerationSeconds: &seconds2}, + }, + } +} + +func GetScheme() *runtime.Scheme { + s := scheme.Scheme + + for _, f := range []func(*runtime.Scheme) error{ + apis.AddToScheme, + cdiv1beta1.AddToScheme, + 
networkaddons.AddToScheme, + sspopv1.AddToScheme, + vmimportv1beta1.AddToScheme, + consolev1.AddToScheme, + } { + Expect(f(s)).To(BeNil()) + } + + return s +} diff --git a/pkg/controller/hyperconverged/hyperconverged_controller.go b/pkg/controller/hyperconverged/hyperconverged_controller.go index 77813c622c..dfc4c752dd 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller.go @@ -3,10 +3,12 @@ package hyperconverged import ( "errors" "fmt" - "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" "os" "reflect" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" + "github.com/operator-framework/operator-sdk/pkg/ready" schedulingv1 "k8s.io/api/scheduling/v1" "k8s.io/apimachinery/pkg/runtime" @@ -30,10 +32,8 @@ import ( vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" - virtconfig "kubevirt.io/kubevirt/pkg/virt-config" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -77,8 +77,6 @@ const ( metricsAggregationOldCrdName = "kubevirtmetricsaggregations.kubevirt.io" nodeLabellerBundlesOldCrdName = "kubevirtnodelabellerbundles.kubevirt.io" templateValidatorsOldCrdName = "kubevirttemplatevalidators.kubevirt.io" - - kubevirtDefaultNetworkInterfaceValue = "masquerade" ) // Add creates a new HyperConverged Controller and adds it to the Manager. 
The Manager will set fields on the Controller @@ -87,6 +85,16 @@ func Add(mgr manager.Manager, ci hcoutil.ClusterInfo) error { return add(mgr, newReconciler(mgr, ci)) } +// temp map, until we move all the operands code +var operandMap = map[string]operands.Operand{} + +func prepareHandlerMap(clt client.Client, scheme *runtime.Scheme) { + operandMap["kvc"] = &operands.KvConfigHandler{Client: clt, Scheme: scheme} + operandMap["kvpc"] = &operands.KvPriorityClassHandler{Client: clt, Scheme: scheme} + operandMap["kv"] = &operands.KubevirtHandler{Client: clt, Scheme: scheme} + operandMap["cdi"] = &operands.CdiHandler{Client: clt, Scheme: scheme} +} + // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager, ci hcoutil.ClusterInfo) reconcile.Reconciler { @@ -95,6 +103,8 @@ func newReconciler(mgr manager.Manager, ci hcoutil.ClusterInfo) reconcile.Reconc ownVersion = version.Version } + prepareHandlerMap(mgr.GetClient(), mgr.GetScheme()) + return &ReconcileHyperConverged{ client: mgr.GetClient(), scheme: mgr.GetScheme(), @@ -305,7 +315,7 @@ func (r *ReconcileHyperConverged) doReconcile(req *common.HcoRequest) (reconcile req.Logger.Info(fmt.Sprintf("Start upgrating from version %s to version %s", knownHcoVersion, r.ownVersion)) } - req.ComponentUpgradeInProgress = r.upgradeMode + req.SetUpgradeMode(r.upgradeMode) r.ensureConsoleCLIDownload(req) @@ -453,7 +463,7 @@ func (r *ReconcileHyperConverged) ensureHcoDeleted(req *common.HcoRequest) (reco } func (r *ReconcileHyperConverged) ensureHco(req *common.HcoRequest) error { - for _, f := range []func(*common.HcoRequest) *EnsureResult{ + for _, f := range []func(*common.HcoRequest) *operands.EnsureResult{ r.ensureKubeVirtPriorityClass, r.ensureKubeVirtConfig, r.ensureKubeVirt, @@ -758,312 +768,26 @@ func (r *ReconcileHyperConverged) checkComponentVersion(versionEnvName, actualVe return expectedVersion != "" && expectedVersion == actualVersion } -func newKubeVirtConfigForCR(cr *hcov1beta1.HyperConverged, namespace string) *corev1.ConfigMap { - labels := map[string]string{ - hcoutil.AppLabel: cr.Name, - } - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubevirt-config", - Labels: labels, - Namespace: namespace, - }, - // only virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, - // virtconfig.FeatureGatesKey and virtconfig.UseEmulationKey are going to be manipulated - // and only on HCO upgrades. - // virtconfig.MigrationsConfigKey is going to be removed if set in the past (only during upgrades). 
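Editorial aside on the operand plumbing introduced above: pkg/controller/operands/operand.go is created by this patch but not shown in this excerpt. Judging from how prepareHandlerMap registers the handlers and how the ensure* methods below simply return operandMap[...].Ensure(req), the abstraction presumably looks roughly like this sketch — the Ensure signature is inferred from the call sites, not copied from the file:

```go
package operands

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
)

// Operand is the interface presumably implemented by the per-component
// handlers (KvConfigHandler, KvPriorityClassHandler, KubevirtHandler,
// CdiHandler).
type Operand interface {
	// Ensure reconciles one operand toward its desired state and reports
	// the outcome (created/updated/error/upgrade progress) through
	// EnsureResult, the type this patch moves into the operands package.
	Ensure(req *common.HcoRequest) *EnsureResult
}

// Illustrative handler shape, matching prepareHandlerMap's
// &operands.KubevirtHandler{Client: clt, Scheme: scheme} literals.
type KubevirtHandler struct {
	Client client.Client
	Scheme *runtime.Scheme
}
```

This keeps the reconciler loop generic: adding a component becomes a matter of registering another Operand rather than growing hyperconverged_controller.go.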
- // TODO: This is going to change in the next HCO release where the whole configMap is going - // to be continuously reconciled - Data: map[string]string{ - virtconfig.FeatureGatesKey: "DataVolumes,SRIOV,LiveMigration,CPUManager,CPUNodeDiscovery,Sidecar,Snapshot", - virtconfig.SELinuxLauncherTypeKey: "virt_launcher.process", - virtconfig.NetworkInterfaceKey: kubevirtDefaultNetworkInterfaceValue, - }, - } - val, ok := os.LookupEnv("SMBIOS") - if ok && val != "" { - cm.Data[virtconfig.SmbiosConfigKey] = val - } - val, ok = os.LookupEnv("MACHINETYPE") - if ok && val != "" { - cm.Data[virtconfig.MachineTypeKey] = val - } - val, ok = os.LookupEnv("KVM_EMULATION") - if ok && val != "" { - cm.Data[virtconfig.UseEmulationKey] = val - } - return cm -} - -func (r *ReconcileHyperConverged) ensureKubeVirtConfig(req *common.HcoRequest) *EnsureResult { - kubevirtConfig := newKubeVirtConfigForCR(req.Instance, req.Namespace) - res := NewEnsureResult(kubevirtConfig) - err := controllerutil.SetControllerReference(req.Instance, kubevirtConfig, r.scheme) - if err != nil { - return res.Error(err) - } - - key, err := client.ObjectKeyFromObject(kubevirtConfig) - if err != nil { - req.Logger.Error(err, "Failed to get object key for kubevirt config") - } - res.SetName(key.Name) - - found := &corev1.ConfigMap{} - err = r.client.Get(req.Ctx, key, found) - if err != nil { - if apierrors.IsNotFound(err) { - req.Logger.Info("Creating kubevirt config") - err = r.client.Create(req.Ctx, kubevirtConfig) - if err == nil { - return res.SetCreated() - } - } - return res.Error(err) - } - - req.Logger.Info("KubeVirt config already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name) - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return res.Error(err) - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - if r.upgradeMode { - // only virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, - // virtconfig.FeatureGatesKey and virtconfig.UseEmulationKey are going to be manipulated - // and only on HCO upgrades. - // virtconfig.MigrationsConfigKey is going to be removed if set in the past (only during upgrades). 
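Editorial aside: the upgrade-mode branch that follows (relocated, per the file list, into pkg/controller/operands/kubevirt.go) reduces to the pattern sketched below, assuming the move preserved the semantics; the helper name is hypothetical. Note that the original lists virtconfig.MigrationsConfigKey both in the sync loop and in the delete loop; the delete pass wins, so the sketch keeps only the delete:

```go
package operands

import (
	corev1 "k8s.io/api/core/v1"
	virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)

// syncKubeVirtConfigOnUpgrade is a hedged sketch of the upgrade-only
// reconciliation of the kubevirt-config ConfigMap. It only runs during an
// HCO upgrade; outside upgrades the existing ConfigMap is left untouched.
func syncKubeVirtConfigOnUpgrade(found, desired *corev1.ConfigMap) bool {
	dirty := false
	// keys forced back to the operator-provided defaults during upgrade
	for _, k := range []string{
		virtconfig.FeatureGatesKey,
		virtconfig.SmbiosConfigKey,
		virtconfig.MachineTypeKey,
		virtconfig.SELinuxLauncherTypeKey,
		virtconfig.UseEmulationKey,
	} {
		if found.Data[k] != desired.Data[k] {
			found.Data[k] = desired.Data[k]
			dirty = true
		}
	}
	// keys removed entirely if a previous version set them
	if _, ok := found.Data[virtconfig.MigrationsConfigKey]; ok {
		delete(found.Data, virtconfig.MigrationsConfigKey)
		dirty = true
	}
	return dirty // the caller issues the Update only when something changed
}
```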
- // TODO: This is going to change in the next HCO release where the whole configMap is going - // to be continuously reconciled - dirty := false - for _, k := range []string{ - virtconfig.FeatureGatesKey, - virtconfig.SmbiosConfigKey, - virtconfig.MachineTypeKey, - virtconfig.SELinuxLauncherTypeKey, - virtconfig.UseEmulationKey, - virtconfig.MigrationsConfigKey, - } { - if found.Data[k] != kubevirtConfig.Data[k] { - req.Logger.Info(fmt.Sprintf("Updating %s on existing KubeVirt config", k)) - found.Data[k] = kubevirtConfig.Data[k] - dirty = true - } - } - for _, k := range []string{ - virtconfig.MigrationsConfigKey, - } { - _, ok := found.Data[k] - if ok { - req.Logger.Info(fmt.Sprintf("Deleting %s on existing KubeVirt config", k)) - delete(found.Data, k) - dirty = true - } - } - if dirty { - err = r.client.Update(req.Ctx, found) - if err != nil { - req.Logger.Error(err, fmt.Sprintf("Failed updating an existing kubevirt config")) - return res.Error(err) - } - } - } - - return res.SetUpgradeDone(req.ComponentUpgradeInProgress) +func (r *ReconcileHyperConverged) ensureKubeVirtPriorityClass(req *common.HcoRequest) *operands.EnsureResult { + return operandMap["kvpc"].Ensure(req) } -func (r *ReconcileHyperConverged) ensureKubeVirtPriorityClass(req *common.HcoRequest) *EnsureResult { - req.Logger.Info("Reconciling KubeVirt PriorityClass") - pc := req.Instance.NewKubeVirtPriorityClass() - res := NewEnsureResult(pc) - key, err := client.ObjectKeyFromObject(pc) - if err != nil { - req.Logger.Error(err, "Failed to get object key for KubeVirt PriorityClass") - return res.Error(err) - } - - res.SetName(key.Name) - found := &schedulingv1.PriorityClass{} - err = r.client.Get(req.Ctx, key, found) - - if err != nil { - if apierrors.IsNotFound(err) { - // create the new object - err = r.client.Create(req.Ctx, pc, &client.CreateOptions{}) - if err == nil { - return res.SetCreated() - } - } - - return res.Error(err) - } - - // at this point we found the object in the cache and we check if something was changed - if pc.Name == found.Name && pc.Value == found.Value && pc.Description == found.Description { - req.Logger.Info("KubeVirt PriorityClass already exists", "PriorityClass.Name", pc.Name) - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - req.Logger.Error(err, "failed getting object reference for found object") - return res.Error(err) - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - return res.SetUpgradeDone(req.ComponentUpgradeInProgress) - } - - // something was changed but since we can't patch a priority class object, we remove it - err = r.client.Delete(req.Ctx, found, &client.DeleteOptions{}) - if err != nil { - return res.Error(err) - } - - // create the new object - err = r.client.Create(req.Ctx, pc, &client.CreateOptions{}) - if err != nil { - return res.Error(err) - } - return res.SetUpdated() +func (r *ReconcileHyperConverged) ensureKubeVirtConfig(req *common.HcoRequest) *operands.EnsureResult { + return operandMap["kvc"].Ensure(req) } -func (r *ReconcileHyperConverged) ensureKubeVirt(req *common.HcoRequest) *EnsureResult { - virt := req.Instance.NewKubeVirt() - res := NewEnsureResult(virt) - if err := controllerutil.SetControllerReference(req.Instance, virt, r.scheme); err != nil { - return res.Error(err) - } - - key, err := client.ObjectKeyFromObject(virt) - if err != nil { - req.Logger.Error(err, "Failed to get object key for KubeVirt") - } - - res.SetName(key.Name) - found := &kubevirtv1.KubeVirt{} - err = 
r.client.Get(req.Ctx, key, found) - if err != nil { - if apierrors.IsNotFound(err) { - req.Logger.Info("Creating kubevirt") - err = r.client.Create(req.Ctx, virt) - if err == nil { - return res.SetCreated().SetName(virt.Name) - } - } - return res.Error(err) - } - - req.Logger.Info("KubeVirt already exists", "KubeVirt.Namespace", found.Namespace, "KubeVirt.Name", found.Name) - - if !reflect.DeepEqual(found.Spec, virt.Spec) { - virt.Spec.DeepCopyInto(&found.Spec) - req.Logger.Info("Updating existing KubeVirt's Spec to its default value") - err = r.client.Update(req.Ctx, found) - if err != nil { - return res.Error(err) - } - return res.SetUpdated() - } - - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return res.Error(err) - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - // Handle KubeVirt resource conditions - isReady := handleComponentConditions(r, req, "KubeVirt", translateKubeVirtConds(found.Status.Conditions)) - - upgradeDone := req.ComponentUpgradeInProgress && isReady && r.checkComponentVersion(hcoutil.KubevirtVersionEnvV, found.Status.ObservedKubeVirtVersion) - - return res.SetUpgradeDone(upgradeDone) +func (r *ReconcileHyperConverged) ensureKubeVirt(req *common.HcoRequest) *operands.EnsureResult { + return operandMap["kv"].Ensure(req) } -func (r *ReconcileHyperConverged) ensureCDI(req *common.HcoRequest) *EnsureResult { - cdi := req.Instance.NewCDI() - res := NewEnsureResult(cdi) - - key, err := client.ObjectKeyFromObject(cdi) - if err != nil { - req.Logger.Error(err, "Failed to get object key for CDI") - } - - res.SetName(key.Name) - found := &cdiv1beta1.CDI{} - err = r.client.Get(req.Ctx, key, found) - - if err != nil { - if apierrors.IsNotFound(err) { - req.Logger.Info("Creating CDI") - err = r.client.Create(req.Ctx, cdi) - if err == nil { - return res.SetCreated() - } - } - return res.Error(err) - } - - req.Logger.Info("CDI already exists", "CDI.Namespace", found.Namespace, "CDI.Name", found.Name) - - err = r.ensureKubeVirtStorageConfig(req) - if err != nil { - return res.Error(err) - } - - err = r.ensureKubeVirtStorageRole(req) - if err != nil { - return res.Error(err) - } - - err = r.ensureKubeVirtStorageRoleBinding(req) - if err != nil { - return res.Error(err) - } - - existingOwners := found.GetOwnerReferences() - - // Previous versions used to have HCO-operator (scope namespace) - // as the owner of CDI (scope cluster). - // It's not legal, so remove that. 
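Editorial aside: the cleanup below (carried over into operands/cdi.go, per the file list) exists because a cluster-scoped object such as CDI cannot legally be owned by a namespaced one such as the HCO operator; Kubernetes garbage collection does not permit cross-scope owner references. A minimal sketch of the pattern, with a hypothetical helper name:

```go
package operands

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// dropLegacyOwnerRefs removes owner references left on CDI by older HCO
// versions. Cluster-scoped dependents must not name namespaced owners, so
// any owner reference found here is cleared unconditionally.
func dropLegacyOwnerRefs(ctx context.Context, c client.Client, cdi *cdiv1beta1.CDI) error {
	if len(cdi.GetOwnerReferences()) == 0 {
		return nil // nothing to clean up
	}
	cdi.SetOwnerReferences([]metav1.OwnerReference{})
	return c.Update(ctx, cdi)
}
```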
- if len(existingOwners) > 0 { - req.Logger.Info("CDI has owners, removing...") - found.SetOwnerReferences([]metav1.OwnerReference{}) - err = r.client.Update(req.Ctx, found) - if err != nil { - req.Logger.Error(err, "Failed to remove CDI's previous owners") - } - } - - if !reflect.DeepEqual(found.Spec, cdi.Spec) { - req.Logger.Info("Updating existing CDI' Spec to its default value") - cdi.Spec.DeepCopyInto(&found.Spec) - err = r.client.Update(req.Ctx, found) - if err != nil { - return res.Error(err) - } - return res.SetUpdated() - } - - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return res.Error(err) - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - // Handle CDI resource conditions - isReady := handleComponentConditions(r, req, "CDI", found.Status.Conditions) - - upgradeDone := req.ComponentUpgradeInProgress && isReady && r.checkComponentVersion(hcoutil.CdiVersionEnvV, found.Status.ObservedVersion) - - return res.SetUpgradeDone(upgradeDone) +func (r *ReconcileHyperConverged) ensureCDI(req *common.HcoRequest) *operands.EnsureResult { + return operandMap["cdi"].Ensure(req) } -func (r *ReconcileHyperConverged) ensureNetworkAddons(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureNetworkAddons(req *common.HcoRequest) *operands.EnsureResult { networkAddons := req.Instance.NewNetworkAddons() - res := NewEnsureResult(networkAddons) + res := operands.NewEnsureResult(networkAddons) key, err := client.ObjectKeyFromObject(networkAddons) if err != nil { req.Logger.Error(err, "Failed to get object key for Network Addons") @@ -1219,10 +943,10 @@ func (r *ReconcileHyperConverged) componentNotAvailable(req *common.HcoRequest, }) } -func (r *ReconcileHyperConverged) ensureKubeVirtCommonTemplateBundle(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureKubeVirtCommonTemplateBundle(req *common.HcoRequest) *operands.EnsureResult { kvCTB := req.Instance.NewKubeVirtCommonTemplateBundle() - res := NewEnsureResult(kvCTB) + res := operands.NewEnsureResult(kvCTB) if !r.clusterInfo.IsOpenshift() { // SSP operators Only supported in OpenShift. Ignore in K8s. return res.SetUpgradeDone(true) } @@ -1333,9 +1057,9 @@ func newKubeVirtNodeLabellerBundleForCR(cr *hcov1beta1.HyperConverged, namespace } } -func (r *ReconcileHyperConverged) ensureKubeVirtNodeLabellerBundle(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureKubeVirtNodeLabellerBundle(req *common.HcoRequest) *operands.EnsureResult { kvNLB := newKubeVirtNodeLabellerBundleForCR(req.Instance, req.Namespace) - res := NewEnsureResult(kvNLB) + res := operands.NewEnsureResult(kvNLB) if !r.clusterInfo.IsOpenshift() { // SSP operators Only supported in OpenShift. Ignore in K8s. return res.SetUpgradeDone(true) } @@ -1416,9 +1140,9 @@ func newIMSConfigForCR(cr *hcov1beta1.HyperConverged, namespace string) *corev1. 
} } -func (r *ReconcileHyperConverged) ensureIMSConfig(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureIMSConfig(req *common.HcoRequest) *operands.EnsureResult { imsConfig := newIMSConfigForCR(req.Instance, req.Namespace) - res := NewEnsureResult(imsConfig) + res := operands.NewEnsureResult(imsConfig) if os.Getenv("CONVERSION_CONTAINER") == "" { return res.Error(errors.New("ims-conversion-container not specified")) } @@ -1484,9 +1208,9 @@ func (r *ReconcileHyperConverged) ensureIMSConfig(req *common.HcoRequest) *Ensur return res.SetUpgradeDone(req.ComponentUpgradeInProgress) } -func (r *ReconcileHyperConverged) ensureVMImport(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureVMImport(req *common.HcoRequest) *operands.EnsureResult { vmImport := newVMImportForCR(req.Instance) - res := NewEnsureResult(vmImport) + res := operands.NewEnsureResult(vmImport) key, err := client.ObjectKeyFromObject(vmImport) if err != nil { @@ -1633,9 +1357,9 @@ func newKubeVirtTemplateValidatorForCR(cr *hcov1beta1.HyperConverged, namespace } } -func (r *ReconcileHyperConverged) ensureKubeVirtTemplateValidator(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureKubeVirtTemplateValidator(req *common.HcoRequest) *operands.EnsureResult { kvTV := newKubeVirtTemplateValidatorForCR(req.Instance, req.Namespace) - res := NewEnsureResult(kvTV) + res := operands.NewEnsureResult(kvTV) if !r.clusterInfo.IsOpenshift() { // SSP operators Only supported in OpenShift. Ignore in K8s. return res.SetUpgradeDone(true) } @@ -1696,175 +1420,6 @@ func (r *ReconcileHyperConverged) ensureKubeVirtTemplateValidator(req *common.Hc return res.SetUpgradeDone(req.ComponentUpgradeInProgress && upgradeInProgress) } -func newKubeVirtStorageRoleForCR(cr *hcov1beta1.HyperConverged, namespace string) *rbacv1.Role { - labels := map[string]string{ - "app": cr.Name, - } - return &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hco.kubevirt.io:config-reader", - Labels: labels, - Namespace: namespace, - }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"configmaps"}, - ResourceNames: []string{"kubevirt-storage-class-defaults"}, - Verbs: []string{"get", "watch", "list"}, - }, - }, - } -} - -func newKubeVirtStorageRoleBindingForCR(cr *hcov1beta1.HyperConverged, namespace string) *rbacv1.RoleBinding { - labels := map[string]string{ - "app": cr.Name, - } - return &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hco.kubevirt.io:config-reader", - Labels: labels, - Namespace: namespace, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: "hco.kubevirt.io:config-reader", - }, - Subjects: []rbacv1.Subject{ - { - APIGroup: "rbac.authorization.k8s.io", - Kind: "Group", - Name: "system:authenticated", - }, - }, - } -} - -func newKubeVirtStorageConfigForCR(cr *hcov1beta1.HyperConverged, namespace string) *corev1.ConfigMap { - localSC := "local-sc" - if *(&cr.Spec.LocalStorageClassName) != "" { - localSC = *(&cr.Spec.LocalStorageClassName) - } - - labels := map[string]string{ - hcoutil.AppLabel: cr.Name, - } - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "kubevirt-storage-class-defaults", - Labels: labels, - Namespace: namespace, - }, - Data: map[string]string{ - "accessMode": "ReadWriteOnce", - "volumeMode": "Filesystem", - localSC + ".accessMode": "ReadWriteOnce", - localSC + ".volumeMode": "Filesystem", - }, - } -} - -func (r *ReconcileHyperConverged) 
ensureKubeVirtStorageRole(req *common.HcoRequest) error { - kubevirtStorageRole := newKubeVirtStorageRoleForCR(req.Instance, req.Namespace) - if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageRole, r.scheme); err != nil { - return err - } - - key, err := client.ObjectKeyFromObject(kubevirtStorageRole) - if err != nil { - req.Logger.Error(err, "Failed to get object key for kubevirt storage role") - } - - found := &rbacv1.Role{} - err = r.client.Get(req.Ctx, key, found) - if err != nil && apierrors.IsNotFound(err) { - req.Logger.Info("Creating kubevirt storage role") - return r.client.Create(req.Ctx, kubevirtStorageRole) - } - - if err != nil { - return err - } - - req.Logger.Info("KubeVirt storage role already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name) - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return err - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - return nil -} - -func (r *ReconcileHyperConverged) ensureKubeVirtStorageRoleBinding(req *common.HcoRequest) error { - kubevirtStorageRoleBinding := newKubeVirtStorageRoleBindingForCR(req.Instance, req.Namespace) - if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageRoleBinding, r.scheme); err != nil { - return err - } - - key, err := client.ObjectKeyFromObject(kubevirtStorageRoleBinding) - if err != nil { - req.Logger.Error(err, "Failed to get object key for kubevirt storage rolebinding") - } - - found := &rbacv1.RoleBinding{} - err = r.client.Get(req.Ctx, key, found) - if err != nil && apierrors.IsNotFound(err) { - req.Logger.Info("Creating kubevirt storage rolebinding") - return r.client.Create(req.Ctx, kubevirtStorageRoleBinding) - } - - if err != nil { - return err - } - - req.Logger.Info("KubeVirt storage rolebinding already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name) - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return err - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - return nil -} - -func (r *ReconcileHyperConverged) ensureKubeVirtStorageConfig(req *common.HcoRequest) error { - kubevirtStorageConfig := newKubeVirtStorageConfigForCR(req.Instance, req.Namespace) - if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageConfig, r.scheme); err != nil { - return err - } - - key, err := client.ObjectKeyFromObject(kubevirtStorageConfig) - if err != nil { - req.Logger.Error(err, "Failed to get object key for kubevirt storage config") - } - - found := &corev1.ConfigMap{} - err = r.client.Get(req.Ctx, key, found) - if err != nil && apierrors.IsNotFound(err) { - req.Logger.Info("Creating kubevirt storage config") - return r.client.Create(req.Ctx, kubevirtStorageConfig) - } - - if err != nil { - return err - } - - req.Logger.Info("KubeVirt storage config already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name) - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(r.scheme, found) - if err != nil { - return err - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - return nil -} - func newKubeVirtMetricsAggregationForCR(cr *hcov1beta1.HyperConverged, namespace string) 
*sspv1.KubevirtMetricsAggregation { labels := map[string]string{ hcoutil.AppLabel: cr.Name, @@ -1878,9 +1433,9 @@ func newKubeVirtMetricsAggregationForCR(cr *hcov1beta1.HyperConverged, namespace } } -func (r *ReconcileHyperConverged) ensureKubeVirtMetricsAggregation(req *common.HcoRequest) *EnsureResult { +func (r *ReconcileHyperConverged) ensureKubeVirtMetricsAggregation(req *common.HcoRequest) *operands.EnsureResult { kubevirtMetricsAggregation := newKubeVirtMetricsAggregationForCR(req.Instance, req.Namespace) - res := NewEnsureResult(kubevirtMetricsAggregation) + res := operands.NewEnsureResult(kubevirtMetricsAggregation) if !r.clusterInfo.IsOpenshift() { // SSP operators Only supported in OpenShift. Ignore in K8s. return res.SetUpgradeDone(true) } diff --git a/pkg/controller/hyperconverged/hyperconverged_controller_components_test.go b/pkg/controller/hyperconverged/hyperconverged_controller_components_test.go index 88944a5ed8..cc3bdf183d 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller_components_test.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller_components_test.go @@ -1,861 +1,49 @@ package hyperconverged import ( - networkaddonsshared "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/shared" - networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1" - hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" - "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" - hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" - vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" - consolev1 "github.com/openshift/api/console/v1" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - "github.com/openshift/custom-resource-status/testlib" - corev1 "k8s.io/api/core/v1" - schedulingv1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - kubevirtv1 "kubevirt.io/client-go/api/v1" - cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" - virtconfig "kubevirt.io/kubevirt/pkg/virt-config" - "os" - "sigs.k8s.io/controller-runtime/pkg/client" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/ginkgo/extensions/table" - . 
"github.com/onsi/gomega" - - "context" - "fmt" - - "k8s.io/client-go/tools/reference" -) - -var _ = Describe("HyperConverged Components", func() { - - Context("KubeVirt Priority Classes", func() { - - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = newHco() - req = newReq(hco) - }) - - It("should create if not present", func() { - expectedResource := hco.NewKubeVirtPriorityClass() - cl := initClient([]runtime.Object{}) - r := initReconciler(cl) - res := r.ensureKubeVirtPriorityClass(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - key, err := client.ObjectKeyFromObject(expectedResource) - Expect(err).ToNot(HaveOccurred()) - foundResource := &schedulingv1.PriorityClass{} - Expect(cl.Get(context.TODO(), key, foundResource)).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Value).To(Equal(expectedResource.Value)) - Expect(foundResource.GlobalDefault).To(Equal(expectedResource.GlobalDefault)) - }) - - It("should do nothing if already exists", func() { - expectedResource := hco.NewKubeVirtPriorityClass() - cl := initClient([]runtime.Object{expectedResource}) - r := initReconciler(cl) - res := r.ensureKubeVirtPriorityClass(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - DescribeTable("should update if something changed", func(modifiedResource *schedulingv1.PriorityClass) { - cl := initClient([]runtime.Object{modifiedResource}) - r := initReconciler(cl) - res := r.ensureKubeVirtPriorityClass(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - expectedResource := hco.NewKubeVirtPriorityClass() - key, err := client.ObjectKeyFromObject(expectedResource) - Expect(err).ToNot(HaveOccurred()) - foundResource := &schedulingv1.PriorityClass{} - Expect(cl.Get(context.TODO(), key, foundResource)) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Value).To(Equal(expectedResource.Value)) - Expect(foundResource.GlobalDefault).To(Equal(expectedResource.GlobalDefault)) - }, - Entry("with modified value", - &schedulingv1.PriorityClass{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "scheduling.k8s.io/v1", - Kind: "PriorityClass", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "kubevirt-cluster-critical", - }, - Value: 1, - GlobalDefault: false, - Description: "", - }), - Entry("with modified global default", - &schedulingv1.PriorityClass{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "scheduling.k8s.io/v1", - Kind: "PriorityClass", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "kubevirt-cluster-critical", - }, - Value: 1000000000, - GlobalDefault: true, - Description: "", - }), - ) - - }) - - Context("KubeVirt Config", func() { - - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - updatableKeys := [...]string{virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, virtconfig.FeatureGatesKey} - removeKeys := [...]string{virtconfig.MigrationsConfigKey} - unupdatableKeys := [...]string{virtconfig.NetworkInterfaceKey} - - BeforeEach(func() { - hco = newHco() - req = newReq(hco) - - os.Setenv("SMBIOS", "new-smbios-value-that-we-have-to-set") - os.Setenv("MACHINETYPE", "new-machinetype-value-that-we-have-to-set") - }) - - It("should create if not present", func() { - expectedResource := 
newKubeVirtConfigForCR(req.Instance, namespace) - cl := initClient([]runtime.Object{}) - r := initReconciler(cl) - res := r.ensureKubeVirtConfig(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &corev1.ConfigMap{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - It("should find if present", func() { - expectedResource := newKubeVirtConfigForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - res := r.ensureKubeVirtConfig(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - It("should update only a few keys and only when in upgrade mode", func() { - expectedResource := newKubeVirtConfigForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - outdatedResource := newKubeVirtConfigForCR(hco, namespace) - outdatedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", outdatedResource.Namespace, outdatedResource.Name) - // values we should update - outdatedResource.Data[virtconfig.SmbiosConfigKey] = "old-smbios-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.MachineTypeKey] = "old-machinetype-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.SELinuxLauncherTypeKey] = "old-selinuxlauncher-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.FeatureGatesKey] = "old-featuregates-value-that-we-have-to-update" - // value that we should remove if configured - outdatedResource.Data[virtconfig.MigrationsConfigKey] = "old-migrationsconfig-value-that-we-should-remove" - // values we should preserve - outdatedResource.Data[virtconfig.NetworkInterfaceKey] = "old-defaultnetworkinterface-value-that-we-should-preserve" - - cl := initClient([]runtime.Object{hco, outdatedResource}) - r := initReconciler(cl) - - // force upgrade mode - r.upgradeMode = true - res := r.ensureKubeVirtConfig(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &corev1.ConfigMap{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - - for _, k := range updatableKeys { - Expect(foundResource.Data[k]).To(Not(Equal(outdatedResource.Data[k]))) - Expect(foundResource.Data[k]).To(Equal(expectedResource.Data[k])) - } - for _, k := range unupdatableKeys { - Expect(foundResource.Data[k]).To(Equal(outdatedResource.Data[k])) - Expect(foundResource.Data[k]).To(Not(Equal(expectedResource.Data[k]))) - } - for _, k := range removeKeys { - Expect(outdatedResource.Data).To(HaveKey(k)) - Expect(expectedResource.Data).To(Not(HaveKey(k))) - 
Expect(foundResource.Data).To(Not(HaveKey(k))) - } - }) - - It("should not touch it when not in in upgrade mode", func() { - expectedResource := newKubeVirtConfigForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - outdatedResource := newKubeVirtConfigForCR(hco, namespace) - outdatedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", outdatedResource.Namespace, outdatedResource.Name) - // values we should update - outdatedResource.Data[virtconfig.SmbiosConfigKey] = "old-smbios-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.MachineTypeKey] = "old-machinetype-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.SELinuxLauncherTypeKey] = "old-selinuxlauncher-value-that-we-have-to-update" - outdatedResource.Data[virtconfig.FeatureGatesKey] = "old-featuregates-value-that-we-have-to-update" - // values we should preserve - outdatedResource.Data[virtconfig.MigrationsConfigKey] = "old-migrationsconfig-value-that-we-should-preserve" - outdatedResource.Data[virtconfig.DefaultNetworkInterface] = "old-defaultnetworkinterface-value-that-we-should-preserve" - - cl := initClient([]runtime.Object{hco, outdatedResource}) - r := initReconciler(cl) - - // ensure that we are not in upgrade mode - r.upgradeMode = false - - res := r.ensureKubeVirtConfig(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &corev1.ConfigMap{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(foundResource.Data).To(Equal(outdatedResource.Data)) - Expect(foundResource.Data).To(Not(Equal(expectedResource.Data))) - }) - }) - - Context("KubeVirt Storage Config", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = newHco() - req = newReq(hco) - }) - - It("should create if not present", func() { - expectedResource := newKubeVirtStorageConfigForCR(hco, namespace) - cl := initClient([]runtime.Object{}) - r := initReconciler(cl) - err := r.ensureKubeVirtStorageConfig(req) - Expect(err).To(BeNil()) - - foundResource := &corev1.ConfigMap{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - It("should find if present", func() { - expectedResource := newKubeVirtStorageConfigForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - err := r.ensureKubeVirtStorageConfig(req) - Expect(err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - It("volumeMode should be filesystem when platform is baremetal", func() { - hco.Spec.BareMetalPlatform = true - - expectedResource := 
newKubeVirtStorageConfigForCR(hco, namespace) - Expect(expectedResource.Data["volumeMode"]).To(Equal("Filesystem")) - }) - - It("volumeMode should be filesystem when platform is not baremetal", func() { - hco.Spec.BareMetalPlatform = false - - expectedResource := newKubeVirtStorageConfigForCR(hco, namespace) - Expect(expectedResource.Data["volumeMode"]).To(Equal("Filesystem")) - }) - - It("local storage class name should be available when specified", func() { - hco.Spec.LocalStorageClassName = "local" - - expectedResource := newKubeVirtStorageConfigForCR(hco, namespace) - Expect(expectedResource.Data["local.accessMode"]).To(Equal("ReadWriteOnce")) - Expect(expectedResource.Data["local.volumeMode"]).To(Equal("Filesystem")) - }) - }) - - Context("KubeVirt", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = newHco() - req = newReq(hco) - }) - - It("should create if not present", func() { - expectedResource := hco.NewKubeVirt(namespace) - cl := initClient([]runtime.Object{}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &kubevirtv1.KubeVirt{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - It("should find if present", func() { - expectedResource := hco.NewKubeVirt(namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubeVirtConditions", - Message: "KubeVirt resource has no conditions", - })) - Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubeVirtConditions", - Message: "KubeVirt resource has no conditions", - })) - Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubeVirtConditions", - Message: "KubeVirt resource has no conditions", - })) - }) - - It("should set default UninstallStrategy if missing", func() { - expectedResource := hco.NewKubeVirt(namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - missingUSResource := hco.NewKubeVirt(namespace) - missingUSResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", 
missingUSResource.Namespace, missingUSResource.Name) - missingUSResource.Spec.UninstallStrategy = "" - - cl := initClient([]runtime.Object{hco, missingUSResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &kubevirtv1.KubeVirt{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Spec.UninstallStrategy).To(Equal(expectedResource.Spec.UninstallStrategy)) - }) - - It("should add node placement if missing in KubeVirt", func() { - existingResource := hco.NewKubeVirt() - - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &kubevirtv1.KubeVirt{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Infra).To(BeNil()) - Expect(existingResource.Spec.Workloads).To(BeNil()) - - Expect(foundResource.Spec.Infra).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodePlacement).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodePlacement.Affinity).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodePlacement.NodeSelector["key1"]).Should(Equal("value1")) - Expect(foundResource.Spec.Infra.NodePlacement.NodeSelector["key2"]).Should(Equal("value2")) - - Expect(foundResource.Spec.Workloads).ToNot(BeNil()) - Expect(foundResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) - Expect(foundResource.Spec.Workloads.NodePlacement.Tolerations).Should(Equal(hco.Spec.Workloads.NodePlacement.Tolerations)) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should remove node placement if missing in HCO CR", func() { - - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - existingResource := hcoNodePlacement.NewKubeVirt() - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &kubevirtv1.KubeVirt{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Infra).ToNot(BeNil()) - Expect(existingResource.Spec.Workloads).ToNot(BeNil()) - - Expect(foundResource.Spec.Infra).To(BeNil()) - Expect(foundResource.Spec.Workloads).To(BeNil()) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - existingResource := hco.NewKubeVirt() - - // now, modify HCO's node placement - seconds3 := 
int64(3) - hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, - }) - - hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &kubevirtv1.KubeVirt{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Infra).ToNot(BeNil()) - Expect(existingResource.Spec.Infra.NodePlacement).ToNot(BeNil()) - Expect(existingResource.Spec.Infra.NodePlacement.Tolerations).To(HaveLen(2)) - Expect(existingResource.Spec.Workloads).ToNot(BeNil()) - - Expect(existingResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) - Expect(existingResource.Spec.Workloads.NodePlacement.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(foundResource.Spec.Infra).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodePlacement).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodePlacement.Tolerations).To(HaveLen(3)) - - Expect(foundResource.Spec.Workloads).ToNot(BeNil()) - Expect(foundResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) - Expect(foundResource.Spec.Workloads.NodePlacement.NodeSelector["key1"]).Should(Equal("something else")) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should handle conditions", func() { - expectedResource := hco.NewKubeVirt(namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []kubevirtv1.KubeVirtCondition{ - kubevirtv1.KubeVirtCondition{ - Type: kubevirtv1.KubeVirtConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", - }, - kubevirtv1.KubeVirtCondition{ - Type: kubevirtv1.KubeVirtConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - kubevirtv1.KubeVirtCondition{ - Type: kubevirtv1.KubeVirtConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - res := r.ensureKubeVirt(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubeVirtNotAvailable", - Message: "KubeVirt is not available: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubeVirtProgressing", - Message: "KubeVirt is progressing: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - 
Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubeVirtProgressing", - Message: "KubeVirt is progressing: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionDegraded]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "KubeVirtDegraded", - Message: "KubeVirt is degraded: Bar", - })) - }) - }) - - Context("CDI", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = newHco() - req = newReq(hco) - }) - - It("should create if not present", func() { - expectedResource := hco.NewCDI() - cl := initClient([]runtime.Object{}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &cdiv1beta1.CDI{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - It("should find if present", func() { - expectedResource := hco.NewCDI() - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "CDIConditions", - Message: "CDI resource has no conditions", - })) - Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "CDIConditions", - Message: "CDI resource has no conditions", - })) - Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "CDIConditions", - Message: "CDI resource has no conditions", - })) - }) - - It("should set default UninstallStrategy if missing", func() { - expectedResource := hco.NewCDI() - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - missingUSResource := hco.NewCDI() - missingUSResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/%s/dummies/%s", missingUSResource.Namespace, missingUSResource.Name) - missingUSResource.Spec.UninstallStrategy = nil - - cl := initClient([]runtime.Object{hco, missingUSResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &cdiv1beta1.CDI{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: 
expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(*foundResource.Spec.UninstallStrategy).To(Equal(*expectedResource.Spec.UninstallStrategy)) - }) - - It("should add node placement if missing in CDI", func() { - existingResource := hco.NewCDI() - - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &cdiv1beta1.CDI{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Infra.Affinity).To(BeNil()) - Expect(existingResource.Spec.Infra.Tolerations).To(BeEmpty()) - Expect(existingResource.Spec.Infra.NodeSelector).To(BeNil()) - Expect(existingResource.Spec.Workloads.Affinity).To(BeNil()) - Expect(existingResource.Spec.Workloads.Tolerations).To(BeEmpty()) - Expect(existingResource.Spec.Workloads.NodeSelector).To(BeNil()) - - Expect(foundResource.Spec.Infra.Affinity).ToNot(BeNil()) - Expect(foundResource.Spec.Infra.NodeSelector["key1"]).Should(Equal("value1")) - Expect(foundResource.Spec.Infra.NodeSelector["key2"]).Should(Equal("value2")) - - Expect(foundResource.Spec.Workloads).ToNot(BeNil()) - Expect(foundResource.Spec.Workloads.Tolerations).Should(Equal(hco.Spec.Workloads.NodePlacement.Tolerations)) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should remove node placement if missing in HCO CR", func() { - - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - existingResource := hcoNodePlacement.NewCDI() - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &cdiv1beta1.CDI{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Infra.Affinity).ToNot(BeNil()) - Expect(existingResource.Spec.Infra.Tolerations).ToNot(BeEmpty()) - Expect(existingResource.Spec.Infra.NodeSelector).ToNot(BeNil()) - Expect(existingResource.Spec.Workloads.Affinity).ToNot(BeNil()) - Expect(existingResource.Spec.Workloads.Tolerations).ToNot(BeEmpty()) - Expect(existingResource.Spec.Workloads.NodeSelector).ToNot(BeNil()) - - Expect(foundResource.Spec.Infra.Affinity).To(BeNil()) - Expect(foundResource.Spec.Infra.Tolerations).To(BeEmpty()) - Expect(foundResource.Spec.Infra.NodeSelector).To(BeNil()) - Expect(foundResource.Spec.Workloads.Affinity).To(BeNil()) - Expect(foundResource.Spec.Workloads.Tolerations).To(BeEmpty()) - Expect(foundResource.Spec.Workloads.NodeSelector).To(BeNil()) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = 
hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - existingResource := hco.NewCDI() - - // now, modify HCO's node placement - seconds3 := int64(3) - hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, - }) - - hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" - - cl := initClient([]runtime.Object{hco, existingResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &cdiv1beta1.CDI{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) + "os" - Expect(existingResource.Spec.Infra.Tolerations).To(HaveLen(2)) - Expect(existingResource.Spec.Workloads.NodeSelector["key1"]).Should(Equal("value1")) + networkaddonsshared "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/shared" + networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1" + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" + hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" + sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" + vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" + consolev1 "github.com/openshift/api/console/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + "github.com/openshift/custom-resource-status/testlib" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" - Expect(foundResource.Spec.Infra.Tolerations).To(HaveLen(3)) - Expect(foundResource.Spec.Workloads.NodeSelector["key1"]).Should(Equal("something else")) + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . 
"github.com/onsi/gomega" - Expect(req.Conditions).To(BeEmpty()) - }) + "context" + "fmt" - It("should handle conditions", func() { - expectedResource := hco.NewCDI() - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []conditionsv1.Condition{ - conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - res := r.ensureCDI(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) + "k8s.io/client-go/tools/reference" +) - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "CDINotAvailable", - Message: "CDI is not available: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "CDIProgressing", - Message: "CDI is progressing: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "CDIProgressing", - Message: "CDI is progressing: Bar", - })) - Expect(req.Conditions[conditionsv1.ConditionDegraded]).To(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "CDIDegraded", - Message: "CDI is degraded: Bar", - })) - }) - }) +var _ = Describe("HyperConverged Components", func() { Context("NetworkAddonsConfig", func() { var hco *hcov1beta1.HyperConverged var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := hco.NewNetworkAddons() - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -878,7 +66,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := hco.NewNetworkAddons() expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -916,7 +104,7 @@ var _ = Describe("HyperConverged Components", func() { 
existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name) existingResource.Spec.ImagePullPolicy = corev1.PullAlways // set non-default value - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -937,10 +125,10 @@ var _ = Describe("HyperConverged Components", func() { It("should add node placement if missing in CNAO", func() { existingResource := hco.NewNetworkAddons() - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -969,12 +157,12 @@ var _ = Describe("HyperConverged Components", func() { It("should remove node placement if missing in HCO CR", func() { - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} existingResource := hcoNodePlacement.NewNetworkAddons() - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -996,8 +184,8 @@ var _ = Describe("HyperConverged Components", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} existingResource := hco.NewNetworkAddons() // now, modify HCO's node placement @@ -1008,7 +196,7 @@ var _ = Describe("HyperConverged Components", func() { hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1056,7 +244,7 @@ var _ = Describe("HyperConverged Components", func() { Message: "Bar", }, } - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureNetworkAddons(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1101,13 +289,13 @@ 
var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := hco.NewKubeVirtCommonTemplateBundle() - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureKubeVirtCommonTemplateBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1127,7 +315,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := hco.NewKubeVirtCommonTemplateBundle() expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureKubeVirtCommonTemplateBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1147,7 +335,7 @@ var _ = Describe("HyperConverged Components", func() { existingResource.Spec.Version = "Non default value" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtCommonTemplateBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1235,13 +423,13 @@ var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := newKubeVirtNodeLabellerBundleForCR(hco, namespace) - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1261,7 +449,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := newKubeVirtNodeLabellerBundleForCR(hco, namespace) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1281,7 +469,7 @@ var _ = Describe("HyperConverged Components", func() { existingResource.Spec.Version = "Non default value" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1300,9 +488,9 @@ var _ = Describe("HyperConverged Components", func() { It("should add node placement if missing in KubeVirtNodeLabellerBundle", func() { existingResource := newKubeVirtNodeLabellerBundleForCR(hco, namespace) - hco.Spec.Workloads.NodePlacement = NewHyperConvergedConfig() + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) 
Expect(res.UpgradeDone).To(BeFalse()) @@ -1330,11 +518,11 @@ var _ = Describe("HyperConverged Components", func() { It("should remove node placement if missing in HCO CR", func() { - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Workloads.NodePlacement = NewHyperConvergedConfig() + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() existingResource := newKubeVirtNodeLabellerBundleForCR(hcoNodePlacement, namespace) - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1356,7 +544,7 @@ var _ = Describe("HyperConverged Components", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Workloads.NodePlacement = NewHyperConvergedConfig() + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() existingResource := newKubeVirtNodeLabellerBundleForCR(hco, namespace) // now, modify HCO's node placement @@ -1367,7 +555,7 @@ var _ = Describe("HyperConverged Components", func() { hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtNodeLabellerBundle(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1487,13 +675,13 @@ var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := newKubeVirtTemplateValidatorForCR(hco, namespace) - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1513,7 +701,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := newKubeVirtTemplateValidatorForCR(hco, namespace) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1533,7 +721,7 @@ var _ = Describe("HyperConverged Components", func() { existingResource.Spec.TemplateValidatorReplicas = 5 // set non-default value - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1552,9 +740,9 @@ var _ = Describe("HyperConverged Components", func() { It("should add node placement if missing in KubeVirtTemplateValidator", func() { existingResource := newKubeVirtTemplateValidatorForCR(hco, namespace) - hco.Spec.Infra.NodePlacement = NewHyperConvergedConfig() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := 
initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1582,11 +770,11 @@ var _ = Describe("HyperConverged Components", func() { It("should remove node placement if missing in HCO CR", func() { - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Infra.NodePlacement = NewHyperConvergedConfig() + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() existingResource := newKubeVirtTemplateValidatorForCR(hcoNodePlacement, namespace) - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1608,7 +796,7 @@ var _ = Describe("HyperConverged Components", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra.NodePlacement = NewHyperConvergedConfig() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() existingResource := newKubeVirtTemplateValidatorForCR(hco, namespace) // now, modify HCO's node placement @@ -1619,7 +807,7 @@ var _ = Describe("HyperConverged Components", func() { hco.Spec.Infra.NodePlacement.NodeSelector["key1"] = "something else" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtTemplateValidator(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1712,13 +900,13 @@ var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := newKubeVirtMetricsAggregationForCR(hco, namespace) - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureKubeVirtMetricsAggregation(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1738,7 +926,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := newKubeVirtMetricsAggregationForCR(hco, namespace) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureKubeVirtMetricsAggregation(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1758,7 +946,7 @@ var _ = Describe("HyperConverged Components", func() { existingResource.Spec.Version = "non-default value" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureKubeVirtMetricsAggregation(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1847,15 +1035,15 @@ var _ = Describe("HyperConverged Components", func() { BeforeEach(func() { os.Setenv("CONVERSION_CONTAINER", "new-conversion-container-value") os.Setenv("VMWARE_CONTAINER", "new-vmware-container-value") - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should error if environment vars not specified", func() { os.Unsetenv("CONVERSION_CONTAINER") os.Unsetenv("VMWARE_CONTAINER") - cl := 
initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureIMSConfig(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1864,7 +1052,7 @@ var _ = Describe("HyperConverged Components", func() { It("should create if not present", func() { expectedResource := newIMSConfigForCR(hco, namespace) - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureIMSConfig(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1884,7 +1072,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := newIMSConfigForCR(hco, namespace) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureIMSConfig(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -1923,7 +1111,7 @@ var _ = Describe("HyperConverged Components", func() { outdatedResource.Data[k] = v } - cl := initClient([]runtime.Object{hco, outdatedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, outdatedResource}) r := initReconciler(cl) res := r.ensureIMSConfig(req) @@ -1957,13 +1145,13 @@ var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := newVMImportForCR(hco) - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res := r.ensureVMImport(req) @@ -1984,7 +1172,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := newVMImportForCR(hco) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/vmimportconfigs/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) res := r.ensureVMImport(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -2004,7 +1192,7 @@ var _ = Describe("HyperConverged Components", func() { existingResource.Spec.ImagePullPolicy = corev1.PullAlways // set non-default value - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureVMImport(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -2023,10 +1211,10 @@ var _ = Describe("HyperConverged Components", func() { It("should add node placement if missing in VM-Import", func() { existingResource := newVMImportForCR(hco) - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureVMImport(req) 
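// Editor's note: in the node-placement tests, existingResource holds the operand as it was seeded into the fake client, while foundResource is re-read after the ensure call; comparing the two verifies that the reconciler copied the HCO CR's placement onto the cluster object without mutating the in-memory original.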
Expect(res.UpgradeDone).To(BeFalse()) @@ -2060,12 +1248,12 @@ var _ = Describe("HyperConverged Components", func() { It("should remove node placement if missing in HCO CR", func() { - hcoNodePlacement := newHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} existingResource := newVMImportForCR(hcoNodePlacement) - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureVMImport(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -2092,8 +1280,8 @@ var _ = Describe("HyperConverged Components", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} existingResource := newVMImportForCR(hco) // now, modify HCO's node placement @@ -2104,7 +1292,7 @@ var _ = Describe("HyperConverged Components", func() { hco.Spec.Infra.NodePlacement.NodeSelector["key1"] = "something else" - cl := initClient([]runtime.Object{hco, existingResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) r := initReconciler(cl) res := r.ensureVMImport(req) Expect(res.UpgradeDone).To(BeFalse()) @@ -2136,13 +1324,13 @@ var _ = Describe("HyperConverged Components", func() { var req *common.HcoRequest BeforeEach(func() { - hco = newHco() - req = newReq(hco) + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) }) It("should create if not present", func() { expectedResource := hco.NewConsoleCLIDownload() - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) err := r.ensureConsoleCLIDownload(req) @@ -2162,7 +1350,7 @@ var _ = Describe("HyperConverged Components", func() { It("should find if present", func() { expectedResource := hco.NewConsoleCLIDownload() expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/consoleclidownloads/%s", expectedResource.Namespace, expectedResource.Name) - cl := initClient([]runtime.Object{hco, expectedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) r := initReconciler(cl) err := r.ensureConsoleCLIDownload(req) Expect(err).To(BeNil()) @@ -2177,7 +1365,7 @@ var _ = Describe("HyperConverged Components", func() { DescribeTable("should update if something changed", func(modifiedResource *consolev1.ConsoleCLIDownload) { os.Setenv(hcoutil.KubevirtVersionEnvV, "100") - cl := initClient([]runtime.Object{modifiedResource}) + cl := commonTestUtils.InitClient([]runtime.Object{modifiedResource}) r := initReconciler(cl) err := r.ensureConsoleCLIDownload(req) Expect(err).To(BeNil()) diff --git a/pkg/controller/hyperconverged/hyperconverged_controller_test.go 
b/pkg/controller/hyperconverged/hyperconverged_controller_test.go index bfd7074074..3f4cb9dbac 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller_test.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller_test.go @@ -5,6 +5,8 @@ import ( "encoding/json" "errors" "fmt" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" "os" "time" @@ -46,13 +48,13 @@ var _ = Describe("HyperconvergedController", func() { Context("HCO Lifecycle", func() { BeforeEach(func() { - os.Setenv("CONVERSION_CONTAINER", conversion_image) - os.Setenv("VMWARE_CONTAINER", vmware_image) + os.Setenv("CONVERSION_CONTAINER", commonTestUtils.Conversion_image) + os.Setenv("VMWARE_CONTAINER", commonTestUtils.Vmware_image) os.Setenv("OPERATOR_NAMESPACE", namespace) }) It("should handle not found", func() { - cl := initClient([]runtime.Object{}) + cl := commonTestUtils.InitClient([]runtime.Object{}) r := initReconciler(cl) res, err := r.Reconcile(request) @@ -71,7 +73,7 @@ var _ = Describe("HyperconvergedController", func() { Conditions: []conditionsv1.Condition{}, }, } - cl := initClient([]runtime.Object{hco}) + cl := commonTestUtils.InitClient([]runtime.Object{hco}) r := initReconciler(cl) // Do the reconcile @@ -102,8 +104,8 @@ var _ = Describe("HyperconvergedController", func() { }) It("should create all managed resources", func() { - hco := newHco() - cl := initClient([]runtime.Object{hco}) + hco := commonTestUtils.NewHco() + cl := commonTestUtils.InitClient([]runtime.Object{hco}) r := initReconciler(cl) // Do the reconcile @@ -171,13 +173,13 @@ var _ = Describe("HyperconvergedController", func() { } // These are all of the objects that we expect to "find" in the client because // we already created them in a previous reconcile. 
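// Editor's note: the helpers below were unexported (newKubeVirt*ForCR) while they lived in the hyperconverged package; the move to pkg/controller/operands exports them as NewKubeVirt*ForCR so these now cross-package tests can keep building the same expected objects.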
- expectedKVConfig := newKubeVirtConfigForCR(hco, namespace) + expectedKVConfig := operands.NewKubeVirtConfigForCR(hco, namespace) expectedKVConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVConfig.Namespace, expectedKVConfig.Name) - expectedKVStorageConfig := newKubeVirtStorageConfigForCR(hco, namespace) + expectedKVStorageConfig := operands.NewKubeVirtStorageConfigForCR(hco, namespace) expectedKVStorageConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVStorageConfig.Namespace, expectedKVStorageConfig.Name) - expectedKVStorageRole := newKubeVirtStorageRoleForCR(hco, namespace) + expectedKVStorageRole := operands.NewKubeVirtStorageRoleForCR(hco, namespace) expectedKVStorageRole.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/roles/%s", expectedKVStorageRole.Namespace, expectedKVStorageRole.Name) - expectedKVStorageRoleBinding := newKubeVirtStorageRoleBindingForCR(hco, namespace) + expectedKVStorageRoleBinding := operands.NewKubeVirtStorageRoleBindingForCR(hco, namespace) expectedKVStorageRoleBinding.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/rolebindings/%s", expectedKVStorageRoleBinding.Namespace, expectedKVStorageRoleBinding.Name) expectedKV := hco.NewKubeVirt(namespace) expectedKV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/kubevirts/%s", expectedKV.Namespace, expectedKV.Name) @@ -192,7 +194,7 @@ var _ = Describe("HyperconvergedController", func() { expectedKVTV := newKubeVirtTemplateValidatorForCR(hco, namespace) expectedKVTV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/tv/%s", expectedKVTV.Namespace, expectedKVTV.Name) // Add all of the objects to the client - cl := initClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKVStorageRole, expectedKVStorageRoleBinding, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKVStorageRole, expectedKVStorageRoleBinding, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) r := initReconciler(cl) // Do the reconcile @@ -255,13 +257,13 @@ var _ = Describe("HyperconvergedController", func() { } // These are all of the objects that we expect to "find" in the client because // we already created them in a previous reconcile. 
- expectedKVConfig := newKubeVirtConfigForCR(hco, namespace) + expectedKVConfig := operands.NewKubeVirtConfigForCR(hco, namespace) expectedKVConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVConfig.Namespace, expectedKVConfig.Name) - expectedKVStorageConfig := newKubeVirtStorageConfigForCR(hco, namespace) + expectedKVStorageConfig := operands.NewKubeVirtStorageConfigForCR(hco, namespace) expectedKVStorageConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVStorageConfig.Namespace, expectedKVStorageConfig.Name) - expectedKVStorageRole := newKubeVirtStorageRoleForCR(hco, namespace) + expectedKVStorageRole := operands.NewKubeVirtStorageRoleForCR(hco, namespace) expectedKVStorageRole.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/role/%s", expectedKVStorageRole.Namespace, expectedKVStorageRole.Name) - expectedKVStorageRoleBinding := newKubeVirtStorageRoleBindingForCR(hco, namespace) + expectedKVStorageRoleBinding := operands.NewKubeVirtStorageRoleBindingForCR(hco, namespace) expectedKVStorageRoleBinding.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/role/%s", expectedKVStorageRoleBinding.Namespace, expectedKVStorageRoleBinding.Name) expectedKV := hco.NewKubeVirt(namespace) expectedKV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/kubevirts/%s", expectedKV.Namespace, expectedKV.Name) @@ -321,7 +323,7 @@ var _ = Describe("HyperconvergedController", func() { expectedKVTV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/tv/%s", expectedKVTV.Namespace, expectedKVTV.Name) expectedKVTV.Status.Conditions = getGenericCompletedConditions() // Add all of the objects to the client - cl := initClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) r := initReconciler(cl) // Do the reconcile @@ -503,8 +505,8 @@ var _ = Describe("HyperconvergedController", func() { origConds := expected.hco.Status.Conditions BeforeEach(func() { - os.Setenv("CONVERSION_CONTAINER", conversion_image) - os.Setenv("VMWARE_CONTAINER", vmware_image) + os.Setenv("CONVERSION_CONTAINER", commonTestUtils.Conversion_image) + os.Setenv("VMWARE_CONTAINER", commonTestUtils.Vmware_image) os.Setenv("OPERATOR_NAMESPACE", namespace) os.Setenv(hcoutil.HcoKvIoVersionName, version.Version) }) @@ -543,8 +545,8 @@ var _ = Describe("HyperconvergedController", func() { ) BeforeEach(func() { - os.Setenv("CONVERSION_CONTAINER", conversion_image) - os.Setenv("VMWARE_CONTAINER", vmware_image) + os.Setenv("CONVERSION_CONTAINER", commonTestUtils.Conversion_image) + os.Setenv("VMWARE_CONTAINER", commonTestUtils.Vmware_image) os.Setenv("OPERATOR_NAMESPACE", namespace) expected.kv.Status.ObservedKubeVirtVersion = newComponentVersion @@ -1182,7 +1184,7 @@ var _ = Describe("HyperconvergedController", func() { expected.hco.Status.Conditions = nil cl := expected.initClient() rsc := schema.GroupResource{Group: hcoutil.APIVersionGroup, Resource: "hyperconvergeds.hco.kubevirt.io"} - cl.initiateWriteErrors( + cl.InitiateWriteErrors( nil, apierrors.NewConflict(rsc, "hco", errors.New("test error")), ) @@ -1205,7 +1207,7 @@ var _ = Describe("HyperconvergedController", func() { expected.hco.Status.Conditions = nil cl := expected.initClient() rs := 
schema.GroupResource{hcoutil.APIVersionGroup, "hyperconvergeds.hco.kubevirt.io"} - cl.Status().(*hcoTestStatusWriter).initiateErrors(apierrors.NewConflict(rs, "hco", errors.New("test error"))) + cl.Status().(*commonTestUtils.HcoTestStatusWriter).InitiateErrors(apierrors.NewConflict(rs, "hco", errors.New("test error"))) r := initReconciler(cl) r.ownVersion = os.Getenv(hcoutil.HcoKvIoVersionName) diff --git a/pkg/controller/hyperconverged/testClient_test.go b/pkg/controller/hyperconverged/testClient_test.go deleted file mode 100644 index b0f5a26e44..0000000000 --- a/pkg/controller/hyperconverged/testClient_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package hyperconverged - -import ( - "context" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -type hcoTestClient struct { - client client.Client - sw *hcoTestStatusWriter - readErrors testErrors - writeErrors testErrors -} - -func (c *hcoTestClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { - if ok, err := c.readErrors.getNextError(); ok { - return err - } - return c.client.Get(ctx, key, obj) -} - -func (c *hcoTestClient) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.List(ctx, list, opts...) -} - -func (c *hcoTestClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.Create(ctx, obj, opts...) -} - -func (c *hcoTestClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.Delete(ctx, obj, opts...) -} - -func (c *hcoTestClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.Update(ctx, obj, opts...) -} - -func (c *hcoTestClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.Patch(ctx, obj, patch, opts...) -} - -func (c *hcoTestClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error { - if ok, err := c.writeErrors.getNextError(); ok { - return err - } - return c.client.DeleteAllOf(ctx, obj, opts...) -} - -func (c *hcoTestClient) Status() client.StatusWriter { - return c.sw -} - -func (c *hcoTestClient) initiateReadErrors(errs ...error) { - c.readErrors = errs -} - -func (c *hcoTestClient) initiateWriteErrors(errs ...error) { - c.writeErrors = errs -} - -type hcoTestStatusWriter struct { - client client.Client - errors testErrors -} - -func (sw *hcoTestStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { - if ok, err := sw.errors.getNextError(); ok { - return err - } - return sw.client.Update(ctx, obj, opts...) -} - -func (sw *hcoTestStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { - if ok, err := sw.errors.getNextError(); ok { - return err - } - return sw.client.Patch(ctx, obj, patch, opts...) 
-} - -func (sw *hcoTestStatusWriter) initiateErrors(errs ...error) { - sw.errors = errs -} - -type testErrors []error - -func (errs *testErrors) getNextError() (bool, error) { - if len(*errs) == 0 { - return false, nil - } - - err := (*errs)[0] - *errs = (*errs)[1:] - - return true, err -} - -func initClient(clientObjects []runtime.Object) *hcoTestClient { - // Create a fake client to mock API calls - cl := fake.NewFakeClient(clientObjects...) - return &hcoTestClient{client: cl, sw: &hcoTestStatusWriter{client: cl}} -} diff --git a/pkg/controller/hyperconverged/testUtils_test.go b/pkg/controller/hyperconverged/testUtils_test.go index 69fe6cd8be..ca15b4abe1 100644 --- a/pkg/controller/hyperconverged/testUtils_test.go +++ b/pkg/controller/hyperconverged/testUtils_test.go @@ -3,41 +3,61 @@ package hyperconverged import ( "context" "fmt" - "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" - sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" "os" - "github.com/operator-framework/operator-sdk/pkg/ready" - "sigs.k8s.io/controller-runtime/pkg/manager" - "github.com/go-logr/logr" - networkaddons "github.com/kubevirt/cluster-network-addons-operator/pkg/apis" networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1" - "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis" hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" "github.com/kubevirt/hyperconverged-cluster-operator/version" - sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - consolev1 "github.com/openshift/api/console/v1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + "github.com/operator-framework/operator-sdk/pkg/ready" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" schedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" kubevirtv1 "kubevirt.io/client-go/api/v1" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +// Mock TestRequest to simulate Reconcile() being called on an event for a watched resource +var ( + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + } ) -type basicExpected struct { +func initReconciler(client client.Client) *ReconcileHyperConverged { + s := commonTestUtils.GetScheme() + prepareHandlerMap(client, s) + + // Create a ReconcileHyperConverged object with the scheme and fake client + return &ReconcileHyperConverged{ + client: client, + scheme: s, + clusterInfo: clusterInfoMock{}, + eventEmitter: &eventEmitterMock{}, + firstLoop: true, + } +} + +type BasicExpected struct { hco *hcov1beta1.HyperConverged pc *schedulingv1.PriorityClass kvConfig *corev1.ConfigMap @@ -55,7 +75,7 @@ type basicExpected struct { imsConfig *corev1.ConfigMap } -func (be basicExpected) toArray() []runtime.Object { +func (be BasicExpected) toArray() []runtime.Object { return []runtime.Object{ be.hco, be.pc, @@ -75,45 +95,13 @@ func (be basicExpected) toArray() []runtime.Object { } } -func (be basicExpected) initClient() *hcoTestClient { - return initClient(be.toArray()) +func (be BasicExpected) initClient() *commonTestUtils.HcoTestClient { + return commonTestUtils.InitClient(be.toArray()) } -// Mock request to simulate Reconcile() being called on an event for a watched resource -var ( - request = reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - conversion_image = "quay.io/kubevirt/kubevirt-v2v-conversion:v2.0.0" - vmware_image = "quay.io/kubevirt/kubevirt-vmware:v2.0.0" -) +func getBasicDeployment() *BasicExpected { -func newHco() *hcov1beta1.HyperConverged { - return &hcov1beta1.HyperConverged{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: hcov1beta1.HyperConvergedSpec{}, - } -} - -func newReq(inst *hcov1beta1.HyperConverged) *common.HcoRequest { - return &common.HcoRequest{ - Request: request, - Logger: log, - Conditions: common.NewHcoConditions(), - Ctx: context.TODO(), - Instance: inst, - } -} - -func getBasicDeployment() *basicExpected { - - res := &basicExpected{} + res := &BasicExpected{} hco := &hcov1beta1.HyperConverged{ ObjectMeta: metav1.ObjectMeta{ @@ -126,8 +114,8 @@ func getBasicDeployment() *basicExpected { { Type: hcov1beta1.ConditionReconcileComplete, Status: corev1.ConditionTrue, - Reason: reconcileCompleted, - Message: reconcileCompletedMessage, + Reason: common.ReconcileCompleted, + Message: common.ReconcileCompletedMessage, }, }, Versions: hcov1beta1.Versions{ @@ -140,18 +128,18 @@ func getBasicDeployment() *basicExpected { res.pc = hco.NewKubeVirtPriorityClass() // These are all of the objects that we expect to "find" in the client because // we already created them in a previous reconcile. 
- expectedKVConfig := newKubeVirtConfigForCR(hco, namespace) + expectedKVConfig := operands.NewKubeVirtConfigForCR(hco, namespace) expectedKVConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVConfig.Namespace, expectedKVConfig.Name) res.kvConfig = expectedKVConfig - expectedKVStorageConfig := newKubeVirtStorageConfigForCR(hco, namespace) + expectedKVStorageConfig := operands.NewKubeVirtStorageConfigForCR(hco, namespace) expectedKVStorageConfig.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/configmaps/%s", expectedKVStorageConfig.Namespace, expectedKVStorageConfig.Name) res.kvStorageConfig = expectedKVStorageConfig - expectedKVStorageRole := newKubeVirtStorageRoleForCR(hco, namespace) + expectedKVStorageRole := operands.NewKubeVirtStorageRoleForCR(hco, namespace) expectedKVStorageRole.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/roles/%s", expectedKVStorageConfig.Namespace, expectedKVStorageConfig.Name) res.kvStorageRole = expectedKVStorageRole - expectedKVStorageRoleBinding := newKubeVirtStorageRoleBindingForCR(hco, namespace) + expectedKVStorageRoleBinding := operands.NewKubeVirtStorageRoleBindingForCR(hco, namespace) expectedKVStorageRoleBinding.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/rolebindings/%s", expectedKVStorageConfig.Namespace, expectedKVStorageConfig.Name) res.kvStorageRoleBinding = expectedKVStorageRoleBinding @@ -199,7 +187,7 @@ func getBasicDeployment() *basicExpected { res.kvTv = expectedKVTV expectedVMI := newVMImportForCR(hco) - expectedVMI.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/vmimportconfigs/%s", expectedVMI.Namespace, expectedVMI.Name) + expectedVMI.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/vmimportconfigs/%s", expectedVMI.Namespace, expectedVMI.Name) expectedVMI.Status.Conditions = getGenericCompletedConditions() res.vmi = expectedVMI @@ -208,59 +196,12 @@ func getBasicDeployment() *basicExpected { res.kvMtAg = kvMtAg res.imsConfig = newIMSConfigForCR(hco, namespace) - res.imsConfig.Data["v2v-conversion-image"] = conversion_image - res.imsConfig.Data["kubevirt-vmware-image"] = vmware_image + res.imsConfig.Data["v2v-conversion-image"] = commonTestUtils.Conversion_image + res.imsConfig.Data["kubevirt-vmware-image"] = commonTestUtils.Vmware_image return res } -func NewHyperConvergedConfig() *sdkapi.NodePlacement { - seconds1, seconds2 := int64(1), int64(2) - return &sdkapi.NodePlacement{ - NodeSelector: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - {Key: "key1", Operator: "operator1", Values: []string{"value11, value12"}}, - {Key: "key2", Operator: "operator2", Values: []string{"value21, value22"}}, - }, - MatchFields: []corev1.NodeSelectorRequirement{ - {Key: "key1", Operator: "operator1", Values: []string{"value11, value12"}}, - {Key: "key2", Operator: "operator2", Values: []string{"value21, value22"}}, - }, - }, - }, - }, - }, - }, - Tolerations: []corev1.Toleration{ - {Key: "key1", Operator: "operator1", Value: "value1", Effect: "effect1", TolerationSeconds: &seconds1}, - {Key: "key2", Operator: "operator2", Value: "value2", Effect: "effect2", TolerationSeconds: &seconds2}, - }, - } -} - -func checkAvailability(hco *hcov1beta1.HyperConverged, expected corev1.ConditionStatus) { - found := false - for 
_, cond := range hco.Status.Conditions { - if cond.Type == conditionsv1.ConditionType(kubevirtv1.KubeVirtConditionAvailable) { - found = true - Expect(cond.Status).To(Equal(expected)) - break - } - } - - if !found { - Fail(fmt.Sprintf(`Can't find 'Available' condition; %v`, hco.Status.Conditions)) - } -} - // returns the HCO after reconcile, and the returned requeue func doReconcile(cl client.Client, hco *hcov1beta1.HyperConverged) (*hcov1beta1.HyperConverged, bool) { r := initReconciler(cl) @@ -283,6 +224,31 @@ func doReconcile(cl client.Client, hco *hcov1beta1.HyperConverged) (*hcov1beta1. return foundResource, res.Requeue } +type clusterInfoMock struct{} + +func (clusterInfoMock) CheckRunningInOpenshift(_ client.Reader, _ context.Context, _ logr.Logger, _ bool) error { + return nil +} + +func (clusterInfoMock) IsOpenshift() bool { + return true +} + +func (clusterInfoMock) IsRunningLocally() bool { + return false +} + +type eventEmitterMock struct{} + +func (eventEmitterMock) Init(_ context.Context, _ manager.Manager, _ hcoutil.ClusterInfo, _ logr.Logger) { +} + +func (eventEmitterMock) EmitEvent(_ runtime.Object, _, _, _ string) { +} + +func (eventEmitterMock) UpdateClient(_ context.Context, _ client.Reader, _ logr.Logger) { +} + func getGenericCompletedConditions() []conditionsv1.Condition { return []conditionsv1.Condition{ { @@ -317,44 +283,6 @@ func getGenericProgressingConditions() []conditionsv1.Condition { } } -func initReconciler(client client.Client) *ReconcileHyperConverged { - // Setup Scheme for all resources - s := scheme.Scheme - for _, f := range []func(*runtime.Scheme) error{ - apis.AddToScheme, - cdiv1beta1.AddToScheme, - networkaddons.AddToScheme, - sspopv1.AddToScheme, - vmimportv1beta1.AddToScheme, - consolev1.AddToScheme, - } { - Expect(f(s)).To(BeNil()) - } - - // Create a ReconcileHyperConverged object with the scheme and fake client - return &ReconcileHyperConverged{ - client: client, - scheme: s, - clusterInfo: clusterInfoMock{}, - eventEmitter: &eventEmitterMock{}, - firstLoop: true, - } -} - -type clusterInfoMock struct{} - -func (clusterInfoMock) CheckRunningInOpenshift(_ client.Reader, _ context.Context, _ logr.Logger, _ bool) error { - return nil -} - -func (clusterInfoMock) IsOpenshift() bool { - return true -} - -func (clusterInfoMock) IsRunningLocally() bool { - return false -} - func checkHcoReady() (bool, error) { _, err := os.Stat(ready.FileName) @@ -367,13 +295,17 @@ func checkHcoReady() (bool, error) { return false, err } -type eventEmitterMock struct{} - -func (eventEmitterMock) Init(_ context.Context, _ manager.Manager, _ hcoutil.ClusterInfo, _ logr.Logger) { -} - -func (eventEmitterMock) EmitEvent(_ runtime.Object, _, _, _ string) { -} +func checkAvailability(hco *hcov1beta1.HyperConverged, expected corev1.ConditionStatus) { + found := false + for _, cond := range hco.Status.Conditions { + if cond.Type == conditionsv1.ConditionType(kubevirtv1.KubeVirtConditionAvailable) { + found = true + Expect(cond.Status).To(Equal(expected)) + break + } + } -func (eventEmitterMock) UpdateClient(_ context.Context, _ client.Reader, _ logr.Logger) { + if !found { + Fail(fmt.Sprintf(`Can't find 'Available' condition; %v`, hco.Status.Conditions)) + } } diff --git a/pkg/controller/operands/cdi.go b/pkg/controller/operands/cdi.go new file mode 100644 index 0000000000..6f2fbaae8f --- /dev/null +++ b/pkg/controller/operands/cdi.go @@ -0,0 +1,268 @@ +package operands + +import ( + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + 
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" + objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/reference" + cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type CdiHandler genericOperand + +func (h *CdiHandler) Ensure(req *common.HcoRequest) *EnsureResult { + cdi := req.Instance.NewCDI() + res := NewEnsureResult(cdi) + + key, err := client.ObjectKeyFromObject(cdi) + if err != nil { + req.Logger.Error(err, "Failed to get object key for CDI") + } + + res.SetName(key.Name) + found := &cdiv1beta1.CDI{} + err = h.Client.Get(req.Ctx, key, found) + + if err != nil { + if apierrors.IsNotFound(err) { + req.Logger.Info("Creating CDI") + err = h.Client.Create(req.Ctx, cdi) + if err == nil { + return res.SetCreated() + } + } + return res.Error(err) + } + + req.Logger.Info("CDI already exists", "CDI.Namespace", found.Namespace, "CDI.Name", found.Name) + + err = h.ensureKubeVirtStorageConfig(req) + if err != nil { + return res.Error(err) + } + + err = h.ensureKubeVirtStorageRole(req) + if err != nil { + return res.Error(err) + } + + err = h.ensureKubeVirtStorageRoleBinding(req) + if err != nil { + return res.Error(err) + } + + existingOwners := found.GetOwnerReferences() + + // Previous versions used to have HCO-operator (scope namespace) + // as the owner of CDI (scope cluster). + // It's not legal, so remove that. 
+    if len(existingOwners) > 0 {
+        req.Logger.Info("CDI has owners, removing...")
+        found.SetOwnerReferences([]metav1.OwnerReference{})
+        err = h.Client.Update(req.Ctx, found)
+        if err != nil {
+            req.Logger.Error(err, "Failed to remove CDI's previous owners")
+        }
+    }
+
+    if !reflect.DeepEqual(found.Spec, cdi.Spec) {
+        req.Logger.Info("Updating existing CDI's Spec to its default value")
+        cdi.Spec.DeepCopyInto(&found.Spec)
+        err = h.Client.Update(req.Ctx, found)
+        if err != nil {
+            return res.Error(err)
+        }
+        return res.SetUpdated()
+    }
+
+    // Add it to the list of RelatedObjects if found
+    objectRef, err := reference.GetReference(h.Scheme, found)
+    if err != nil {
+        return res.Error(err)
+    }
+    objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef)
+
+    // Handle CDI resource conditions
+    isReady := handleComponentConditions(req, "CDI", found.Status.Conditions)
+
+    upgradeDone := req.ComponentUpgradeInProgress && isReady && checkComponentVersion(hcoutil.CdiVersionEnvV, found.Status.ObservedVersion)
+
+    return res.SetUpgradeDone(upgradeDone)
+}
+
+func (h *CdiHandler) ensureKubeVirtStorageRole(req *common.HcoRequest) error {
+    kubevirtStorageRole := NewKubeVirtStorageRoleForCR(req.Instance, req.Namespace)
+    if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageRole, h.Scheme); err != nil {
+        return err
+    }
+
+    key, err := client.ObjectKeyFromObject(kubevirtStorageRole)
+    if err != nil {
+        req.Logger.Error(err, "Failed to get object key for kubevirt storage role")
+    }
+
+    found := &rbacv1.Role{}
+    err = h.Client.Get(req.Ctx, key, found)
+    if err != nil && apierrors.IsNotFound(err) {
+        req.Logger.Info("Creating kubevirt storage role")
+        return h.Client.Create(req.Ctx, kubevirtStorageRole)
+    }
+
+    if err != nil {
+        return err
+    }
+
+    req.Logger.Info("KubeVirt storage role already exists", "Role.Namespace", found.Namespace, "Role.Name", found.Name)
+    // Add it to the list of RelatedObjects if found
+    objectRef, err := reference.GetReference(h.Scheme, found)
+    if err != nil {
+        return err
+    }
+    objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef)
+
+    return nil
+}
+
+func (h *CdiHandler) ensureKubeVirtStorageRoleBinding(req *common.HcoRequest) error {
+    kubevirtStorageRoleBinding := NewKubeVirtStorageRoleBindingForCR(req.Instance, req.Namespace)
+    if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageRoleBinding, h.Scheme); err != nil {
+        return err
+    }
+
+    key, err := client.ObjectKeyFromObject(kubevirtStorageRoleBinding)
+    if err != nil {
+        req.Logger.Error(err, "Failed to get object key for kubevirt storage rolebinding")
+    }
+
+    found := &rbacv1.RoleBinding{}
+    err = h.Client.Get(req.Ctx, key, found)
+    if err != nil && apierrors.IsNotFound(err) {
+        req.Logger.Info("Creating kubevirt storage rolebinding")
+        return h.Client.Create(req.Ctx, kubevirtStorageRoleBinding)
+    }
+
+    if err != nil {
+        return err
+    }
+
+    req.Logger.Info("KubeVirt storage rolebinding already exists", "RoleBinding.Namespace", found.Namespace, "RoleBinding.Name", found.Name)
+    // Add it to the list of RelatedObjects if found
+    objectRef, err := reference.GetReference(h.Scheme, found)
+    if err != nil {
+        return err
+    }
+    objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef)
+
+    return nil
+}
+
+func (h *CdiHandler) ensureKubeVirtStorageConfig(req *common.HcoRequest) error {
+    kubevirtStorageConfig := NewKubeVirtStorageConfigForCR(req.Instance, req.Namespace)
+
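+    // Unlike the cluster-scoped CDI CR above, this ConfigMap is namespaced, so
+    // HCO can legally own it; the controller reference set below lets the
+    // garbage collector remove it together with the HyperConverged CR.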
+    if err := controllerutil.SetControllerReference(req.Instance, kubevirtStorageConfig, h.Scheme); err != nil {
+        return err
+    }
+
+    key, err := client.ObjectKeyFromObject(kubevirtStorageConfig)
+    if err != nil {
+        req.Logger.Error(err, "Failed to get object key for kubevirt storage config")
+    }
+
+    found := &corev1.ConfigMap{}
+    err = h.Client.Get(req.Ctx, key, found)
+    if err != nil && apierrors.IsNotFound(err) {
+        req.Logger.Info("Creating kubevirt storage config")
+        return h.Client.Create(req.Ctx, kubevirtStorageConfig)
+    }
+
+    if err != nil {
+        return err
+    }
+
+    req.Logger.Info("KubeVirt storage config already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name)
+    // Add it to the list of RelatedObjects if found
+    objectRef, err := reference.GetReference(h.Scheme, found)
+    if err != nil {
+        return err
+    }
+    objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef)
+
+    return nil
+}
+
+func NewKubeVirtStorageRoleForCR(cr *hcov1beta1.HyperConverged, namespace string) *rbacv1.Role {
+    labels := map[string]string{
+        "app": cr.Name,
+    }
+    return &rbacv1.Role{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "hco.kubevirt.io:config-reader",
+            Labels:    labels,
+            Namespace: namespace,
+        },
+        Rules: []rbacv1.PolicyRule{
+            {
+                APIGroups:     []string{""},
+                Resources:     []string{"configmaps"},
+                ResourceNames: []string{"kubevirt-storage-class-defaults"},
+                Verbs:         []string{"get", "watch", "list"},
+            },
+        },
+    }
+}
+
+func NewKubeVirtStorageRoleBindingForCR(cr *hcov1beta1.HyperConverged, namespace string) *rbacv1.RoleBinding {
+    labels := map[string]string{
+        "app": cr.Name,
+    }
+    return &rbacv1.RoleBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "hco.kubevirt.io:config-reader",
+            Labels:    labels,
+            Namespace: namespace,
+        },
+        RoleRef: rbacv1.RoleRef{
+            APIGroup: "rbac.authorization.k8s.io",
+            Kind:     "Role",
+            Name:     "hco.kubevirt.io:config-reader",
+        },
+        Subjects: []rbacv1.Subject{
+            {
+                APIGroup: "rbac.authorization.k8s.io",
+                Kind:     "Group",
+                Name:     "system:authenticated",
+            },
+        },
+    }
+}
+
+func NewKubeVirtStorageConfigForCR(cr *hcov1beta1.HyperConverged, namespace string) *corev1.ConfigMap {
+    localSC := "local-sc"
+    if cr.Spec.LocalStorageClassName != "" {
+        localSC = cr.Spec.LocalStorageClassName
+    }
+
+    labels := map[string]string{
+        hcoutil.AppLabel: cr.Name,
+    }
+    return &corev1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "kubevirt-storage-class-defaults",
+            Labels:    labels,
+            Namespace: namespace,
+        },
+        Data: map[string]string{
+            "accessMode":            "ReadWriteOnce",
+            "volumeMode":            "Filesystem",
+            localSC + ".accessMode": "ReadWriteOnce",
+            localSC + ".volumeMode": "Filesystem",
+        },
+    }
+}
diff --git a/pkg/controller/operands/cdi_test.go b/pkg/controller/operands/cdi_test.go
new file mode 100644
index 0000000000..4aa6079334
--- /dev/null
+++ b/pkg/controller/operands/cdi_test.go
@@ -0,0 +1,350 @@
+package operands
+
+import (
+    "context"
+    "fmt"
+    hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1"
+    "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
+    "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils"
+    hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
+    . "github.com/onsi/ginkgo"
+    .
"github.com/onsi/gomega" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + "github.com/openshift/custom-resource-status/testlib" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/reference" + cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" +) + +var _ = Describe("CDI Operand", func() { + Context("CDI", func() { + var ( + hco *hcov1beta1.HyperConverged + req *common.HcoRequest + ) + + BeforeEach(func() { + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) + }) + + It("should create if not present", func() { + expectedResource := hco.NewCDI() + cl := commonTestUtils.InitClient([]runtime.Object{}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := &cdiv1beta1.CDI{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) + Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) + }) + + It("should find if present", func() { + expectedResource := hco.NewCDI() + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + // Check conditions + Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "CDIConditions", + Message: "CDI resource has no conditions", + })) + Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "CDIConditions", + Message: "CDI resource has no conditions", + })) + Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionUpgradeable, + Status: corev1.ConditionFalse, + Reason: "CDIConditions", + Message: "CDI resource has no conditions", + })) + }) + + It("should set default UninstallStrategy if missing", func() { + expectedResource := hco.NewCDI() + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + missingUSResource := hco.NewCDI() + missingUSResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/%s/dummies/%s", missingUSResource.Namespace, missingUSResource.Name) + missingUSResource.Spec.UninstallStrategy = nil + + cl := commonTestUtils.InitClient([]runtime.Object{hco, missingUSResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} 
+ res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &cdiv1beta1.CDI{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(*foundResource.Spec.UninstallStrategy).To(Equal(*expectedResource.Spec.UninstallStrategy)) + }) + + It("should add node placement if missing in CDI", func() { + existingResource := hco.NewCDI() + + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &cdiv1beta1.CDI{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra.Affinity).To(BeNil()) + Expect(existingResource.Spec.Infra.Tolerations).To(BeEmpty()) + Expect(existingResource.Spec.Infra.NodeSelector).To(BeNil()) + Expect(existingResource.Spec.Workloads.Affinity).To(BeNil()) + Expect(existingResource.Spec.Workloads.Tolerations).To(BeEmpty()) + Expect(existingResource.Spec.Workloads.NodeSelector).To(BeNil()) + + Expect(foundResource.Spec.Infra.Affinity).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodeSelector["key1"]).Should(Equal("value1")) + Expect(foundResource.Spec.Infra.NodeSelector["key2"]).Should(Equal("value2")) + + Expect(foundResource.Spec.Workloads).ToNot(BeNil()) + Expect(foundResource.Spec.Workloads.Tolerations).Should(Equal(hco.Spec.Workloads.NodePlacement.Tolerations)) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should remove node placement if missing in HCO CR", func() { + + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := hcoNodePlacement.NewCDI() + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &cdiv1beta1.CDI{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra.Affinity).ToNot(BeNil()) + Expect(existingResource.Spec.Infra.Tolerations).ToNot(BeEmpty()) + Expect(existingResource.Spec.Infra.NodeSelector).ToNot(BeNil()) + Expect(existingResource.Spec.Workloads.Affinity).ToNot(BeNil()) + Expect(existingResource.Spec.Workloads.Tolerations).ToNot(BeEmpty()) + Expect(existingResource.Spec.Workloads.NodeSelector).ToNot(BeNil()) + + Expect(foundResource.Spec.Infra.Affinity).To(BeNil()) + Expect(foundResource.Spec.Infra.Tolerations).To(BeEmpty()) + Expect(foundResource.Spec.Infra.NodeSelector).To(BeNil()) + 
Expect(foundResource.Spec.Workloads.Affinity).To(BeNil()) + Expect(foundResource.Spec.Workloads.Tolerations).To(BeEmpty()) + Expect(foundResource.Spec.Workloads.NodeSelector).To(BeNil()) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should modify node placement according to HCO CR", func() { + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := hco.NewCDI() + + // now, modify HCO's node placement + seconds3 := int64(3) + hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + + hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &cdiv1beta1.CDI{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra.Tolerations).To(HaveLen(2)) + Expect(existingResource.Spec.Workloads.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(foundResource.Spec.Infra.Tolerations).To(HaveLen(3)) + Expect(foundResource.Spec.Workloads.NodeSelector["key1"]).Should(Equal("something else")) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should handle conditions", func() { + expectedResource := hco.NewCDI() + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + expectedResource.Status.Conditions = []conditionsv1.Condition{ + conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Foo", + Message: "Bar", + }, + conditionsv1.Condition{ + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "Foo", + Message: "Bar", + }, + conditionsv1.Condition{ + Type: conditionsv1.ConditionDegraded, + Status: corev1.ConditionTrue, + Reason: "Foo", + Message: "Bar", + }, + } + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + // Check conditions + Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "CDINotAvailable", + Message: "CDI is not available: Bar", + })) + Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "CDIProgressing", + Message: "CDI is progressing: Bar", + 
})) + Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionUpgradeable, + Status: corev1.ConditionFalse, + Reason: "CDIProgressing", + Message: "CDI is progressing: Bar", + })) + Expect(req.Conditions[conditionsv1.ConditionDegraded]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionDegraded, + Status: corev1.ConditionTrue, + Reason: "CDIDegraded", + Message: "CDI is degraded: Bar", + })) + }) + }) + + Context("KubeVirt Storage Config", func() { + var hco *hcov1beta1.HyperConverged + var req *common.HcoRequest + + BeforeEach(func() { + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) + }) + + It("should create if not present", func() { + expectedResource := NewKubeVirtStorageConfigForCR(hco, commonTestUtils.Namespace) + cl := commonTestUtils.InitClient([]runtime.Object{}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + err := handler.ensureKubeVirtStorageConfig(req) + Expect(err).To(BeNil()) + + foundResource := &corev1.ConfigMap{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) + Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) + }) + + It("should find if present", func() { + expectedResource := NewKubeVirtStorageConfigForCR(hco, commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &CdiHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + err := handler.ensureKubeVirtStorageConfig(req) + Expect(err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + }) + + It("volumeMode should be filesystem when platform is baremetal", func() { + hco.Spec.BareMetalPlatform = true + + expectedResource := NewKubeVirtStorageConfigForCR(hco, commonTestUtils.Namespace) + Expect(expectedResource.Data["volumeMode"]).To(Equal("Filesystem")) + }) + + It("volumeMode should be filesystem when platform is not baremetal", func() { + hco.Spec.BareMetalPlatform = false + + expectedResource := NewKubeVirtStorageConfigForCR(hco, commonTestUtils.Namespace) + Expect(expectedResource.Data["volumeMode"]).To(Equal("Filesystem")) + }) + + It("local storage class name should be available when specified", func() { + hco.Spec.LocalStorageClassName = "local" + + expectedResource := NewKubeVirtStorageConfigForCR(hco, commonTestUtils.Namespace) + Expect(expectedResource.Data["local.accessMode"]).To(Equal("ReadWriteOnce")) + Expect(expectedResource.Data["local.volumeMode"]).To(Equal("Filesystem")) + }) + }) +}) diff --git a/pkg/controller/hyperconverged/ensure_result.go b/pkg/controller/operands/ensure_result.go similarity index 97% rename from pkg/controller/hyperconverged/ensure_result.go rename to pkg/controller/operands/ensure_result.go index 237925d80b..6fb91e0542 100644 --- 
a/pkg/controller/hyperconverged/ensure_result.go +++ b/pkg/controller/operands/ensure_result.go @@ -1,4 +1,4 @@ -package hyperconverged +package operands import ( "fmt" diff --git a/pkg/controller/hyperconverged/ensure_result_test.go b/pkg/controller/operands/ensure_result_test.go similarity index 99% rename from pkg/controller/hyperconverged/ensure_result_test.go rename to pkg/controller/operands/ensure_result_test.go index c6c0b7ec93..ddcce464b9 100644 --- a/pkg/controller/hyperconverged/ensure_result_test.go +++ b/pkg/controller/operands/ensure_result_test.go @@ -1,4 +1,4 @@ -package hyperconverged +package operands import ( "errors" diff --git a/pkg/controller/operands/kubevirt.go b/pkg/controller/operands/kubevirt.go new file mode 100644 index 0000000000..81a05b458a --- /dev/null +++ b/pkg/controller/operands/kubevirt.go @@ -0,0 +1,270 @@ +package operands + +import ( + "fmt" + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" + corev1 "k8s.io/api/core/v1" + schedulingv1 "k8s.io/api/scheduling/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/reference" + kubevirtv1 "kubevirt.io/client-go/api/v1" + virtconfig "kubevirt.io/kubevirt/pkg/virt-config" + "os" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + kubevirtDefaultNetworkInterfaceValue = "masquerade" +) + +type KubevirtHandler genericOperand + +func (kv *KubevirtHandler) Ensure(req *common.HcoRequest) *EnsureResult { + virt := req.Instance.NewKubeVirt() + res := NewEnsureResult(virt) + if err := controllerutil.SetControllerReference(req.Instance, virt, kv.Scheme); err != nil { + return res.Error(err) + } + + key, err := client.ObjectKeyFromObject(virt) + if err != nil { + req.Logger.Error(err, "Failed to get object key for KubeVirt") + } + + res.SetName(key.Name) + found := &kubevirtv1.KubeVirt{} + err = kv.Client.Get(req.Ctx, key, found) + if err != nil { + if apierrors.IsNotFound(err) { + req.Logger.Info("Creating kubevirt") + err = kv.Client.Create(req.Ctx, virt) + if err == nil { + return res.SetCreated().SetName(virt.Name) + } + } + return res.Error(err) + } + + req.Logger.Info("KubeVirt already exists", "KubeVirt.Namespace", found.Namespace, "KubeVirt.Name", found.Name) + + if !reflect.DeepEqual(found.Spec, virt.Spec) { + virt.Spec.DeepCopyInto(&found.Spec) + req.Logger.Info("Updating existing KubeVirt's Spec to its default value") + err = kv.Client.Update(req.Ctx, found) + if err != nil { + return res.Error(err) + } + return res.SetUpdated() + } + + // Add it to the list of RelatedObjects if found + objectRef, err := reference.GetReference(kv.Scheme, found) + if err != nil { + return res.Error(err) + } + objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) + + // Handle KubeVirt resource conditions + isReady := handleComponentConditions(req, "KubeVirt", translateKubeVirtConds(found.Status.Conditions)) + + upgradeDone := req.ComponentUpgradeInProgress && isReady && checkComponentVersion(hcoutil.KubevirtVersionEnvV, found.Status.ObservedKubeVirtVersion) + + return 
res.SetUpgradeDone(upgradeDone) +} + +type KvConfigHandler genericOperand + +func (kvc *KvConfigHandler) Ensure(req *common.HcoRequest) *EnsureResult { + kubevirtConfig := NewKubeVirtConfigForCR(req.Instance, req.Namespace) + res := NewEnsureResult(kubevirtConfig) + err := controllerutil.SetControllerReference(req.Instance, kubevirtConfig, kvc.Scheme) + if err != nil { + return res.Error(err) + } + + key, err := client.ObjectKeyFromObject(kubevirtConfig) + if err != nil { + req.Logger.Error(err, "Failed to get object key for kubevirt config") + } + res.SetName(key.Name) + + found := &corev1.ConfigMap{} + err = kvc.Client.Get(req.Ctx, key, found) + if err != nil { + if apierrors.IsNotFound(err) { + req.Logger.Info("Creating kubevirt config") + err = kvc.Client.Create(req.Ctx, kubevirtConfig) + if err == nil { + return res.SetCreated() + } + } + return res.Error(err) + } + + req.Logger.Info("KubeVirt config already exists", "KubeVirtConfig.Namespace", found.Namespace, "KubeVirtConfig.Name", found.Name) + // Add it to the list of RelatedObjects if found + objectRef, err := reference.GetReference(kvc.Scheme, found) + if err != nil { + return res.Error(err) + } + objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) + + if req.UpgradeMode { + + changed := false + // only virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, + // virtconfig.FeatureGatesKey and virtconfig.UseEmulationKey are going to be manipulated + // and only on HCO upgrades. + // virtconfig.MigrationsConfigKey is going to be removed if set in the past (only during upgrades). + // TODO: This is going to change in the next HCO release where the whole configMap is going + // to be continuously reconciled + for _, k := range []string{ + virtconfig.FeatureGatesKey, + virtconfig.SmbiosConfigKey, + virtconfig.MachineTypeKey, + virtconfig.SELinuxLauncherTypeKey, + virtconfig.UseEmulationKey, + virtconfig.MigrationsConfigKey, + } { + if found.Data[k] != kubevirtConfig.Data[k] { + req.Logger.Info(fmt.Sprintf("Updating %s on existing KubeVirt config", k)) + found.Data[k] = kubevirtConfig.Data[k] + changed = true + } + } + for _, k := range []string{virtconfig.MigrationsConfigKey} { + _, ok := found.Data[k] + if ok { + req.Logger.Info(fmt.Sprintf("Deleting %s on existing KubeVirt config", k)) + delete(found.Data, k) + changed = true + } + } + + if changed { + err = kvc.Client.Update(req.Ctx, found) + if err != nil { + req.Logger.Error(err, "Failed updating the kubevirt config map") + return res.Error(err) + } + } + } + + return res.SetUpgradeDone(req.ComponentUpgradeInProgress) +} + +type KvPriorityClassHandler genericOperand + +func (kvpc *KvPriorityClassHandler) Ensure(req *common.HcoRequest) *EnsureResult { + req.Logger.Info("Reconciling KubeVirt PriorityClass") + pc := req.Instance.NewKubeVirtPriorityClass() + res := NewEnsureResult(pc) + key, err := client.ObjectKeyFromObject(pc) + if err != nil { + req.Logger.Error(err, "Failed to get object key for KubeVirt PriorityClass") + return res.Error(err) + } + + res.SetName(key.Name) + found := &schedulingv1.PriorityClass{} + err = kvpc.Client.Get(req.Ctx, key, found) + + if err != nil { + if apierrors.IsNotFound(err) { + // create the new object + err = kvpc.Client.Create(req.Ctx, pc, &client.CreateOptions{}) + if err == nil { + return res.SetCreated() + } + } + + return res.Error(err) + } + + // at this point we found the object in the cache and we check if something was changed + if pc.Name == found.Name && pc.Value 
== found.Value && pc.Description == found.Description { + req.Logger.Info("KubeVirt PriorityClass already exists", "PriorityClass.Name", pc.Name) + objectRef, err := reference.GetReference(kvpc.Scheme, found) + if err != nil { + req.Logger.Error(err, "failed getting object reference for found object") + return res.Error(err) + } + objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) + + return res.SetUpgradeDone(req.ComponentUpgradeInProgress) + } + + // something was changed but since we can't patch a priority class object, we remove it + err = kvpc.Client.Delete(req.Ctx, found, &client.DeleteOptions{}) + if err != nil { + return res.Error(err) + } + + // create the new object + err = kvpc.Client.Create(req.Ctx, pc, &client.CreateOptions{}) + if err != nil { + return res.Error(err) + } + return res.SetUpdated() +} + +// translateKubeVirtConds translates list of KubeVirt conditions to a list of custom resource +// conditions. +func translateKubeVirtConds(orig []kubevirtv1.KubeVirtCondition) []conditionsv1.Condition { + translated := make([]conditionsv1.Condition, len(orig)) + + for i, origCond := range orig { + translated[i] = conditionsv1.Condition{ + Type: conditionsv1.ConditionType(origCond.Type), + Status: origCond.Status, + Reason: origCond.Reason, + Message: origCond.Message, + } + } + + return translated +} + +func NewKubeVirtConfigForCR(cr *hcov1beta1.HyperConverged, namespace string) *corev1.ConfigMap { + labels := map[string]string{ + hcoutil.AppLabel: cr.Name, + } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubevirt-config", + Labels: labels, + Namespace: namespace, + }, + // only virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, + // virtconfig.FeatureGatesKey and virtconfig.UseEmulationKey are going to be manipulated + // and only on HCO upgrades. + // virtconfig.MigrationsConfigKey is going to be removed if set in the past (only during upgrades). + // TODO: This is going to change in the next HCO release where the whole configMap is going + // to be continuously reconciled + Data: map[string]string{ + virtconfig.FeatureGatesKey: "DataVolumes,SRIOV,LiveMigration,CPUManager,CPUNodeDiscovery,Sidecar,Snapshot", + virtconfig.SELinuxLauncherTypeKey: "virt_launcher.process", + virtconfig.NetworkInterfaceKey: kubevirtDefaultNetworkInterfaceValue, + }, + } + val, ok := os.LookupEnv("SMBIOS") + if ok && val != "" { + cm.Data[virtconfig.SmbiosConfigKey] = val + } + val, ok = os.LookupEnv("MACHINETYPE") + if ok && val != "" { + cm.Data[virtconfig.MachineTypeKey] = val + } + val, ok = os.LookupEnv("KVM_EMULATION") + if ok && val != "" { + cm.Data[virtconfig.UseEmulationKey] = val + } + return cm +} diff --git a/pkg/controller/operands/kubevirt_test.go b/pkg/controller/operands/kubevirt_test.go new file mode 100644 index 0000000000..0b79d38291 --- /dev/null +++ b/pkg/controller/operands/kubevirt_test.go @@ -0,0 +1,512 @@ +package operands + +import ( + "context" + "fmt" + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" + hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . 
"github.com/onsi/gomega" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + "github.com/openshift/custom-resource-status/testlib" + corev1 "k8s.io/api/core/v1" + schedulingv1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/reference" + kubevirtv1 "kubevirt.io/client-go/api/v1" + "os" + "sigs.k8s.io/controller-runtime/pkg/client" + + virtconfig "kubevirt.io/kubevirt/pkg/virt-config" +) + +var _ = Describe("KubeVirt Operand", func() { + Context("KubeVirt Priority Classes", func() { + + var hco *hcov1beta1.HyperConverged + var req *common.HcoRequest + + BeforeEach(func() { + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) + }) + + It("should create if not present", func() { + expectedResource := hco.NewKubeVirtPriorityClass() + cl := commonTestUtils.InitClient([]runtime.Object{}) + handler := &KvPriorityClassHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + key, err := client.ObjectKeyFromObject(expectedResource) + Expect(err).ToNot(HaveOccurred()) + foundResource := &schedulingv1.PriorityClass{} + Expect(cl.Get(context.TODO(), key, foundResource)).To(BeNil()) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Value).To(Equal(expectedResource.Value)) + Expect(foundResource.GlobalDefault).To(Equal(expectedResource.GlobalDefault)) + }) + + It("should do nothing if already exists", func() { + expectedResource := hco.NewKubeVirtPriorityClass() + cl := commonTestUtils.InitClient([]runtime.Object{expectedResource}) + handler := &KvPriorityClassHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + }) + + DescribeTable("should update if something changed", func(modifiedResource *schedulingv1.PriorityClass) { + cl := commonTestUtils.InitClient([]runtime.Object{modifiedResource}) + handler := &KvPriorityClassHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + expectedResource := hco.NewKubeVirtPriorityClass() + key, err := client.ObjectKeyFromObject(expectedResource) + Expect(err).ToNot(HaveOccurred()) + foundResource := &schedulingv1.PriorityClass{} + Expect(cl.Get(context.TODO(), key, foundResource)) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Value).To(Equal(expectedResource.Value)) + Expect(foundResource.GlobalDefault).To(Equal(expectedResource.GlobalDefault)) + }, + Entry("with modified value", + &schedulingv1.PriorityClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "scheduling.k8s.io/v1", + Kind: "PriorityClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "kubevirt-cluster-critical", + }, + Value: 1, + GlobalDefault: false, + Description: "", + }), + Entry("with modified global default", + &schedulingv1.PriorityClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "scheduling.k8s.io/v1", + Kind: "PriorityClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "kubevirt-cluster-critical", + }, + Value: 1000000000, + GlobalDefault: true, + Description: "", + }), + ) + 
+ }) + + Context("KubeVirt Config", func() { + + var hco *hcov1beta1.HyperConverged + var req *common.HcoRequest + + updatableKeys := [...]string{virtconfig.SmbiosConfigKey, virtconfig.MachineTypeKey, virtconfig.SELinuxLauncherTypeKey, virtconfig.FeatureGatesKey} + removeKeys := [...]string{virtconfig.MigrationsConfigKey} + unupdatableKeys := [...]string{virtconfig.NetworkInterfaceKey} + + BeforeEach(func() { + hco = commonTestUtils.NewHco() + req = commonTestUtils.NewReq(hco) + + os.Setenv("SMBIOS", "new-smbios-value-that-we-have-to-set") + os.Setenv("MACHINETYPE", "new-machinetype-value-that-we-have-to-set") + }) + + It("should create if not present", func() { + expectedResource := NewKubeVirtConfigForCR(req.Instance, commonTestUtils.Namespace) + cl := commonTestUtils.InitClient([]runtime.Object{}) + handler := &KvConfigHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := &corev1.ConfigMap{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) + Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) + }) + + It("should find if present", func() { + expectedResource := NewKubeVirtConfigForCR(hco, commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &KvConfigHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + }) + + It("should update only a few keys and only when in upgrade mode", func() { + expectedResource := NewKubeVirtConfigForCR(hco, commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + outdatedResource := NewKubeVirtConfigForCR(hco, commonTestUtils.Namespace) + outdatedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", outdatedResource.Namespace, outdatedResource.Name) + // values we should update + outdatedResource.Data[virtconfig.SmbiosConfigKey] = "old-smbios-value-that-we-have-to-update" + outdatedResource.Data[virtconfig.MachineTypeKey] = "old-machinetype-value-that-we-have-to-update" + outdatedResource.Data[virtconfig.SELinuxLauncherTypeKey] = "old-selinuxlauncher-value-that-we-have-to-update" + outdatedResource.Data[virtconfig.FeatureGatesKey] = "old-featuregates-value-that-we-have-to-update" + // value that we should remove if configured + outdatedResource.Data[virtconfig.MigrationsConfigKey] = "old-migrationsconfig-value-that-we-should-remove" + // values we should preserve + outdatedResource.Data[virtconfig.NetworkInterfaceKey] = "old-defaultnetworkinterface-value-that-we-should-preserve" + + cl := 
commonTestUtils.InitClient([]runtime.Object{hco, outdatedResource})
+            handler := &KvConfigHandler{Client: cl, Scheme: commonTestUtils.GetScheme()}
+
+            // force upgrade mode
+            req.UpgradeMode = true
+            res := handler.Ensure(req)
+            Expect(res.UpgradeDone).To(BeFalse())
+            Expect(res.Err).To(BeNil())
+
+            foundResource := &corev1.ConfigMap{}
+            Expect(
+                cl.Get(context.TODO(),
+                    types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace},
+                    foundResource),
+            ).To(BeNil())
+
+            for _, k := range updatableKeys {
+                Expect(foundResource.Data[k]).To(Not(Equal(outdatedResource.Data[k])))
+                Expect(foundResource.Data[k]).To(Equal(expectedResource.Data[k]))
+            }
+            for _, k := range unupdatableKeys {
+                Expect(foundResource.Data[k]).To(Equal(outdatedResource.Data[k]))
+                Expect(foundResource.Data[k]).To(Not(Equal(expectedResource.Data[k])))
+            }
+            for _, k := range removeKeys {
+                Expect(outdatedResource.Data).To(HaveKey(k))
+                Expect(expectedResource.Data).To(Not(HaveKey(k)))
+                Expect(foundResource.Data).To(Not(HaveKey(k)))
+            }
+        })
+
+        It("should not touch it when not in upgrade mode", func() {
+            expectedResource := NewKubeVirtConfigForCR(hco, commonTestUtils.Namespace)
+            expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name)
+            outdatedResource := NewKubeVirtConfigForCR(hco, commonTestUtils.Namespace)
+            outdatedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", outdatedResource.Namespace, outdatedResource.Name)
+            // values we should update
+            outdatedResource.Data[virtconfig.SmbiosConfigKey] = "old-smbios-value-that-we-have-to-update"
+            outdatedResource.Data[virtconfig.MachineTypeKey] = "old-machinetype-value-that-we-have-to-update"
+            outdatedResource.Data[virtconfig.SELinuxLauncherTypeKey] = "old-selinuxlauncher-value-that-we-have-to-update"
+            outdatedResource.Data[virtconfig.FeatureGatesKey] = "old-featuregates-value-that-we-have-to-update"
+            // values we should preserve
+            outdatedResource.Data[virtconfig.MigrationsConfigKey] = "old-migrationsconfig-value-that-we-should-preserve"
+            outdatedResource.Data[virtconfig.DefaultNetworkInterface] = "old-defaultnetworkinterface-value-that-we-should-preserve"
+
+            cl := commonTestUtils.InitClient([]runtime.Object{hco, outdatedResource})
+            handler := &KvConfigHandler{Client: cl, Scheme: commonTestUtils.GetScheme()}
+
+            // ensure that we are not in upgrade mode
+            req.UpgradeMode = false
+
+            res := handler.Ensure(req)
+            Expect(res.UpgradeDone).To(BeFalse())
+            Expect(res.Err).To(BeNil())
+
+            foundResource := &corev1.ConfigMap{}
+            Expect(
+                cl.Get(context.TODO(),
+                    types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace},
+                    foundResource),
+            ).To(BeNil())
+
+            Expect(foundResource.Data).To(Equal(outdatedResource.Data))
+            Expect(foundResource.Data).To(Not(Equal(expectedResource.Data)))
+        })
+    })
+
+    Context("KubeVirt", func() {
+        var hco *hcov1beta1.HyperConverged
+        var req *common.HcoRequest
+
+        BeforeEach(func() {
+            hco = commonTestUtils.NewHco()
+            req = commonTestUtils.NewReq(hco)
+        })
+
+        It("should create if not present", func() {
+            expectedResource := hco.NewKubeVirt(commonTestUtils.Namespace)
+            cl := commonTestUtils.InitClient([]runtime.Object{})
+            handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()}
+            res := handler.Ensure(req)
+            Expect(res.UpgradeDone).To(BeFalse())
+            Expect(res.Err).To(BeNil())
+
+            foundResource := &kubevirtv1.KubeVirt{}
+            Expect(
+
cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(foundResource.Name).To(Equal(expectedResource.Name)) + Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) + Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) + }) + + It("should find if present", func() { + expectedResource := hco.NewKubeVirt(commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + // Check conditions + Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "KubeVirtConditions", + Message: "KubeVirt resource has no conditions", + })) + Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "KubeVirtConditions", + Message: "KubeVirt resource has no conditions", + })) + Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionUpgradeable, + Status: corev1.ConditionFalse, + Reason: "KubeVirtConditions", + Message: "KubeVirt resource has no conditions", + })) + }) + + It("should set default UninstallStrategy if missing", func() { + expectedResource := hco.NewKubeVirt(commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + missingUSResource := hco.NewKubeVirt(commonTestUtils.Namespace) + missingUSResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", missingUSResource.Namespace, missingUSResource.Name) + missingUSResource.Spec.UninstallStrategy = "" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, missingUSResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &kubevirtv1.KubeVirt{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, + foundResource), + ).To(BeNil()) + Expect(foundResource.Spec.UninstallStrategy).To(Equal(expectedResource.Spec.UninstallStrategy)) + }) + + It("should add node placement if missing in KubeVirt", func() { + existingResource := hco.NewKubeVirt() + + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + + cl := 
commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &kubevirtv1.KubeVirt{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra).To(BeNil()) + Expect(existingResource.Spec.Workloads).To(BeNil()) + + Expect(foundResource.Spec.Infra).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodePlacement).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodePlacement.Affinity).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodePlacement.NodeSelector["key1"]).Should(Equal("value1")) + Expect(foundResource.Spec.Infra.NodePlacement.NodeSelector["key2"]).Should(Equal("value2")) + + Expect(foundResource.Spec.Workloads).ToNot(BeNil()) + Expect(foundResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) + Expect(foundResource.Spec.Workloads.NodePlacement.Tolerations).Should(Equal(hco.Spec.Workloads.NodePlacement.Tolerations)) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should remove node placement if missing in HCO CR", func() { + + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := hcoNodePlacement.NewKubeVirt() + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &kubevirtv1.KubeVirt{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra).ToNot(BeNil()) + Expect(existingResource.Spec.Workloads).ToNot(BeNil()) + + Expect(foundResource.Spec.Infra).To(BeNil()) + Expect(foundResource.Spec.Workloads).To(BeNil()) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should modify node placement according to HCO CR", func() { + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := hco.NewKubeVirt() + + // now, modify HCO's node placement + seconds3 := int64(3) + hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + + hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &kubevirtv1.KubeVirt{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, 
Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.Infra).ToNot(BeNil()) + Expect(existingResource.Spec.Infra.NodePlacement).ToNot(BeNil()) + Expect(existingResource.Spec.Infra.NodePlacement.Tolerations).To(HaveLen(2)) + Expect(existingResource.Spec.Workloads).ToNot(BeNil()) + + Expect(existingResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) + Expect(existingResource.Spec.Workloads.NodePlacement.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(foundResource.Spec.Infra).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodePlacement).ToNot(BeNil()) + Expect(foundResource.Spec.Infra.NodePlacement.Tolerations).To(HaveLen(3)) + + Expect(foundResource.Spec.Workloads).ToNot(BeNil()) + Expect(foundResource.Spec.Workloads.NodePlacement).ToNot(BeNil()) + Expect(foundResource.Spec.Workloads.NodePlacement.NodeSelector["key1"]).Should(Equal("something else")) + + Expect(req.Conditions).To(BeEmpty()) + }) + + It("should handle conditions", func() { + expectedResource := hco.NewKubeVirt(commonTestUtils.Namespace) + expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) + expectedResource.Status.Conditions = []kubevirtv1.KubeVirtCondition{ + kubevirtv1.KubeVirtCondition{ + Type: kubevirtv1.KubeVirtConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "Foo", + Message: "Bar", + }, + kubevirtv1.KubeVirtCondition{ + Type: kubevirtv1.KubeVirtConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "Foo", + Message: "Bar", + }, + kubevirtv1.KubeVirtCondition{ + Type: kubevirtv1.KubeVirtConditionDegraded, + Status: corev1.ConditionTrue, + Reason: "Foo", + Message: "Bar", + }, + } + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) + handler := &KubevirtHandler{Client: cl, Scheme: commonTestUtils.GetScheme()} + res := handler.Ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Check HCO's status + Expect(hco.Status.RelatedObjects).To(Not(BeNil())) + objectRef, err := reference.GetReference(handler.Scheme, expectedResource) + Expect(err).To(BeNil()) + // ObjectReference should have been added + Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) + // Check conditions + Expect(req.Conditions[conditionsv1.ConditionAvailable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: "KubeVirtNotAvailable", + Message: "KubeVirt is not available: Bar", + })) + Expect(req.Conditions[conditionsv1.ConditionProgressing]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionTrue, + Reason: "KubeVirtProgressing", + Message: "KubeVirt is progressing: Bar", + })) + Expect(req.Conditions[conditionsv1.ConditionUpgradeable]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionUpgradeable, + Status: corev1.ConditionFalse, + Reason: "KubeVirtProgressing", + Message: "KubeVirt is progressing: Bar", + })) + Expect(req.Conditions[conditionsv1.ConditionDegraded]).To(testlib.RepresentCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionDegraded, + Status: corev1.ConditionTrue, + Reason: "KubeVirtDegraded", + Message: "KubeVirt is degraded: Bar", + })) + }) + }) +}) diff --git a/pkg/controller/operands/operand.go b/pkg/controller/operands/operand.go new file mode 100644 index 0000000000..30f11ec97d --- 
/dev/null
+++ b/pkg/controller/operands/operand.go
@@ -0,0 +1,118 @@
+package operands
+
+import (
+    "fmt"
+    "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
+    conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "os"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Operand interface {
+    Ensure(req *common.HcoRequest) *EnsureResult
+}
+
+type genericOperand struct {
+    Client client.Client
+    Scheme *runtime.Scheme
+}
+
+// handleComponentConditions reads and processes a sub-component's conditions.
+// It returns true if the conditions indicate a "ready" state, and false otherwise.
+func handleComponentConditions(req *common.HcoRequest, component string, componentConds []conditionsv1.Condition) (isReady bool) {
+    isReady = true
+    if len(componentConds) == 0 {
+        isReady = false
+        reason := fmt.Sprintf("%sConditions", component)
+        message := fmt.Sprintf("%s resource has no conditions", component)
+        req.Logger.Info(fmt.Sprintf("%s's resource is not reporting Conditions on its Status", component))
+        req.Conditions.SetStatusCondition(conditionsv1.Condition{
+            Type:    conditionsv1.ConditionAvailable,
+            Status:  corev1.ConditionFalse,
+            Reason:  reason,
+            Message: message,
+        })
+        req.Conditions.SetStatusCondition(conditionsv1.Condition{
+            Type:    conditionsv1.ConditionProgressing,
+            Status:  corev1.ConditionTrue,
+            Reason:  reason,
+            Message: message,
+        })
+        req.Conditions.SetStatusCondition(conditionsv1.Condition{
+            Type:    conditionsv1.ConditionUpgradeable,
+            Status:  corev1.ConditionFalse,
+            Reason:  reason,
+            Message: message,
+        })
+    } else {
+        foundAvailableCond := false
+        foundProgressingCond := false
+        foundDegradedCond := false
+        for _, condition := range componentConds {
+            switch condition.Type {
+            case conditionsv1.ConditionAvailable:
+                foundAvailableCond = true
+                if condition.Status == corev1.ConditionFalse {
+                    isReady = false
+                    msg := fmt.Sprintf("%s is not available: %v", component, string(condition.Message))
+                    componentNotAvailable(req, component, msg)
+                }
+            case conditionsv1.ConditionProgressing:
+                foundProgressingCond = true
+                if condition.Status == corev1.ConditionTrue {
+                    isReady = false
+                    req.Logger.Info(fmt.Sprintf("%s is 'Progressing'", component))
+                    req.Conditions.SetStatusCondition(conditionsv1.Condition{
+                        Type:    conditionsv1.ConditionProgressing,
+                        Status:  corev1.ConditionTrue,
+                        Reason:  fmt.Sprintf("%sProgressing", component),
+                        Message: fmt.Sprintf("%s is progressing: %v", component, string(condition.Message)),
+                    })
+                    req.Conditions.SetStatusCondition(conditionsv1.Condition{
+                        Type:    conditionsv1.ConditionUpgradeable,
+                        Status:  corev1.ConditionFalse,
+                        Reason:  fmt.Sprintf("%sProgressing", component),
+                        Message: fmt.Sprintf("%s is progressing: %v", component, string(condition.Message)),
+                    })
+                }
+            case conditionsv1.ConditionDegraded:
+                foundDegradedCond = true
+                if condition.Status == corev1.ConditionTrue {
+                    isReady = false
+                    req.Logger.Info(fmt.Sprintf("%s is 'Degraded'", component))
+                    req.Conditions.SetStatusCondition(conditionsv1.Condition{
+                        Type:    conditionsv1.ConditionDegraded,
+                        Status:  corev1.ConditionTrue,
+                        Reason:  fmt.Sprintf("%sDegraded", component),
+                        Message: fmt.Sprintf("%s is degraded: %v", component, string(condition.Message)),
+                    })
+                }
+            }
+        }
+
+        if !foundAvailableCond {
+            componentNotAvailable(req, component, `missing "Available" condition`)
+        }
+
+        // ready only if nothing above flagged a problem AND all three expected
+        // condition types were actually reported by the component
+        isReady = isReady && foundAvailableCond && foundProgressingCond && foundDegradedCond
+    }
+
+
return isReady +} + +func componentNotAvailable(req *common.HcoRequest, component string, msg string) { + req.Logger.Info(fmt.Sprintf("%s is not 'Available'", component)) + req.Conditions.SetStatusCondition(conditionsv1.Condition{ + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionFalse, + Reason: fmt.Sprintf("%sNotAvailable", component), + Message: msg, + }) +} + +func checkComponentVersion(versionEnvName, actualVersion string) bool { + expectedVersion := os.Getenv(versionEnvName) + return expectedVersion != "" && expectedVersion == actualVersion +} diff --git a/pkg/controller/operands/operands_suite_test.go b/pkg/controller/operands/operands_suite_test.go new file mode 100644 index 0000000000..dd507c0c2c --- /dev/null +++ b/pkg/controller/operands/operands_suite_test.go @@ -0,0 +1,13 @@ +package operands_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestHyperconverged(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Operands Suite") +} diff --git a/pkg/controller/operands/testUtils_test.go b/pkg/controller/operands/testUtils_test.go new file mode 100644 index 0000000000..c34b93e7c9 --- /dev/null +++ b/pkg/controller/operands/testUtils_test.go @@ -0,0 +1,47 @@ +package operands_test + +import ( + "context" + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" + "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" +) + +// name and namespace of our primary resource +const ( + name = "kubevirt-hyperconverged" + namespace = "kubevirt-hyperconverged" +) + +var ( + request = reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + } + log = logf.Log.WithName("controller_hyperconverged") +) + +func newHco() *hcov1beta1.HyperConverged { + return &hcov1beta1.HyperConverged{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: hcov1beta1.HyperConvergedSpec{}, + } +} + +func newReq(inst *hcov1beta1.HyperConverged) *common.HcoRequest { + return &common.HcoRequest{ + Request: request, + Logger: log, + Conditions: common.NewHcoConditions(), + Ctx: context.TODO(), + Instance: inst, + } +}
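For reviewers, a minimal sketch of how the new Operand interface is intended to be driven. The ensureAll helper below is illustrative only and is not part of this patch; it assumes it lives in the operands package introduced above, next to Operand, EnsureResult, and the handler types.

package operands

import (
	"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
)

// ensureAll is a hypothetical driver: it runs every operand in order, stops at
// the first error, and folds each operand's UpgradeDone flag into the request,
// mirroring how ComponentUpgradeInProgress is accumulated during an upgrade.
func ensureAll(req *common.HcoRequest, ops []Operand) error {
	for _, op := range ops {
		res := op.Ensure(req)
		if res.Err != nil {
			return res.Err
		}
		req.ComponentUpgradeInProgress = req.ComponentUpgradeInProgress && res.UpgradeDone
	}
	return nil
}

A caller would populate ops with handlers such as &KubevirtHandler{Client: c, Scheme: s}, &KvConfigHandler{Client: c, Scheme: s}, and &CdiHandler{Client: c, Scheme: s}, in dependency order; the actual reconciler wiring is out of scope for this sketch.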