diff --git a/artifacts/flagger/crd.yaml b/artifacts/flagger/crd.yaml
index 3ae25fa8b..a39bb578c 100644
--- a/artifacts/flagger/crd.yaml
+++ b/artifacts/flagger/crd.yaml
@@ -502,6 +502,9 @@ spec:
               skipAnalysis:
                 description: Skip analysis and promote canary
                 type: boolean
+              revertOnDeletion:
+                description: Revert mutated resources to original spec on deletion
+                type: boolean
               analysis:
                 description: Canary analysis for this canary
                 type: object
@@ -650,6 +653,8 @@ spec:
                     - Finalising
                     - Succeeded
                     - Failed
+                    - Terminating
+                    - Terminated
                   canaryWeight:
                     description: Traffic weight percentage routed to canary
                     type: number
diff --git a/charts/flagger/crds/crd.yaml b/charts/flagger/crds/crd.yaml
index 3ae25fa8b..a39bb578c 100644
--- a/charts/flagger/crds/crd.yaml
+++ b/charts/flagger/crds/crd.yaml
@@ -502,6 +502,9 @@ spec:
               skipAnalysis:
                 description: Skip analysis and promote canary
                 type: boolean
+              revertOnDeletion:
+                description: Revert mutated resources to original spec on deletion
+                type: boolean
               analysis:
                 description: Canary analysis for this canary
                 type: object
@@ -650,6 +653,8 @@ spec:
                     - Finalising
                     - Succeeded
                     - Failed
+                    - Terminating
+                    - Terminated
                   canaryWeight:
                     description: Traffic weight percentage routed to canary
                     type: number
diff --git a/docs/gitbook/usage/how-it-works.md b/docs/gitbook/usage/how-it-works.md
index 1b247074a..4fa930861 100644
--- a/docs/gitbook/usage/how-it-works.md
+++ b/docs/gitbook/usage/how-it-works.md
@@ -240,6 +240,32 @@ kubectl wait canary/podinfo --for=condition=promoted --timeout=5m
 kubectl get canary/podinfo | grep Succeeded
 ```
 
+### Canary finalizers
+
+By default, when a canary is deleted, Flagger leaves resources that aren't owned by the controller
+in their current state. This simplifies the delete action and avoids possible deadlocks during resource
+finalization. If the canary was introduced alongside existing resources (e.g. a service, virtual service, etc.),
+those resources are mutated during the initialization phase and no longer reflect their initial state.
+To revert such resources to their initial state on deletion, the `revertOnDeletion` attribute
+can be enabled.
+
+```yaml
+spec:
+  revertOnDeletion: true
+```
+
+When a deletion action is submitted to the cluster, Flagger will attempt to revert the following resources:
+
+* [Canary target](#canary-target) replicas will be updated to the primary replica count
+* [Canary service](#canary-service) selector will be reverted
+* Mesh/Ingress traffic will be routed back to the target
+
+If you only want to disable canary analysis, the recommended approach is the `skipAnalysis`
+attribute, which limits the need for resource reconciliation. Enable `revertOnDeletion` only
+when you no longer plan to rely on Flagger for deployment management.
+
+**Note** When this feature is enabled, expect a delay in the delete action due to the reconciliation.
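+
+For example, a canary that opts in to revert-on-deletion while also skipping analysis could look like
+the following sketch (the `podinfo` target, namespace and port are illustrative, not prescriptive):
+
+```yaml
+apiVersion: flagger.app/v1beta1
+kind: Canary
+metadata:
+  name: podinfo
+  namespace: test
+spec:
+  revertOnDeletion: true
+  skipAnalysis: true
+  targetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: podinfo
+  service:
+    port: 9898
+```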
+
 ### Canary analysis
 
 The canary analysis defines:
diff --git a/go.sum b/go.sum
index 923489986..dc82318c5 100644
--- a/go.sum
+++ b/go.sum
@@ -322,4 +322,4 @@ modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs
 modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
\ No newline at end of file
diff --git a/kustomize/base/flagger/crd.yaml b/kustomize/base/flagger/crd.yaml
index 3ae25fa8b..a39bb578c 100644
--- a/kustomize/base/flagger/crd.yaml
+++ b/kustomize/base/flagger/crd.yaml
@@ -502,6 +502,9 @@ spec:
               skipAnalysis:
                 description: Skip analysis and promote canary
                 type: boolean
+              revertOnDeletion:
+                description: Revert mutated resources to original spec on deletion
+                type: boolean
               analysis:
                 description: Canary analysis for this canary
                 type: object
@@ -650,6 +653,8 @@ spec:
                     - Finalising
                     - Succeeded
                     - Failed
+                    - Terminating
+                    - Terminated
                   canaryWeight:
                     description: Traffic weight percentage routed to canary
                     type: number
diff --git a/pkg/apis/flagger/v1beta1/canary.go b/pkg/apis/flagger/v1beta1/canary.go
index a68a0a43f..25bafe0da 100644
--- a/pkg/apis/flagger/v1beta1/canary.go
+++ b/pkg/apis/flagger/v1beta1/canary.go
@@ -94,6 +94,10 @@ type CanarySpec struct {
 	// SkipAnalysis promotes the canary without analysing it
 	// +optional
 	SkipAnalysis bool `json:"skipAnalysis,omitempty"`
+
+	// RevertOnDeletion reverts the canary mutations on deletion of the canary resource
+	// +optional
+	RevertOnDeletion bool `json:"revertOnDeletion,omitempty"`
 }
 
 // CanaryService defines how ClusterIP services, service mesh or ingress routing objects are generated
@@ -298,7 +302,7 @@ type CanaryWebhook struct {
 	URL string `json:"url"`
 
 	// Request timeout for this webhook
-	Timeout string `json:"timeout"`
+	Timeout string `json:"timeout,omitempty"`
 
 	// Metadata (key-value pairs) for this webhook
 	// +optional
diff --git a/pkg/apis/flagger/v1beta1/status.go b/pkg/apis/flagger/v1beta1/status.go
index b44341e4c..0c83f5a59 100644
--- a/pkg/apis/flagger/v1beta1/status.go
+++ b/pkg/apis/flagger/v1beta1/status.go
@@ -57,6 +57,12 @@ const (
 	// CanaryPhaseFailed means the canary analysis failed
 	// and the canary deployment has been scaled to zero
 	CanaryPhaseFailed CanaryPhase = "Failed"
+	// CanaryPhaseTerminating means the canary has been marked
+	// for deletion and is in the finalizing state
+	CanaryPhaseTerminating CanaryPhase = "Terminating"
+	// CanaryPhaseTerminated means the canary has been finalized
+	// and successfully deleted
+	CanaryPhaseTerminated CanaryPhase = "Terminated"
 )
 
 // CanaryStatus is used for state persistence (read-only)
diff --git a/pkg/canary/controller.go b/pkg/canary/controller.go
index d54c1a202..635f1af10 100644
--- a/pkg/canary/controller.go
+++ b/pkg/canary/controller.go
@@ -19,4 +19,5 @@ type Controller interface {
 	HaveDependenciesChanged(canary *flaggerv1.Canary) (bool, error)
 	ScaleToZero(canary *flaggerv1.Canary) error
 	ScaleFromZero(canary *flaggerv1.Canary) error
+	Finalize(canary *flaggerv1.Canary) error
 }
diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go
index 0642f6b83..2bbcf57a1 100644
--- a/pkg/canary/daemonset_controller.go
+++ b/pkg/canary/daemonset_controller.go
@@ -296,3 +296,12 @@ func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (str
 func (c *DaemonSetController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
 	return c.configTracker.HasConfigChanged(cd)
}
+
+// Finalize scales the reference instance back up from zero
+func (c *DaemonSetController) Finalize(cd *flaggerv1.Canary) error {
+
+	if err := c.ScaleFromZero(cd); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/canary/daemonset_controller_test.go b/pkg/canary/daemonset_controller_test.go
index 0222a7103..3899b1638 100644
--- a/pkg/canary/daemonset_controller_test.go
+++ b/pkg/canary/daemonset_controller_test.go
@@ -194,3 +194,19 @@ func TestDaemonSetController_Scale(t *testing.T) {
 		}
 	})
 }
+
+func TestDaemonSetController_Finalize(t *testing.T) {
+	mocks := newDaemonSetFixture()
+	err := mocks.controller.Initialize(mocks.canary, true)
+	require.NoError(t, err)
+
+	err = mocks.controller.Finalize(mocks.canary)
+	require.NoError(t, err)
+
+	dep, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get("podinfo", metav1.GetOptions{})
+	require.NoError(t, err)
+
+	_, ok := dep.Spec.Template.Spec.NodeSelector["flagger.app/scale-to-zero"]
+
+	assert.False(t, ok)
+}
diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go
index 3a89526c9..746c951c8 100644
--- a/pkg/canary/deployment_controller.go
+++ b/pkg/canary/deployment_controller.go
@@ -179,6 +179,27 @@ func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error {
 	return nil
 }
 
+// Scale sets the canary deployment replicas
+func (c *DeploymentController) Scale(cd *flaggerv1.Canary, replicas int32) error {
+	targetName := cd.Spec.TargetRef.Name
+	dep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(targetName, metav1.GetOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return fmt.Errorf("deployment %s.%s not found", targetName, cd.Namespace)
+		}
+		return fmt.Errorf("deployment %s.%s query error %v", targetName, cd.Namespace, err)
+	}
+
+	depCopy := dep.DeepCopy()
+	depCopy.Spec.Replicas = int32p(replicas)
+
+	_, err = c.kubeClient.AppsV1().Deployments(dep.Namespace).Update(depCopy)
+	if err != nil {
+		return fmt.Errorf("scaling %s.%s to %v failed: %v", depCopy.GetName(), depCopy.Namespace, replicas, err)
+	}
+	return nil
+}
+
 // GetMetadata returns the pod label selector and svc ports
 func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
 	targetName := cd.Spec.TargetRef.Name
@@ -376,3 +397,39 @@ func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (
 func (c *DeploymentController) HaveDependenciesChanged(cd *flaggerv1.Canary) (bool, error) {
 	return c.configTracker.HasConfigChanged(cd)
 }
+
+// Finalize sets the reference instance's replica count to the primary's replica count. It is used
+// during a delete to attempt to revert the deployment back to its original state. An error is returned
+// if it is unable to update the reference deployment replicas to match the primary
+func (c *DeploymentController) Finalize(cd *flaggerv1.Canary) error {
+
+	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
+
+	// Get the reference deployment
+	refDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(cd.Spec.TargetRef.Name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// Get the primary deployment if possible; if it is not found, scale the reference up from zero
+	primaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(primaryName, metav1.GetOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) {
+			if err := c.ScaleFromZero(cd); err != nil {
+				return err
+			}
+			return nil
+		}
+		return err
+	}
+
+	// If both the reference and the primary are present, update the reference replicas to match the primary
+	if int32Default(refDep.Spec.Replicas) != int32Default(primaryDep.Spec.Replicas) {
+		// Set the replicas value on the original reference deployment
+		if err := c.Scale(cd, int32Default(primaryDep.Spec.Replicas)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go
index 0c24d8e79..ed0aee50f 100644
--- a/pkg/canary/deployment_controller_test.go
+++ b/pkg/canary/deployment_controller_test.go
@@ -188,3 +188,48 @@ func TestDeploymentController_HasTargetChanged(t *testing.T) {
 	require.NoError(t, err)
 	assert.True(t, isNew)
 }
+
+func TestDeploymentController_Finalize(t *testing.T) {
+
+	mocks := newDeploymentFixture()
+
+	tables := []struct {
+		mocks            deploymentControllerFixture
+		callInitialize   bool
+		shouldError      bool
+		expectedReplicas int32
+		canary           *flaggerv1.Canary
+	}{
+		// Primary not found, the canary is scaled up from zero
+		{mocks, false, false, 1, mocks.canary},
+		// Happy path
+		{mocks, true, false, 1, mocks.canary},
+	}
+
+	for _, table := range tables {
+		if table.callInitialize {
+			err := mocks.controller.Initialize(table.canary, true)
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+		}
+
+		err := mocks.controller.Finalize(table.canary)
+
+		if table.shouldError && err == nil {
+			t.Error("Expected error while calling Finalize, but none was returned")
+		} else if !table.shouldError && err != nil {
+			t.Errorf("Expected no error would be returned while calling Finalize, but returned %s", err)
+		}
+
+		if table.expectedReplicas > 0 {
+			c, err := mocks.kubeClient.AppsV1().Deployments(mocks.canary.Namespace).Get(mocks.canary.Name, metav1.GetOptions{})
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+			if int32Default(c.Spec.Replicas) != table.expectedReplicas {
+				t.Errorf("Expected replicas %d received replicas %d", table.expectedReplicas, int32Default(c.Spec.Replicas))
+			}
+		}
+	}
+}
diff --git a/pkg/canary/service_controller.go b/pkg/canary/service_controller.go
index c62949a25..94e14c736 100644
--- a/pkg/canary/service_controller.go
+++ b/pkg/canary/service_controller.go
@@ -221,3 +221,7 @@ func (c *ServiceController) IsPrimaryReady(_ *flaggerv1.Canary) error {
 func (c *ServiceController) IsCanaryReady(_ *flaggerv1.Canary) (bool, error) {
 	return true, nil
 }
+
+func (c *ServiceController) Finalize(cd *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 0b92c458a..9708909ba 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -128,6 +128,21 @@ func NewController(
 				}
 
 				ctrl.enqueue(new)
+			} else if !newCanary.DeletionTimestamp.IsZero() && hasFinalizer(&newCanary, finalizer) ||
+				!hasFinalizer(&newCanary, finalizer) && newCanary.Spec.RevertOnDeletion {
+				// If the canary is marked for deletion and has a finalizer, enqueue it for finalizing;
+				// if it doesn't have a finalizer yet but RevertOnDeletion is enabled in the updated spec, enqueue it
+				ctrl.enqueue(new)
+			}
+
+			// If the canary no longer desires reverting, the finalizer should be removed
+			if oldCanary.Spec.RevertOnDeletion && !newCanary.Spec.RevertOnDeletion {
+				ctrl.logger.Infof("%s.%s opting out, deleting finalizers", newCanary.Name, newCanary.Namespace)
+				err := ctrl.removeFinalizer(&newCanary, finalizer)
+				if err != nil {
+					ctrl.logger.Warnf("Failed to remove finalizers for %s.%s", oldCanary.Name, oldCanary.Namespace)
+					return
+				}
 			}
 		},
 		DeleteFunc: func(old interface{}) {
@@ -217,6 +232,33 @@ func (c *Controller) syncHandler(key string) error {
 		return nil
 	}
 
+	// Finalize if the canary has been marked for deletion and revert is desired
+	if cd.Spec.RevertOnDeletion && cd.ObjectMeta.DeletionTimestamp != nil {
+
+		// If the finalizer has previously been removed there is nothing left to do
+		if !hasFinalizer(cd, finalizer) {
+			c.logger.Infof("Canary %s.%s has been finalized", cd.Name, cd.Namespace)
+			return nil
+		}
+
+		if cd.Status.Phase != flaggerv1.CanaryPhaseTerminated {
+			if err := c.finalize(cd); err != nil {
+				return fmt.Errorf("unable to finalize canary %s.%s error %s", cd.Name, cd.Namespace, err)
+			}
+		}
+
+		// Remove the finalizer from the canary
+		if err := c.removeFinalizer(cd, finalizer); err != nil {
+			return fmt.Errorf("unable to remove finalizer for canary %s.%s", cd.Name, cd.Namespace)
+		}
+
+		// Record event
+		c.recordEventInfof(cd, "Terminated canary %s.%s", cd.Name, cd.Namespace)
+
+		c.logger.Infof("Canary %s.%s has been successfully processed and marked for deletion", cd.Name, cd.Namespace)
+		return nil
+	}
+
 	// set status condition for new canaries
 	if cd.Status.Conditions == nil {
 		if ok, conditions := canary.MakeStatusConditions(cd, flaggerv1.CanaryPhaseInitializing); ok {
@@ -233,6 +275,14 @@ func (c *Controller) syncHandler(key string) error {
 	}
 
 	c.canaries.Store(fmt.Sprintf("%s.%s", cd.Name, cd.Namespace), cd)
+
+	// If the canary opted in to revertOnDeletion, add the finalizer if it is not present
+	if cd.Spec.RevertOnDeletion && !hasFinalizer(cd, finalizer) {
+		if err := c.addFinalizer(cd, finalizer); err != nil {
+			return fmt.Errorf("unable to add finalizer to canary %s.%s", cd.Name, cd.Namespace)
+		}
+
+	}
 	c.logger.Infof("Synced %s", key)
 
 	return nil
diff --git a/pkg/controller/finalizer.go b/pkg/controller/finalizer.go
new file mode 100644
index 000000000..3c6310263
--- /dev/null
+++ b/pkg/controller/finalizer.go
@@ -0,0 +1,217 @@
+package controller
+
+import (
+	"fmt"
+
+	ex "github.com/pkg/errors"
+	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
+	"github.com/weaveworks/flagger/pkg/canary"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/util/retry"
+)
+
+const finalizer = "finalizer.flagger.app"
+
+func (c *Controller) finalize(old interface{}) error {
+	var r *flaggerv1.Canary
+	var ok bool
+
+	// Ensure the received object is a canary
+	if r, ok = old.(*flaggerv1.Canary); !ok {
+		c.logger.Warnf("Received unexpected object: %v", old)
+		return nil
+	}
+
+	_, err := c.flaggerClient.FlaggerV1beta1().Canaries(r.Namespace).Get(r.Name, metav1.GetOptions{})
+	if err != nil {
+		c.logger.With("canary", fmt.Sprintf("%s.%s", r.Name, r.Namespace)).
+			Errorf("Canary %s.%s not found, nothing to finalize", r.Name, r.Namespace)
+		return nil
+	}
+
+	// Retrieve a controller for the target kind
+	canaryController := c.canaryFactory.Controller(r.Spec.TargetRef.Kind)
+
+	// Set the status to terminating if not already in that state
+	if r.Status.Phase != flaggerv1.CanaryPhaseTerminating {
+		if err := canaryController.SetStatusPhase(r, flaggerv1.CanaryPhaseTerminating); err != nil {
+			c.logger.Infof("Failed to update status to terminating %s", err)
+			return err
+		}
+		// Record event
+		c.recordEventInfof(r, "Terminating canary %s.%s", r.Name, r.Namespace)
+	}
+
+	err = c.revertTargetRef(canaryController, r)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Target not found, no reason to wait
+			c.logger.Warnf("%s.%s failed due to %s not found", r.Name, r.Namespace, r.Spec.TargetRef.Kind)
+			return nil
+		}
+		c.logger.Debugf("%s.%s failed due to %s", r.Name, r.Namespace, err)
+		return err
+	} else {
+		// Ensure that the targetRef has reached a ready state
+		c.logger.Infof("Checking if canary %s.%s is ready", r.Name, r.Namespace)
+		retriable, err := canaryController.IsCanaryReady(r)
+		if err != nil && retriable {
+			return fmt.Errorf("%s.%s has not reached ready state during finalizing", r.Name, r.Namespace)
+		}
+
+	}
+
+	c.logger.Infof("%s.%s moving forward with router finalizing", r.Name, r.Namespace)
+	labelSelector, ports, err := canaryController.GetMetadata(r)
+	if err != nil {
+		c.logger.Errorf("%s.%s failed to get metadata for router finalizing", r.Name, r.Namespace)
+		return err
+	}
+	// Revert the router
+	if err := c.revertRouter(r, labelSelector, ports); err != nil {
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	c.logger.Infof("%s.%s moving forward with mesh finalizing", r.Name, r.Namespace)
+	// TODO: should finalization continue if the mesh cannot be reverted?
+	// Revert the mesh
+	if err := c.revertMesh(r); err != nil {
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+
+	c.logger.Infof("Finalization complete for %s.%s", r.Name, r.Namespace)
+
+	return nil
+}
+
+func (c *Controller) revertTargetRef(ctrl canary.Controller, r *flaggerv1.Canary) error {
+	if err := ctrl.Finalize(r); err != nil {
+		return err
+	}
+	c.logger.Infof("%s.%s kind %s reverted", r.Name, r.Namespace, r.Spec.TargetRef.Kind)
+	return nil
+}
+
+// revertRouter reverts the apex Kubernetes router via its Finalize method
+func (c *Controller) revertRouter(r *flaggerv1.Canary, labelSelector string, ports map[string]int32) error {
+	router := c.routerFactory.KubernetesRouter(r.Spec.TargetRef.Kind, labelSelector, map[string]string{}, ports)
+	if err := router.Finalize(r); err != nil {
+		c.logger.Errorf("%s.%s router failed with error %s", r.Name, r.Namespace, err)
+		return err
+	}
+	c.logger.Infof("Service %s.%s reverted", r.Name, r.Namespace)
+	return nil
+}
+
+// revertMesh reverts the configured mesh provider via the implementation's respective Finalize method.
+// If the Finalize method encounters an error it is returned, otherwise the revert is considered successful.
+func (c *Controller) revertMesh(r *flaggerv1.Canary) error {
+	provider := c.meshProvider
+	if r.Spec.Provider != "" {
+		provider = r.Spec.Provider
+	}
+
+	// Establish the provider
+	meshRouter := c.routerFactory.MeshRouter(provider)
+
+	// Finalize the mesh
+	err := meshRouter.Finalize(r)
+	if err != nil {
+		c.logger.Errorf("%s.%s mesh failed with error %s", r.Name, r.Namespace, err)
+		return err
+	}
+
+	c.logger.Infof("%s.%s mesh provider %s reverted", r.Name, r.Namespace, provider)
+	return nil
+}
+
+// hasFinalizer evaluates the finalizers of a given canary for the existence of a provided finalizer string.
+// It returns true if the finalizer is found, false otherwise.
+func hasFinalizer(canary *flaggerv1.Canary, finalizerString string) bool {
+	currentFinalizers := canary.ObjectMeta.Finalizers
+
+	for _, f := range currentFinalizers {
+		if f == finalizerString {
+			return true
+		}
+	}
+	return false
+}
+
+// addFinalizer adds the provided finalizer to the specified canary resource.
+// If the update fails the error is returned, otherwise the action is deemed
+// successful and the returned error is nil.
+func (c *Controller) addFinalizer(canary *flaggerv1.Canary, finalizerString string) error {
+	firstTry := true
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
+
+		var selErr error
+		if !firstTry {
+			canary, selErr = c.flaggerClient.FlaggerV1beta1().Canaries(canary.Namespace).Get(canary.GetName(), metav1.GetOptions{})
+			if selErr != nil {
+				return selErr
+			}
+		}
+
+		copy := canary.DeepCopy()
+		copy.ObjectMeta.Finalizers = append(copy.ObjectMeta.Finalizers, finalizerString)
+
+		_, err = c.flaggerClient.FlaggerV1beta1().Canaries(canary.Namespace).Update(copy)
+
+		firstTry = false
+		return
+	})
+
+	if err != nil {
+		c.logger.Errorf("Failed to add finalizer %s", err)
+		return ex.Wrap(err, "Add finalizer failed")
+	}
+	return nil
+}
+
+// removeFinalizer removes the provided finalizer from the specified canary resource.
+// If the update fails the error is returned, otherwise the action is deemed
+// successful and the returned error is nil.
+func (c *Controller) removeFinalizer(canary *flaggerv1.Canary, finalizerString string) error {
+	firstTry := true
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
+
+		var selErr error
+		if !firstTry {
+			canary, selErr = c.flaggerClient.FlaggerV1beta1().Canaries(canary.Namespace).Get(canary.GetName(), metav1.GetOptions{})
+			if selErr != nil {
+				return selErr
+			}
+		}
+		copy := canary.DeepCopy()
+
+		newSlice := make([]string, 0)
+		for _, item := range copy.ObjectMeta.Finalizers {
+			if item == finalizerString {
+				continue
+			}
+			newSlice = append(newSlice, item)
+		}
+		if len(newSlice) == 0 {
+			newSlice = nil
+		}
+		copy.ObjectMeta.Finalizers = newSlice
+
+		_, err = c.flaggerClient.FlaggerV1beta1().Canaries(canary.Namespace).Update(copy)
+
+		firstTry = false
+		return
+	})
+
+	if err != nil {
+		return ex.Wrap(err, "Remove finalizer failed")
+	}
+	return nil
+}
diff --git a/pkg/controller/finalizer_test.go b/pkg/controller/finalizer_test.go
new file mode 100644
index 000000000..02d189071
--- /dev/null
+++ b/pkg/controller/finalizer_test.go
@@ -0,0 +1,114 @@
+package controller
+
+import (
+	"fmt"
+
+	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
+	fakeFlagger "github.com/weaveworks/flagger/pkg/client/clientset/versioned/fake"
+	"github.com/weaveworks/flagger/pkg/logger"
+	"k8s.io/apimachinery/pkg/runtime"
+	k8sTesting "k8s.io/client-go/testing"
+
+	"testing"
+)
+
+// Test hasFinalizer
+func TestFinalizer_hasFinalizer(t *testing.T) {
+
+	withFinalizer := newDeploymentTestCanary()
+	withFinalizer.Finalizers = append(withFinalizer.Finalizers, finalizer)
+
+	tables := []struct {
+		canary *flaggerv1.Canary
+		result bool
+	}{
+		{newDeploymentTestCanary(), false},
+		{withFinalizer, true},
+	}
+
+	for _, table := range tables {
+		isPresent := hasFinalizer(table.canary, finalizer)
+		if isPresent != table.result {
+			t.Errorf("Result of hasFinalizer returned [%t], but expected [%t]", isPresent, table.result)
+		}
+	}
+}
+
+func TestFinalizer_addFinalizer(t *testing.T) {
+
+	mockError := fmt.Errorf("failed to add finalizer to canary %s", "testCanary")
+	cs := fakeFlagger.NewSimpleClientset(newDeploymentTestCanary())
+	// Prepend so it is evaluated before the catch-all * reactor
+	cs.PrependReactor("update", "canaries", func(action k8sTesting.Action) (handled bool, ret runtime.Object, err error) {
+		return true, nil, mockError
+	})
+
+	logger, _ := logger.NewLogger("debug")
+	m := fixture{
+		canary:        newDeploymentTestCanary(),
+		flaggerClient: cs,
+		ctrl: &Controller{
+			flaggerClient: cs,
+			logger:        logger,
+		},
+		logger: logger,
+	}
+
+	tables := []struct {
+		mock   fixture
+		canary *flaggerv1.Canary
+		error  error
+	}{
+		{newDeploymentFixture(nil), newDeploymentTestCanary(), nil},
+		{m, m.canary, mockError},
+	}
+
+	for _, table := range tables {
+		response := table.mock.ctrl.addFinalizer(table.canary, finalizer)
+
+		if table.error != nil && response == nil {
+			t.Errorf("Expected an error from addFinalizer, but none was present")
+		} else if table.error == nil && response != nil {
+			t.Errorf("Expected no error from addFinalizer, but returned error %s", response)
+		}
+	}
+
+}
+
+func TestFinalizer_removeFinalizer(t *testing.T) {
+
+	withFinalizer := newDeploymentTestCanary()
+	withFinalizer.Finalizers = append(withFinalizer.Finalizers, finalizer)
+
+	mockError := fmt.Errorf("failed to remove finalizer from canary %s", "testCanary")
+	cs := fakeFlagger.NewSimpleClientset(newDeploymentTestCanary())
+	// Prepend so it is evaluated before the catch-all * reactor
+	cs.PrependReactor("update", "canaries", func(action k8sTesting.Action) (handled bool, ret runtime.Object, err error) {
+		return true, nil, mockError
+	})
+	m := fixture{
+		canary:        withFinalizer,
+		flaggerClient: cs,
+		ctrl:          &Controller{flaggerClient: cs},
+	}
+
+	tables := []struct {
+		mock   fixture
+		canary *flaggerv1.Canary
+		error  error
+	}{
+		{newDeploymentFixture(nil), withFinalizer, nil},
+		{m, m.canary, mockError},
+	}
+
+	for _, table := range tables {
+		response := table.mock.ctrl.removeFinalizer(table.canary, finalizer)
+
+		if table.error != nil && response == nil {
+			t.Errorf("Expected an error from removeFinalizer, but none was present")
+		} else if table.error == nil && response != nil {
+			t.Errorf("Expected no error from removeFinalizer, but returned error %s", response)
+		}
+
+	}
+}
diff --git a/pkg/router/appmesh.go b/pkg/router/appmesh.go
index 83a32cf34..ac7335dca 100644
--- a/pkg/router/appmesh.go
+++ b/pkg/router/appmesh.go
@@ -483,6 +483,10 @@ func (ar *AppMeshRouter) gatewayAnnotations(canary *flaggerv1.Canary) map[string
 	return a
 }
 
+func (ar *AppMeshRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
+
 func int64p(i int64) *int64 {
 	return &i
 }
diff --git a/pkg/router/contour.go b/pkg/router/contour.go
index dfd7dd113..0b362cb90 100644
--- a/pkg/router/contour.go
+++ b/pkg/router/contour.go
@@ -417,3 +417,7 @@ func (cr *ContourRouter) makeLinkerdHeaderValue(canary *flaggerv1.Canary, servic
 	}
 
 }
+
+func (cr *ContourRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/router/gloo.go b/pkg/router/gloo.go
index c2401e04e..3eab5f0a4 100644
--- a/pkg/router/gloo.go
+++ b/pkg/router/gloo.go
@@ -185,3 +185,7 @@ func (gr *GlooRouter) SetRoutes(
 	}
 	return nil
 }
+
+func (gr *GlooRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/router/ingress.go b/pkg/router/ingress.go
index 70682aac0..14d2b03f1 100644
--- a/pkg/router/ingress.go
+++ b/pkg/router/ingress.go
@@ -237,3 +237,7 @@ func (i *IngressRouter) makeHeaderAnnotations(annotations map[string]string,
 func (i *IngressRouter) GetAnnotationWithPrefix(suffix string) string {
 	return fmt.Sprintf("%v/%v", i.annotationsPrefix, suffix)
 }
+
+func (i *IngressRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/router/istio.go b/pkg/router/istio.go
index 3cdfd220a..547183191 100644
--- a/pkg/router/istio.go
+++ b/pkg/router/istio.go
@@ -1,6 +1,7 @@
 package router
 
 import (
+	"encoding/json"
 	"fmt"
 
 	"github.com/google/go-cmp/cmp"
@@ -211,6 +212,22 @@ func (ir *IstioRouter) reconcileVirtualService(canary *flaggerv1.Canary) error {
 	vtClone := virtualService.DeepCopy()
 	vtClone.Spec = newSpec
 
+	// If the kubectl.kubernetes.io/last-applied-configuration annotation is present there is no
+	// need to duplicate the serialization. If it is not present, store the serialized object in
+	// the flagger.kubernetes.io/original-configuration annotation
+	if _, ok := vtClone.Annotations[kubectlAnnotation]; !ok {
+		b, err := json.Marshal(virtualService.Spec)
+		if err != nil {
+			ir.logger.Warnf("Unable to marshal VS %s for original-configuration annotation", virtualService.Name)
+		}
+
+		if vtClone.ObjectMeta.Annotations == nil {
+			vtClone.ObjectMeta.Annotations = make(map[string]string)
+		}
+
+		vtClone.ObjectMeta.Annotations[configAnnotation] = string(b)
+	}
+
 	_, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Update(vtClone)
 	if err != nil {
 		return fmt.Errorf("VirtualService %s.%s update error: %w", apexName, canary.Namespace, err)
@@ -348,6 +365,50 @@ func (ir *IstioRouter) SetRoutes(
 	return nil
 }
 
+func (ir *IstioRouter) Finalize(canary *flaggerv1.Canary) error {
+
+	// Restore the virtual service spec from the original-configuration annotation if present
+	apexName, _, _ := canary.GetServiceNames()
+
+	vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(apexName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	var storedSpec istiov1alpha3.VirtualServiceSpec
+	// If an annotation can be retrieved and unmarshalled, use it to restore the spec
+	if a, ok := vs.ObjectMeta.Annotations[kubectlAnnotation]; ok {
+		var storedVS istiov1alpha3.VirtualService
+		err := json.Unmarshal([]byte(a), &storedVS)
+		if err != nil {
+			return fmt.Errorf("VirtualService %s.%s failed to unmarshal annotation %s, unable to revert",
+				apexName, canary.Namespace, kubectlAnnotation)
+		}
+		storedSpec = storedVS.Spec
+	} else if a, ok := vs.ObjectMeta.Annotations[configAnnotation]; ok {
+		var spec istiov1alpha3.VirtualServiceSpec
+		err := json.Unmarshal([]byte(a), &spec)
+		if err != nil {
+			return fmt.Errorf("VirtualService %s.%s failed to unmarshal annotation %s, unable to revert",
+				apexName, canary.Namespace, configAnnotation)
+		}
+		storedSpec = spec
+	} else {
+		ir.logger.Warnf("VirtualService %s.%s original configuration not found, unable to revert", apexName, canary.Namespace)
+		return nil
+	}
+
+	clone := vs.DeepCopy()
+	clone.Spec = storedSpec
+
+	_, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Update(clone)
+	if err != nil {
+		return fmt.Errorf("VirtualService %s.%s update error %v, unable to revert", apexName, canary.Namespace, err)
+	}
+
+	return nil
+}
+
 // mergeMatchConditions appends the URI match rules to canary conditions
 func mergeMatchConditions(canary, defaults []istiov1alpha3.HTTPMatchRequest) []istiov1alpha3.HTTPMatchRequest {
 	for i := range canary {
diff --git a/pkg/router/istio_test.go b/pkg/router/istio_test.go
index 0b9c3591d..3e5926303 100644
--- a/pkg/router/istio_test.go
+++ b/pkg/router/istio_test.go
@@ -1,13 +1,17 @@
 package router
 
 import (
+	"encoding/json"
 	"fmt"
 	"testing"
 
+
"github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" istiov1alpha3 "github.com/weaveworks/flagger/pkg/apis/istio/v1alpha3" ) @@ -329,3 +333,118 @@ func TestIstioRouter_GatewayPort(t *testing.T) { port := vs.Spec.Http[0].Route[0].Destination.Port.Number assert.Equal(t, uint32(mocks.canary.Spec.Service.Port), port) } + +func TestIstioRouter_Finalize(t *testing.T) { + mocks := newFixture(nil) + router := &IstioRouter{ + logger: mocks.logger, + flaggerClient: mocks.flaggerClient, + istioClient: mocks.meshClient, + kubeClient: mocks.kubeClient, + } + + flaggerSpec := &istiov1alpha3.VirtualServiceSpec{ + Http: []istiov1alpha3.HTTPRoute{ + { + Match: mocks.canary.Spec.Service.Match, + Rewrite: mocks.canary.Spec.Service.Rewrite, + Timeout: mocks.canary.Spec.Service.Timeout, + Retries: mocks.canary.Spec.Service.Retries, + CorsPolicy: mocks.canary.Spec.Service.CorsPolicy, + }, + }, + } + + kubectlSpec := &istiov1alpha3.VirtualServiceSpec{ + Hosts: []string{"podinfo"}, + Gateways: []string{"ingressgateway.istio-system.svc.cluster.local"}, + Http: []istiov1alpha3.HTTPRoute{ + { + Match: nil, + Route: []istiov1alpha3.DestinationWeight{ + { + Destination: istiov1alpha3.Destination{Host: "podinfo"}, + }, + }, + }, + }, + } + + tables := []struct { + router *IstioRouter + spec *istiov1alpha3.VirtualServiceSpec + shouldError bool + createVS bool + canary *v1beta1.Canary + callReconcile bool + annotation string + }{ + //VS not found + {router: router, spec: nil, shouldError: true, createVS: false, canary: mocks.canary, callReconcile: false, annotation: ""}, + //No annotation found but still finalizes + {router: router, spec: nil, shouldError: false, createVS: false, canary: mocks.canary, callReconcile: true, annotation: ""}, + //Spec should match annotation after finalize + {router: router, spec: flaggerSpec, shouldError: false, createVS: true, canary: mocks.canary, callReconcile: true, annotation: "flagger"}, + //Need to test kubectl annotation + {router: router, spec: kubectlSpec, shouldError: false, createVS: true, canary: mocks.canary, callReconcile: true, annotation: "kubectl"}, + } + + for _, table := range tables { + + var err error + + if table.createVS { + vs, err := router.istioClient.NetworkingV1alpha3().VirtualServices(table.canary.Namespace).Get(table.canary.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err.Error()) + } + + if vs.Annotations == nil { + vs.Annotations = make(map[string]string) + } + + if table.annotation == "flagger" { + b, err := json.Marshal(table.spec) + if err != nil { + t.Fatal(err.Error()) + } + + vs.Annotations[configAnnotation] = string(b) + } else if table.annotation == "kubectl" { + vs.Annotations[kubectlAnnotation] = `{"apiVersion": "networking.istio.io/v1alpha3","kind": "VirtualService","metadata": {"annotations": {},"name": "podinfo","namespace": "test"}, "spec": {"gateways": ["ingressgateway.istio-system.svc.cluster.local"],"hosts": ["podinfo"],"http": [{"route": [{"destination": {"host": "podinfo"}}]}]}}` + + } + _, err = router.istioClient.NetworkingV1alpha3().VirtualServices(table.canary.Namespace).Update(vs) + if err != nil { + t.Fatal(err.Error()) + } + } + + if table.callReconcile { + err = router.Reconcile(table.canary) + if err != nil { + t.Fatal(err.Error()) + + } + } + + err = router.Finalize(table.canary) + + if table.shouldError && err == nil { + t.Errorf("Expected error 
from Finalize but error was not returned")
+		} else if !table.shouldError && err != nil {
+			t.Errorf("Expected no error from Finalize but error was returned %s", err)
+		} else if table.spec != nil {
+			vs, err := router.istioClient.NetworkingV1alpha3().VirtualServices(table.canary.Namespace).Get(table.canary.Name, metav1.GetOptions{})
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+			if cmp.Diff(vs.Spec, *table.spec) != "" {
+				t.Errorf("Expected spec %+v but received %+v", table.spec, vs.Spec)
+			}
+		}
+
+	}
+}
diff --git a/pkg/router/kubernetes.go b/pkg/router/kubernetes.go
index a81a3f68a..663d4302c 100644
--- a/pkg/router/kubernetes.go
+++ b/pkg/router/kubernetes.go
@@ -10,4 +10,6 @@ type KubernetesRouter interface {
 	Initialize(canary *flaggerv1.Canary) error
 	// Reconcile creates or updates the main service
 	Reconcile(canary *flaggerv1.Canary) error
+	// Finalize reverts the main service
+	Finalize(canary *flaggerv1.Canary) error
 }
diff --git a/pkg/router/kubernetes_default.go b/pkg/router/kubernetes_default.go
index 4511ecb97..80ecb58a2 100644
--- a/pkg/router/kubernetes_default.go
+++ b/pkg/router/kubernetes_default.go
@@ -1,6 +1,7 @@
 package router
 
 import (
+	"encoding/json"
 	"fmt"
 
 	"github.com/google/go-cmp/cmp"
@@ -162,3 +163,61 @@ func (c *KubernetesDefaultRouter) reconcileService(canary *flaggerv1.Canary, nam
 
 	return nil
 }
+
+// Finalize reverts the apex router if it is not owned by the Flagger controller.
+func (c *KubernetesDefaultRouter) Finalize(canary *flaggerv1.Canary) error {
+	apexName, _, _ := canary.GetServiceNames()
+
+	svc, err := c.kubeClient.CoreV1().Services(canary.Namespace).Get(apexName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// No need to do any reconciliation if the router is owned by the controller
+	if hasCanaryOwnerRef, isOwned := c.isOwnedByCanary(svc, canary.Name); !hasCanaryOwnerRef && !isOwned {
+		// If the kubectl annotation is present it is used to revert the selector, otherwise reconcile
+		if a, ok := svc.Annotations[kubectlAnnotation]; ok {
+			var storedSvc corev1.Service
+			err := json.Unmarshal([]byte(a), &storedSvc)
+			if err != nil {
+				return fmt.Errorf("router %s.%s failed to unmarshal annotation %s, unable to revert",
+					svc.Name, svc.Namespace, kubectlAnnotation)
+			}
+			clone := svc.DeepCopy()
+			clone.Spec.Selector = storedSvc.Spec.Selector
+
+			_, err = c.kubeClient.CoreV1().Services(canary.Namespace).Update(clone)
+			if err != nil {
+				return fmt.Errorf("service %s update error: %w", clone.Name, err)
+			}
+
+		} else {
+			err = c.reconcileService(canary, apexName, canary.Spec.TargetRef.Name)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// isOwnedByCanary evaluates whether an object carries an OwnerReference of kind Canary with the
+// same name as the canary under evaluation. It returns two booleans: the first is true if a
+// Canary OwnerReference is present, the second is true if the object is owned by the supplied name.
+func (c KubernetesDefaultRouter) isOwnedByCanary(obj interface{}, name string) (bool, bool) { + var object metav1.Object + var ok bool + if object, ok = obj.(metav1.Object); ok { + if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + if ownerRef.Kind == flaggerv1.CanaryKind { + //And the name exists return true + if name == ownerRef.Name { + return true, true + } + return true, false + } + } + } + return false, false +} diff --git a/pkg/router/kubernetes_default_test.go b/pkg/router/kubernetes_default_test.go index 89002885a..a5efd24a1 100644 --- a/pkg/router/kubernetes_default_test.go +++ b/pkg/router/kubernetes_default_test.go @@ -1,11 +1,17 @@ package router import ( + "fmt" "testing" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/kubernetes/fake" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -104,3 +110,255 @@ func TestServiceRouter_Undo(t *testing.T) { assert.Equal(t, "http", canarySvc.Spec.Ports[0].Name) assert.Equal(t, int32(9898), canarySvc.Spec.Ports[0].Port) } + +func TestServiceRouter_isOwnedByCanary(t *testing.T) { + mocks := newFixture(nil) + router := &KubernetesDefaultRouter{ + kubeClient: mocks.kubeClient, + flaggerClient: mocks.flaggerClient, + logger: mocks.logger, + } + + isController := new(bool) + *isController = true + + tables := []struct { + svc *corev1.Service + isOwned bool + hasOwnerRef bool + }{ + //owned + { + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podinfo", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "flagger.app/v1alpha3", + Kind: "Canary", + Name: "podinfo", + Controller: isController, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 8080, + }}, + Selector: map[string]string{"app": "podinfo"}, + }, + }, isOwned: true, hasOwnerRef: true, + }, + //Owner ref but kind not Canary + { + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podinfo", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "flagger.app/v1alpha3", + Kind: "Deployment", + Name: "podinfo", + Controller: isController, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 8080, + }}, + Selector: map[string]string{"app": "podinfo"}, + }, + }, isOwned: false, hasOwnerRef: false, + }, + //Owner ref but name doesn't match + { + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podinfo", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "flagger.app/v1alpha3", + Kind: "Canary", + Name: "notpodinfo", + Controller: isController, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 8080, + }}, + Selector: map[string]string{"app": "podinfo"}, + }, + }, isOwned: false, hasOwnerRef: true, + }, + //No ownerRef + { + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podinfo", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 8080, + }}, + Selector: map[string]string{"app": "podinfo"}, + }, + }, isOwned: false, hasOwnerRef: false, + }, + } + + for _, table := range tables { + hasOwnerRef, wasOwned := router.isOwnedByCanary(table.svc, mocks.canary.Name) + 
if table.isOwned && !wasOwned {
+			t.Error("Expected to be owned, but was not")
+		} else if !table.isOwned && wasOwned {
+			t.Error("Expected not to be owned but was")
+		} else if table.hasOwnerRef && !hasOwnerRef {
+			t.Error("Expected to contain OwnerReference but not present")
+		} else if !table.hasOwnerRef && hasOwnerRef {
+			t.Error("Expected not to have an OwnerReference but present")
+		}
+	}
+
+}
+
+func TestServiceRouter_Finalize(t *testing.T) {
+
+	mocks := newFixture(nil)
+	router := &KubernetesDefaultRouter{
+		kubeClient:    mocks.kubeClient,
+		flaggerClient: mocks.flaggerClient,
+		logger:        mocks.logger,
+		labelSelector: "app",
+	}
+
+	isController := new(bool)
+	*isController = true
+
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "podinfo",
+			Namespace: "default",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: "flagger.app/v1alpha3",
+					Kind:       "Canary",
+					Name:       "NotOwned",
+					Controller: isController,
+				},
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{{
+				Name:     "http",
+				Protocol: "TCP",
+				Port:     9898,
+			}},
+			Selector: map[string]string{"app": "podinfo"},
+		},
+	}
+
+	kubectlSvc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "podinfo",
+			Namespace: "default",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: "flagger.app/v1alpha3",
+					Kind:       "Canary",
+					Name:       "NotOwned",
+					Controller: isController,
+				},
+			},
+			Annotations: map[string]string{
+				kubectlAnnotation: `{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app":"podinfo"},"name":"podinfo","namespace":"test"},"spec":{"ports":[{"name":"http","port":9898,"protocol":"TCP","targetPort":9898}],"selector":{"app":"podinfo"},"type":"ClusterIP"}}`,
+			},
+		},
+		Spec: corev1.ServiceSpec{
+			Ports: []corev1.ServicePort{{
+				Name:     "http",
+				Protocol: "TCP",
+				Port:     9898,
+			}},
+			Selector: map[string]string{"app": "podinfo"},
+		},
+	}
+
+	tables := []struct {
+		router           *KubernetesDefaultRouter
+		callSetupMethods bool
+		shouldError      bool
+		canary           *v1beta1.Canary
+		shouldMutate     bool
+	}{
+		// Won't reconcile since it is owned and would be garbage collected
+		{router: router, callSetupMethods: true, shouldError: false, canary: mocks.canary, shouldMutate: false},
+		// Service not found
+		{router: &KubernetesDefaultRouter{kubeClient: fake.NewSimpleClientset(), logger: mocks.logger}, callSetupMethods: false, shouldError: true, canary: mocks.canary, shouldMutate: false},
+		// Not owned
+		{router: &KubernetesDefaultRouter{kubeClient: fake.NewSimpleClientset(svc), logger: mocks.logger}, callSetupMethods: false, shouldError: false, canary: mocks.canary, shouldMutate: true},
+		// Kubectl annotation
+		{router: &KubernetesDefaultRouter{kubeClient: fake.NewSimpleClientset(kubectlSvc), logger: mocks.logger}, callSetupMethods: false, shouldError: false, canary: mocks.canary, shouldMutate: true},
+	}
+
+	for _, table := range tables {
+
+		if table.callSetupMethods {
+			err := table.router.Initialize(table.canary)
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+
+			err = table.router.Reconcile(table.canary)
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+		}
+
+		err := table.router.Finalize(table.canary)
+
+		if table.shouldError && err == nil {
+			t.Error("Should have errored")
+		} else if !table.shouldError && err != nil {
+			t.Errorf("Shouldn't error but did %s", err)
+		}
+
+		svc, err := table.router.kubeClient.CoreV1().Services(table.canary.Namespace).Get(table.canary.Name, metav1.GetOptions{})
+		if err != nil {
+			if !errors.IsNotFound(err) {
+				t.Fatal(err.Error())
+			}
+		} else {
+			if svc.Spec.Ports[0].Name != "http" {
+				t.Errorf("Got svc port name %s wanted %s", svc.Spec.Ports[0].Name, "http")
+			}
+
+			if svc.Spec.Ports[0].Port != 9898 {
+				t.Errorf("Got svc port %v wanted %v", svc.Spec.Ports[0].Port, 9898)
+			}
+
+			if table.shouldMutate {
+				if svc.Spec.Selector["app"] != table.canary.Name {
+					t.Errorf("Got svc selector %v wanted %v", svc.Spec.Selector["app"], table.canary.Name)
+				}
+			} else {
+				if svc.Spec.Selector["app"] != fmt.Sprintf("%s-primary", table.canary.Name) {
+					t.Errorf("Got svc selector %v wanted %v", svc.Spec.Selector["app"], fmt.Sprintf("%s-primary", table.canary.Name))
+				}
+			}
+		}
+	}
+
+}
diff --git a/pkg/router/kubernetes_noop.go b/pkg/router/kubernetes_noop.go
index b516cee8c..b561c6861 100644
--- a/pkg/router/kubernetes_noop.go
+++ b/pkg/router/kubernetes_noop.go
@@ -16,3 +16,7 @@ func (c *KubernetesNoopRouter) Initialize(_ *flaggerv1.Canary) error {
 func (c *KubernetesNoopRouter) Reconcile(_ *flaggerv1.Canary) error {
 	return nil
 }
+
+func (c *KubernetesNoopRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/router/nop.go b/pkg/router/nop.go
index 4775cfda8..91d468b3d 100644
--- a/pkg/router/nop.go
+++ b/pkg/router/nop.go
@@ -22,3 +22,7 @@ func (*NopRouter) GetRoutes(canary *flaggerv1.Canary) (primaryWeight int, canary
 	}
 	return 100, 0, false, nil
 }
+
+func (c *NopRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/pkg/router/router.go b/pkg/router/router.go
index bd72e583a..91bd63efe 100644
--- a/pkg/router/router.go
+++ b/pkg/router/router.go
@@ -2,8 +2,12 @@ package router
 
 import flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
 
+const configAnnotation = "flagger.kubernetes.io/original-configuration"
+const kubectlAnnotation = "kubectl.kubernetes.io/last-applied-configuration"
+
 type Interface interface {
 	Reconcile(canary *flaggerv1.Canary) error
 	SetRoutes(canary *flaggerv1.Canary, primaryWeight int, canaryWeight int, mirrored bool) error
 	GetRoutes(canary *flaggerv1.Canary) (primaryWeight int, canaryWeight int, mirrored bool, err error)
+	Finalize(canary *flaggerv1.Canary) error
 }
diff --git a/pkg/router/smi.go b/pkg/router/smi.go
index 332822073..a20e4528a 100644
--- a/pkg/router/smi.go
+++ b/pkg/router/smi.go
@@ -224,3 +224,7 @@ func (sr *SmiRouter) getWithConvert(canary *flaggerv1.Canary, host string) (*smi
 	}
 	return ts, nil
 }
+
+func (sr *SmiRouter) Finalize(canary *flaggerv1.Canary) error {
+	return nil
+}
diff --git a/test/e2e-istio-tests.sh b/test/e2e-istio-tests.sh
index 316006018..53231439d 100755
--- a/test/e2e-istio-tests.sh
+++ b/test/e2e-istio-tests.sh
@@ -328,6 +328,237 @@ done
 
 echo '✔ A/B testing promotion test passed'
 
+cat <<EOF | kubectl apply -f -
+# canary manifest with revertOnDeletion enabled (content omitted in this excerpt)
+EOF
+
+echo '>>> Waiting for finalizers to be present'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl get canary podinfo -n test -o jsonpath='{.metadata.finalizers}' | grep "finalizer.flagger.app" && ok=true || ok=false
+  sleep 10
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n test describe canary/podinfo
+    echo "No more retries left"
+    exit 1
+  fi
+done
+
+kubectl delete canary podinfo -n test
+
+echo '>>> Waiting for primary to revert'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl get deployment podinfo -n test -o jsonpath='{.spec.replicas}' | grep 1 && ok=true || ok=false
+  sleep 10
+  kubectl -n istio-system logs deployment/flagger --tail 10
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n test describe canary/podinfo
+    echo "No more retries left"
+    exit 1
+  fi
+done
+echo '✔ Delete testing passed'
+
+
+cat <<EOF | kubectl apply -f -
+# canary manifest with revertOnDeletion enabled (content omitted in this excerpt)
+EOF
+
+echo '>>> Waiting for canary to initialize'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
+  sleep 5
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n istio-system logs deployment/flagger
+    echo "No more retries left"
+    exit 1
+  fi
+done
+
+kubectl delete canary podinfo -n test
+
+echo '>>> Waiting for revert'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl get svc/podinfo vs/podinfo -n test -o jsonpath="{range .items[*]}{.metadata.name}{'\n'}{end}" | wc -l | grep 2 && ok=true || ok=false
+  sleep 10
+  kubectl -n istio-system logs deployment/flagger --tail 10
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n test describe canary/podinfo
+    kubectl -n test describe svc/podinfo
+    kubectl -n test describe vs/podinfo
+    echo "No more retries left"
+    exit 1
+  fi
+done
+echo '✔ Revert testing passed'
+
 kubectl -n istio-system logs deployment/flagger
 
 echo '✔ All tests passed'
diff --git a/test/e2e-kubernetes-tests-daemonset.sh b/test/e2e-kubernetes-tests-daemonset.sh
index 443f4dfbb..e5ab93c39 100755
--- a/test/e2e-kubernetes-tests-daemonset.sh
+++ b/test/e2e-kubernetes-tests-daemonset.sh
@@ -108,4 +108,92 @@ done
 
 echo '✔ Canary promotion test passed'
 
+
+cat <<EOF | kubectl apply -f -
+# canary manifest with revertOnDeletion enabled (content omitted in this excerpt)
+EOF
+
+echo '>>> Waiting for finalizers to be present'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl get canary podinfo -n test -o jsonpath='{.metadata.finalizers}' | grep "finalizer.flagger.app" && ok=true || ok=false
+  sleep 10
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n test describe canary/podinfo
+    echo "No more retries left"
+    exit 1
+  fi
+done
+
+kubectl delete canary podinfo -n test
+
+echo '>>> Waiting for primary to revert'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+  kubectl get daemonset podinfo -n test -o jsonpath='{.status.numberReady}' | grep 1 && ok=true || ok=false
+  sleep 10
+  kubectl -n flagger-system logs deployment/flagger --tail 1
+  count=$(($count + 1))
+  if [[ ${count} -eq ${retries} ]]; then
+    kubectl -n test describe canary/podinfo
+    echo "No more retries left"
+    exit 1
+  fi
+done
+
+echo '✔ Canary finalize passed'
+
 kubectl -n flagger-system logs deployment/flagger
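+
+# A possible extra check (a sketch, not part of the original patch): once the
+# finalizer has been removed, the canary object itself should be gone from the
+# cluster. The resource names mirror the fixtures used above.
+echo '>>> Verifying canary deletion'
+if kubectl -n test get canary/podinfo >/dev/null 2>&1; then
+  kubectl -n test describe canary/podinfo
+  echo "Canary still present after finalization"
+  exit 1
+fi
+echo '✔ Canary deletion verified'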