feat: Delete machine (#40)
* feat: Microvm deletion

* Remove finalizers from Cluster

We do not currently create any supporting infrastructure for our
machines. We can add this back if we need it.
Callisto13 authored Dec 10, 2021

1 parent 90f8216 commit cbc4042
Showing 8 changed files with 184 additions and 35 deletions.
6 changes: 6 additions & 0 deletions api/v1alpha1/condition_consts.go
@@ -30,6 +30,12 @@ const (
// MicrovmPendingReason indicates the microvm is in a pending state.
MicrovmPendingReason = "MicrovmPending"

// MicrovmDeletingReason indicates the microvm is in a deleting state.
MicrovmDeletingReason = "MicrovmDeleting"

// MicrovmDeleteFailedReason indicates the microvm failed to delete cleanly.
MicrovmDeleteFailedReason = "MicrovmDeleteFailed"

// MicrovmUnknownStateReason indicates that the microvm is in an unknown or unsupported state
// for reconciliation.
MicrovmUnknownStateReason = "MicrovmUnknownState"
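
Note: these reason constants surface on the machine's MicrovmReadyCondition via machineScope.SetNotReady in the controller changes below. As a minimal sketch of how a consumer might read them back — assuming the existing MicrovmReadyCondition constant and CAPI's conditions helpers, which the tests in this commit also use — consider:

// Illustrative sketch only, not part of this commit.
import (
	infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// isDeleting reports whether the machine's ready condition carries the
// deleting reason set during reconcileDelete.
func isDeleting(machine *infrav1.MicrovmMachine) bool {
	c := conditions.Get(machine, infrav1.MicrovmReadyCondition)

	return c != nil && c.Reason == infrav1.MicrovmDeletingReason
}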
6 changes: 0 additions & 6 deletions api/v1alpha1/microvmcluster_types.go
@@ -8,12 +8,6 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

-const (
-	// ClusterFinalizer allows ReconcileMicrovmCluster to clean up resources associated with MicrovmCluster before
-	// removing it from the apiserver.
-	ClusterFinalizer = "microvmcluster.infrastructure.cluster.x-k8s.io"
-)

// MicrovmClusterSpec defines the desired state of MicrovmCluster.
type MicrovmClusterSpec struct {
// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
4 changes: 4 additions & 0 deletions controllers/helpers_test.go
@@ -356,3 +356,7 @@ func hasMachineFinalizer(machine *infrav1.MicrovmMachine) bool {

return false
}

func assertMachineNotReady(g *WithT, machine *infrav1.MicrovmMachine) {
g.Expect(machine.Status.Ready).To(BeFalse())
}
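
The delete tests added below also call an assertConditionFalse helper that predates this commit; its definition is not part of this diff. A plausible reconstruction, assuming Gomega and the CAPI conditions utilities already used in this package:

// Hypothetical shape; the real helper lives elsewhere in helpers_test.go.
func assertConditionFalse(g *WithT, machine *infrav1.MicrovmMachine, name clusterv1.ConditionType, reason string) {
	c := conditions.Get(machine, name)
	g.Expect(c).ToNot(BeNil())
	g.Expect(c.Status).To(Equal(corev1.ConditionFalse))
	g.Expect(c.Reason).To(Equal(reason))
}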
7 changes: 1 addition & 6 deletions controllers/microvmcluster_controller.go
@@ -24,7 +24,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -120,9 +119,7 @@ func (r *MicrovmClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
func (r *MicrovmClusterReconciler) reconcileDelete(_ context.Context, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
clusterScope.Info("Reconciling MicrovmCluster delete")

-	// TODO: do any required deletion
-
-	controllerutil.RemoveFinalizer(clusterScope.MvmCluster, infrav1.ClusterFinalizer)
+	// We currently do not do any Cluster creation so there is nothing to delete.

return reconcile.Result{}, nil
}
@@ -134,8 +131,6 @@ func (r *MicrovmClusterReconciler) reconcileNormal(ctx context.Context, clusterS
return reconcile.Result{}, errControlplaneEndpointRequired
}

-	controllerutil.AddFinalizer(clusterScope.MvmCluster, infrav1.ClusterFinalizer)

clusterScope.MvmCluster.Status.Ready = true

available := r.isAPIServerAvailable(ctx, clusterScope)
15 changes: 1 addition & 14 deletions controllers/microvmcluster_controller_test.go
@@ -42,8 +42,6 @@ func TestClusterReconciliationNoEndpoint(t *testing.T) {

c := conditions.Get(reconciled, infrav1.LoadBalancerAvailableCondition)
g.Expect(c).To(BeNil())

-	g.Expect(reconciled.Finalizers).To(HaveLen(0))
}

func TestClusterReconciliationWithClusterEndpoint(t *testing.T) {
@@ -89,8 +87,6 @@ func TestClusterReconciliationWithClusterEndpoint(t *testing.T) {
c = conditions.Get(reconciled, clusterv1.ReadyCondition)
g.Expect(c).ToNot(BeNil())
g.Expect(c.Status).To(Equal(corev1.ConditionTrue))

-	g.Expect(reconciled.Finalizers).To(HaveLen(1))
}

func TestClusterReconciliationWithMvmClusterEndpoint(t *testing.T) {
@@ -136,8 +132,6 @@ func TestClusterReconciliationWithMvmClusterEndpoint(t *testing.T) {
c = conditions.Get(reconciled, clusterv1.ReadyCondition)
g.Expect(c).ToNot(BeNil())
g.Expect(c.Status).To(Equal(corev1.ConditionTrue))

-	g.Expect(reconciled.Finalizers).To(HaveLen(1))
}

func TestClusterReconciliationWithClusterEndpointAPIServerNotReady(t *testing.T) {
@@ -177,8 +171,6 @@ func TestClusterReconciliationWithClusterEndpointAPIServerNotReady(t *testing.T)
c = conditions.Get(reconciled, clusterv1.ReadyCondition)
g.Expect(c).ToNot(BeNil())
g.Expect(c.Status).To(Equal(corev1.ConditionFalse))

-	g.Expect(reconciled.Finalizers).To(HaveLen(1))
}

func TestClusterReconciliationMicrovmAlreadyDeleted(t *testing.T) {
@@ -221,8 +213,6 @@ func TestClusterReconciliationNotOwner(t *testing.T) {

c := conditions.Get(reconciled, infrav1.LoadBalancerAvailableCondition)
g.Expect(c).To(BeNil())

-	g.Expect(reconciled.Finalizers).To(HaveLen(0))
}

func TestClusterReconciliationWhenPaused(t *testing.T) {
@@ -251,8 +241,6 @@ func TestClusterReconciliationWhenPaused(t *testing.T) {

c := conditions.Get(reconciled, infrav1.LoadBalancerAvailableCondition)
g.Expect(c).To(BeNil())

-	g.Expect(reconciled.Finalizers).To(HaveLen(0))
}

func TestClusterReconciliationDelete(t *testing.T) {
@@ -276,7 +264,6 @@ func TestClusterReconciliationDelete(t *testing.T) {
g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))

 	// TODO: when we move to envtest this should return a NotFound error. #30
-	reconciled, err := getMicrovmCluster(context.TODO(), client, testClusterName, testClusterNamespace)
+	_, err = getMicrovmCluster(context.TODO(), client, testClusterName, testClusterNamespace)
 	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(reconciled.Finalizers).To(HaveLen(0))
}
53 changes: 45 additions & 8 deletions controllers/microvmmachine_controller.go
@@ -145,14 +145,52 @@ func (r *MicrovmMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return r.reconcileNormal(ctx, machineScope)
}

-func (r *MicrovmMachineReconciler) reconcileDelete(_ context.Context, machineScope *scope.MachineScope) (reconcile.Result, error) {
+func (r *MicrovmMachineReconciler) reconcileDelete(ctx context.Context, machineScope *scope.MachineScope) (reconcile.Result, error) {
 	machineScope.Info("Reconciling MicrovmMachine delete")
 
-	// TODO: call flintlock to delete
+	mvmSvc, err := r.getMicrovmService(machineScope)
+	if err != nil {
+		machineScope.Error(err, "failed to get microvm service")
+
+		return ctrl.Result{}, nil
+	}
+
+	microvm, err := mvmSvc.Get(ctx)
+	if err != nil && !isSpecNotFound(err) {
+		machineScope.Error(err, "failed getting microvm")
+
+		return ctrl.Result{}, err
+	}
+
+	if microvm != nil {
+		machineScope.Info("deleting microvm")
+
+		// Mark the machine as no longer ready before we delete
+		machineScope.SetNotReady(infrav1.MicrovmDeletingReason, clusterv1.ConditionSeverityInfo, "")
+		if err := machineScope.Patch(); err != nil {
+			machineScope.Error(err, "failed to patch object")
+
+			return ctrl.Result{}, err
+		}
+
+		// TODO: we should check the state returned from the above Get and only
+		// call Delete if not "deleting". Flintlock #310
+		if _, err := mvmSvc.Delete(ctx); err != nil {
+			machineScope.SetNotReady(infrav1.MicrovmDeleteFailedReason, clusterv1.ConditionSeverityError, "")
+
+			return ctrl.Result{}, err
+		}
+
+		return ctrl.Result{RequeueAfter: requeuePeriod}, nil
+	}
+
+	// By this point Flintlock has no record of the MvM, so we are good to clear
+	// the finalizer
 	controllerutil.RemoveFinalizer(machineScope.MvmMachine, infrav1.MachineFinalizer)
 
-	return reconcile.Result{}, nil
+	machineScope.Info("microvm deleted")
+
+	return ctrl.Result{}, nil
 }

func (r *MicrovmMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope) (reconcile.Result, error) {
@@ -180,12 +218,10 @@ func (r *MicrovmMachineReconciler) reconcileNormal(ctx context.Context, machineS
}

microvm, err := mvmSvc.Get(ctx)
-	if err != nil {
-		if !isSpecNotFound(err) {
-			machineScope.Error(err, "failed checking if microvm exists")
+	if err != nil && !isSpecNotFound(err) {
+		machineScope.Error(err, "failed checking if microvm exists")
 
-			return ctrl.Result{}, err
-		}
+		return ctrl.Result{}, err
}

controllerutil.AddFinalizer(machineScope.MvmMachine, infrav1.MachineFinalizer)
@@ -226,6 +262,7 @@ func (r *MicrovmMachineReconciler) reconcileNormal(ctx context.Context, machineS
return ctrl.Result{RequeueAfter: requeuePeriod}, errMicrovmUnknownState
}

machineScope.Info("microvm created")
machineScope.MvmMachine.Spec.ProviderID = &microvm.Spec.Id
machineScope.SetReady()
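
One note on the TODO in reconcileDelete above: the Delete call is currently unconditional, so a microvm already being torn down would receive a second Delete. Once flintlock grows a deleting state (the subject of Flintlock #310), the call site could be guarded roughly as follows. This is a sketch only: MicroVMStatus_DELETING and the Status.State field layout are assumptions, not current flintlock API.

// Hypothetical guard; MicroVMStatus_DELETING does not exist in flintlock yet.
if microvm.Status.State != flintlocktypes.MicroVMStatus_DELETING {
	if _, err := mvmSvc.Delete(ctx); err != nil {
		machineScope.SetNotReady(infrav1.MicrovmDeleteFailedReason, clusterv1.ConditionSeverityError, "")

		return ctrl.Result{}, err
	}
}

// Requeue either way and poll until the Get above stops returning the microvm.
return ctrl.Result{RequeueAfter: requeuePeriod}, nil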

116 changes: 115 additions & 1 deletion controllers/microvmmachine_controller_test.go
@@ -7,15 +7,20 @@ import (
"encoding/base64"
"errors"
"testing"
"time"

. "github.com/onsi/gomega"

apierrors "k8s.io/apimachinery/pkg/api/errors"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

flintlocktypes "github.com/weaveworks/flintlock/api/types"

"github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1"
infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1"
"github.com/weaveworks/cluster-api-provider-microvm/internal/services/microvm/mock_client"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMachineReconcile(t *testing.T) {
@@ -57,6 +62,15 @@ func TestMachineReconcile(t *testing.T) {
t.Run("and create microvm succeeds", machineReconcileNoVmCreateSucceeds)
t.Run("and create microvm succeeds and reconciles again", machineReconcileNoVmCreateAdditionReconcile)
})

t.Run("microvm_has_deletion_timestamp", func(t *testing.T) {
t.Parallel()

t.Run("and delete microvm succeeds", machineReconcileDeleteVmSucceeds)
t.Run("microvm get returns nil", machineReconcileDeleteGetReturnsNil)
t.Run("microvm get returns error", machineReconcileDeleteGetErrors)
t.Run("microvm delete returns error", machineReconcileDeleteDeleteErrors)
})
}

func machineReconcileMissingMvmMachine(t *testing.T) {
@@ -337,9 +351,109 @@ func machineReconcileNoVmCreateAdditionReconcile(t *testing.T) {
g.Expect(result.IsZero()).To(BeFalse(), "Expect requeue to be requested after create")

withExistingMicrovm(&fakeAPIClient, flintlocktypes.MicroVMStatus_CREATED)
-	result, err = reconcileMachine(client, &fakeAPIClient)
+	_, err = reconcileMachine(client, &fakeAPIClient)
g.Expect(err).NotTo(HaveOccurred(), "Reconciling should not return an error")

reconciled, err := getMicrovmMachine(client, testMachineName, testClusterNamespace)
g.Expect(err).NotTo(HaveOccurred(), "Getting microvm machine should not fail")
assertMachineReconciled(g, reconciled)
}

func machineReconcileDeleteVmSucceeds(t *testing.T) {
t.Parallel()
g := NewWithT(t)

apiObjects := defaultClusterObjects()
apiObjects.MvmMachine.DeletionTimestamp = &metav1.Time{
Time: time.Now(),
}
apiObjects.MvmMachine.Finalizers = []string{v1alpha1.MachineFinalizer}

fakeAPIClient := mock_client.FakeClient{}
withExistingMicrovm(&fakeAPIClient, flintlocktypes.MicroVMStatus_CREATED)

client := createFakeClient(g, apiObjects.AsRuntimeObjects())

result, err := reconcileMachine(client, &fakeAPIClient)
g.Expect(err).NotTo(HaveOccurred(), "Reconciling when deleting microvm should not return error")
g.Expect(result.Requeue).To(BeFalse())
g.Expect(result.RequeueAfter).To(BeNumerically(">", time.Duration(0)))

g.Expect(fakeAPIClient.DeleteMicroVMCallCount()).To(Equal(1))
_, deleteReq, _ := fakeAPIClient.DeleteMicroVMArgsForCall(0)
g.Expect(deleteReq.Id).To(Equal(testMachineName))
g.Expect(deleteReq.Namespace).To(Equal(testClusterNamespace))

_, err = getMicrovmMachine(client, testMachineName, testClusterNamespace)
g.Expect(apierrors.IsNotFound(err)).To(BeFalse())
}

func machineReconcileDeleteGetReturnsNil(t *testing.T) {
t.Parallel()
g := NewWithT(t)

apiObjects := defaultClusterObjects()
apiObjects.MvmMachine.DeletionTimestamp = &metav1.Time{
Time: time.Now(),
}
apiObjects.MvmMachine.Finalizers = []string{v1alpha1.MachineFinalizer}

fakeAPIClient := mock_client.FakeClient{}
withMissingMicrovm(&fakeAPIClient)

client := createFakeClient(g, apiObjects.AsRuntimeObjects())

result, err := reconcileMachine(client, &fakeAPIClient)
g.Expect(err).NotTo(HaveOccurred(), "Reconciling when deleting microvm should not return error")
g.Expect(result.Requeue).To(BeFalse())
g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))

g.Expect(fakeAPIClient.DeleteMicroVMCallCount()).To(Equal(0))

_, err = getMicrovmMachine(client, testMachineName, testClusterNamespace)
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
}

func machineReconcileDeleteGetErrors(t *testing.T) {
t.Parallel()
g := NewWithT(t)

apiObjects := defaultClusterObjects()
apiObjects.MvmMachine.DeletionTimestamp = &metav1.Time{
Time: time.Now(),
}
apiObjects.MvmMachine.Finalizers = []string{v1alpha1.MachineFinalizer}

fakeAPIClient := mock_client.FakeClient{}
withExistingMicrovm(&fakeAPIClient, flintlocktypes.MicroVMStatus_CREATED)
fakeAPIClient.GetMicroVMReturns(nil, errors.New("something terrible happened"))

client := createFakeClient(g, apiObjects.AsRuntimeObjects())
_, err := reconcileMachine(client, &fakeAPIClient)
g.Expect(err).To(HaveOccurred(), "Reconciling when microvm service exists errors should return error")
}

func machineReconcileDeleteDeleteErrors(t *testing.T) {
t.Parallel()
g := NewWithT(t)

apiObjects := defaultClusterObjects()
apiObjects.MvmMachine.DeletionTimestamp = &metav1.Time{
Time: time.Now(),
}
apiObjects.MvmMachine.Finalizers = []string{v1alpha1.MachineFinalizer}

fakeAPIClient := mock_client.FakeClient{}
withExistingMicrovm(&fakeAPIClient, flintlocktypes.MicroVMStatus_CREATED)
fakeAPIClient.DeleteMicroVMReturns(nil, errors.New("something terrible happened"))

client := createFakeClient(g, apiObjects.AsRuntimeObjects())
_, err := reconcileMachine(client, &fakeAPIClient)
g.Expect(err).To(HaveOccurred(), "Reconciling when deleting microvm errors should return error")

reconciled, err := getMicrovmMachine(client, testMachineName, testClusterNamespace)
g.Expect(err).NotTo(HaveOccurred(), "Getting microvm machine should not fail")

assertConditionFalse(g, reconciled, infrav1.MicrovmReadyCondition, infrav1.MicrovmDeleteFailedReason)
assertMachineNotReady(g, reconciled)
}
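
Taken together, these tests exercise only the first half of the two-pass delete: the reconcile that issues the flintlock Delete and requeues. A follow-up assertion for the second pass — in which Get no longer finds the microvm and the finalizer is cleared — might look like this (a sketch reusing the same helpers, not a test from this commit):

// Second pass: flintlock no longer knows the microvm, so the finalizer is
// removed and the fake client reports the machine as gone.
withMissingMicrovm(&fakeAPIClient)

result, err := reconcileMachine(client, &fakeAPIClient)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))

_, err = getMicrovmMachine(client, testMachineName, testClusterNamespace)
g.Expect(apierrors.IsNotFound(err)).To(BeTrue())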
12 changes: 12 additions & 0 deletions internal/services/microvm/service.go
@@ -11,6 +11,7 @@ import (
"github.com/weaveworks/cluster-api-provider-microvm/internal/defaults"
"github.com/weaveworks/cluster-api-provider-microvm/internal/scope"
"github.com/yitsushi/macpot"
"google.golang.org/protobuf/types/known/emptypb"
"k8s.io/utils/pointer"

flintlockv1 "github.com/weaveworks/flintlock/api/services/microvm/v1alpha1"
@@ -89,3 +90,14 @@ func (s *Service) Get(ctx context.Context) (*flintlocktypes.MicroVM, error) {

return resp.Microvm, nil
}

func (s *Service) Delete(ctx context.Context) (*emptypb.Empty, error) {
s.scope.V(defaults.LogLevelDebug).Info("Deleting microvm for machine", "machine-name", s.scope.Name(), "cluster-name", s.scope.ClusterName())

input := &flintlockv1.DeleteMicroVMRequest{
Id: s.scope.Name(),
Namespace: s.scope.Namespace(),
}

return s.client.DeleteMicroVM(ctx, input)
}
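
Get and Delete hand flintlock's gRPC errors straight back to the controller, which screens them with isSpecNotFound (defined elsewhere in the controllers package, not in this diff). A plausible shape for that helper, assuming flintlock returns a standard gRPC NotFound status for a missing microvm:

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isSpecNotFound treats a gRPC NotFound from flintlock as "no such microvm".
// Hypothetical reconstruction; the real helper may differ.
func isSpecNotFound(err error) bool {
	s, ok := status.FromError(err)

	return ok && s.Code() == codes.NotFound
}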
