diff --git a/docs/FAQ.md b/docs/FAQ.md
index 9ede8f41f..471f9acb1 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -37,6 +37,7 @@ this document. Few of the answers assume that the MCM being used is in conjuctio
 * [What health checks are performed on a machine?](#what-health-checks-are-performed-on-a-machine)
 * [How does rate limiting replacement of machine work in MCM ? How is it related to meltdown protection?](#how-does-rate-limiting-replacement-of-machine-work-in-mcm-how-is-it-related-to-meltdown-protection)
 * [How MCM responds when scale-out/scale-in is done during rolling update of a machinedeployment?](#how-mcm-responds-when-scale-outscale-in-is-done-during-rolling-update-of-a-machinedeployment)
+ * [How are some unhealthy machines drained quickly?](#how-are-some-unhealthy-machines-drained-quickly)
 * [Troubleshooting](#troubleshooting)
 * [My machine is stuck in deletion for 1 hr, why?](#My-machine-is-stuck-in-deletion-for-1-hr-why)
@@ -256,6 +257,24 @@ During update for scaling event, a machineSet is updated if any of the below is
 Once scaling is achieved, rollout continues.
+## How are some unhealthy machines drained quickly?
+
+If a node is unhealthy for longer than the `machine-health-timeout` specified for the `machine-controller`, the controller's
+health check moves the machine to the `Failed` phase. By default, the `machine-health-timeout` is `10` minutes.
+
+`Failed` machines have their deletion timestamp set and then move to the `Terminating` phase, at which point the node
+drain process is initiated. The drain process is invoked either *gracefully* or *forcefully*.
+
+The usual drain process is graceful: pods are evicted from the node, and the drain process waits until the volumes
+attached to the node are detached and attached to a new node. However, if the node's `Ready` condition is `False` or its `ReadonlyFilesystem`
+condition is `True` for more than `5` minutes (non-configurable), a forceful drain is initiated. In a forceful drain, pods are deleted
+and the `VolumeAttachment` objects associated with the old node are also marked for deletion. This is followed by deletion of the
+cloud provider VM associated with the `Machine`, and finally by deletion of the `Node` object.
+
+During deletion of the VM, only the local data disks and boot disks associated with the VM are deleted. The disks associated
+with persistent volumes are left untouched, as their attach/detach and mount/unmount operations are handled by the Kubernetes
+attach-detach controller in conjunction with the CSI driver.
+
 # Troubleshooting
 ### My machine is stuck in deletion for 1 hr, why?
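The 5-minute rule described in the FAQ text above maps to the node-condition check this change adds to `drainNode`. The following is a minimal, self-contained sketch of that decision, assuming hypothetical names (`shouldForceDrain`, a standalone `nodeNotReadyDuration`); it is not the controller code itself:

```go
// Illustrative sketch only — not MCM source. shouldForceDrain is a hypothetical
// stand-in for the condition check drainNode performs before deciding between a
// graceful and a forceful drain.
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nodeNotReadyDuration mirrors the non-configurable 5-minute window described above.
const nodeNotReadyDuration = 5 * time.Minute

// shouldForceDrain reports whether the node has been Ready=False or
// ReadonlyFilesystem=True for longer than nodeNotReadyDuration.
func shouldForceDrain(conditions []corev1.NodeCondition, now time.Time) bool {
	for _, c := range conditions {
		tooLong := now.Sub(c.LastTransitionTime.Time) > nodeNotReadyDuration
		switch {
		case c.Type == corev1.NodeReady && c.Status != corev1.ConditionTrue && tooLong:
			return true
		case c.Type == "ReadonlyFilesystem" && c.Status == corev1.ConditionTrue && tooLong:
			return true
		}
	}
	return false
}

func main() {
	// A node that has been NotReady for 10 minutes qualifies for a forceful drain.
	notReadySince := metav1.NewTime(time.Now().Add(-10 * time.Minute))
	conditions := []corev1.NodeCondition{{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastTransitionTime: notReadySince,
	}}
	fmt.Println(shouldForceDrain(conditions, time.Now())) // true
}
```

In the actual change below, the same outcome is expressed by setting `forceDeleteMachine` and `forceDeletePods` before `RunDrain` is invoked, and by routing the post-drain step to `DelVolumesAttachments` instead of `InitiateVMDeletion`.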
diff --git a/pkg/apis/machine/v1alpha1/machine_types.go b/pkg/apis/machine/v1alpha1/machine_types.go index 1051f31de..7ddc6b8dc 100644 --- a/pkg/apis/machine/v1alpha1/machine_types.go +++ b/pkg/apis/machine/v1alpha1/machine_types.go @@ -188,7 +188,7 @@ const ( // MachineOperationHealthCheck indicates that the operation was a create MachineOperationHealthCheck MachineOperationType = "HealthCheck" - // MachineOperationDelete indicates that the operation was a create + // MachineOperationDelete indicates that the operation was a delete MachineOperationDelete MachineOperationType = "Delete" ) diff --git a/pkg/util/provider/machinecontroller/machine.go b/pkg/util/provider/machinecontroller/machine.go index 7435a3828..dbd95b480 100644 --- a/pkg/util/provider/machinecontroller/machine.go +++ b/pkg/util/provider/machinecontroller/machine.go @@ -24,13 +24,6 @@ import ( "strings" "time" - machineapi "github.com/gardener/machine-controller-manager/pkg/apis/machine" - "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" - "github.com/gardener/machine-controller-manager/pkg/apis/machine/validation" - "github.com/gardener/machine-controller-manager/pkg/util/provider/driver" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machineutils" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,6 +32,14 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + + machineapi "github.com/gardener/machine-controller-manager/pkg/apis/machine" + "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" + "github.com/gardener/machine-controller-manager/pkg/apis/machine/validation" + "github.com/gardener/machine-controller-manager/pkg/util/provider/driver" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machineutils" ) /* @@ -592,6 +593,9 @@ func (c *controller) triggerDeletionFlow(ctx context.Context, deleteMachineReque case strings.Contains(machine.Status.LastOperation.Description, machineutils.InitiateDrain): return c.drainNode(ctx, deleteMachineRequest) + case strings.Contains(machine.Status.LastOperation.Description, machineutils.DelVolumesAttachments): + return c.deleteNodeVolAttachments(ctx, deleteMachineRequest) + case strings.Contains(machine.Status.LastOperation.Description, machineutils.InitiateVMDeletion): return c.deleteVM(ctx, deleteMachineRequest) diff --git a/pkg/util/provider/machinecontroller/machine_test.go b/pkg/util/provider/machinecontroller/machine_test.go index 9204628cb..3e49b0eb8 100644 --- a/pkg/util/provider/machinecontroller/machine_test.go +++ b/pkg/util/provider/machinecontroller/machine_test.go @@ -21,15 +21,6 @@ import ( "math" "time" - machineapi "github.com/gardener/machine-controller-manager/pkg/apis/machine" - "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" - "github.com/gardener/machine-controller-manager/pkg/apis/machine/validation" - fakemachineapi "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/typed/machine/v1alpha1/fake" - 
customfake "github.com/gardener/machine-controller-manager/pkg/fakeclient" - "github.com/gardener/machine-controller-manager/pkg/util/provider/driver" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status" - "github.com/gardener/machine-controller-manager/pkg/util/provider/machineutils" . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" @@ -39,6 +30,16 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" k8stesting "k8s.io/client-go/testing" + + machineapi "github.com/gardener/machine-controller-manager/pkg/apis/machine" + "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1" + "github.com/gardener/machine-controller-manager/pkg/apis/machine/validation" + fakemachineapi "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned/typed/machine/v1alpha1/fake" + customfake "github.com/gardener/machine-controller-manager/pkg/fakeclient" + "github.com/gardener/machine-controller-manager/pkg/util/provider/driver" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status" + "github.com/gardener/machine-controller-manager/pkg/util/provider/machineutils" ) const testNamespace = "test" @@ -1491,7 +1492,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Machine deletion in process. Drain successful. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf("Drain successful. %s", machineutils.InitiateVMDeletion), retry: machineutils.ShortRetry, nodeTerminationConditionIsSet: true, machine: newMachine( @@ -1631,7 +1632,7 @@ var _ = Describe("machine", func() { ), }, }), - Entry("Drain skipping as machine is NotReady for a long time (5 minutes)", &data{ + Entry("Force Drain as machine is NotReady for a long time (5 minutes)", &data{ setup: setup{ secrets: []*corev1.Secret{ { @@ -1696,7 +1697,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Skipping drain as machine is NotReady for over 5minutes. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf(fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments)), retry: machineutils.ShortRetry, machine: newMachine( &v1alpha1.MachineTemplateSpec{ @@ -1715,7 +1716,7 @@ var _ = Describe("machine", func() { LastUpdateTime: metav1.Now(), }, LastOperation: v1alpha1.LastOperation{ - Description: fmt.Sprintf("Skipping drain as machine is NotReady for over 5minutes. %s", machineutils.InitiateVMDeletion), + Description: fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments), State: v1alpha1.MachineStateProcessing, Type: v1alpha1.MachineOperationDelete, LastUpdateTime: metav1.Now(), @@ -1733,7 +1734,7 @@ var _ = Describe("machine", func() { ), }, }), - Entry("Drain skipping as machine is in ReadonlyFilesystem for a long time (5 minutes)", &data{ + Entry("Force Drain as machine is in ReadonlyFilesystem for a long time (5 minutes)", &data{ setup: setup{ secrets: []*corev1.Secret{ { @@ -1798,7 +1799,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Skipping drain as machine is in ReadonlyFilesystem for over 5minutes. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf(fmt.Sprintf("Force Drain successful. 
%s", machineutils.DelVolumesAttachments)), retry: machineutils.ShortRetry, machine: newMachine( &v1alpha1.MachineTemplateSpec{ @@ -1817,7 +1818,7 @@ var _ = Describe("machine", func() { LastUpdateTime: metav1.Now(), }, LastOperation: v1alpha1.LastOperation{ - Description: fmt.Sprintf("Skipping drain as machine is in ReadonlyFilesystem for over 5minutes. %s", machineutils.InitiateVMDeletion), + Description: fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments), State: v1alpha1.MachineStateProcessing, Type: v1alpha1.MachineOperationDelete, LastUpdateTime: metav1.Now(), @@ -1835,7 +1836,7 @@ var _ = Describe("machine", func() { ), }, }), - Entry("Drain skipping as machine is NotReady for a long time(5 min) ,also ReadonlyFilesystem is true for a long time (5 minutes)", &data{ + Entry("Force Drain as machine is NotReady for a long time(5 min) ,also ReadonlyFilesystem is true for a long time (5 minutes)", &data{ setup: setup{ secrets: []*corev1.Secret{ { @@ -1905,7 +1906,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Skipping drain as machine is NotReady for over 5minutes. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf(fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments)), retry: machineutils.ShortRetry, machine: newMachine( &v1alpha1.MachineTemplateSpec{ @@ -1924,7 +1925,7 @@ var _ = Describe("machine", func() { LastUpdateTime: metav1.Now(), }, LastOperation: v1alpha1.LastOperation{ - Description: fmt.Sprintf("Skipping drain as machine is NotReady for over 5minutes. %s", machineutils.InitiateVMDeletion), + Description: fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments), State: v1alpha1.MachineStateProcessing, Type: v1alpha1.MachineOperationDelete, LastUpdateTime: metav1.Now(), @@ -2007,7 +2008,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Machine deletion in process. Drain successful. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf("Drain successful. %s", machineutils.InitiateVMDeletion), retry: machineutils.ShortRetry, machine: newMachine( &v1alpha1.MachineTemplateSpec{ @@ -2109,7 +2110,7 @@ var _ = Describe("machine", func() { }, }, expect: expect{ - err: fmt.Errorf("Machine deletion in process. Drain successful. %s", machineutils.InitiateVMDeletion), + err: fmt.Errorf("Drain successful. %s", machineutils.InitiateVMDeletion), retry: machineutils.ShortRetry, machine: newMachine( &v1alpha1.MachineTemplateSpec{ @@ -2236,7 +2237,7 @@ var _ = Describe("machine", func() { LastUpdateTime: metav1.Now(), }, LastOperation: v1alpha1.LastOperation{ - Description: fmt.Sprintf("Drain failed due to - Failed to update node. However, since it's a force deletion shall continue deletion of VM. %s", machineutils.InitiateVMDeletion), + Description: fmt.Sprintf("Drain failed due to - Failed to update node. However, since it's a force deletion shall continue deletion of VM. %s", machineutils.DelVolumesAttachments), State: v1alpha1.MachineStateProcessing, Type: v1alpha1.MachineOperationDelete, LastUpdateTime: metav1.Now(), @@ -2450,7 +2451,7 @@ var _ = Describe("machine", func() { LastUpdateTime: metav1.Now(), }, LastOperation: v1alpha1.LastOperation{ - Description: fmt.Sprintf("Drain failed due to - Failed to update node. However, since it's a force deletion shall continue deletion of VM. %s", machineutils.InitiateVMDeletion), + Description: fmt.Sprintf("Drain failed due to - Failed to update node. 
However, since it's a force deletion shall continue deletion of VM. %s", machineutils.DelVolumesAttachments), State: v1alpha1.MachineStateProcessing, Type: v1alpha1.MachineOperationDelete, LastUpdateTime: metav1.Now(), diff --git a/pkg/util/provider/machinecontroller/machine_util.go b/pkg/util/provider/machinecontroller/machine_util.go index 0fb700da7..df445192d 100644 --- a/pkg/util/provider/machinecontroller/machine_util.go +++ b/pkg/util/provider/machinecontroller/machine_util.go @@ -45,6 +45,7 @@ import ( utiltime "github.com/gardener/machine-controller-manager/pkg/util/time" v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -52,6 +53,8 @@ import ( "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + storageclient "k8s.io/client-go/kubernetes/typed/storage/v1" + storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/klog/v2" ) @@ -1009,7 +1012,6 @@ func (c *controller) drainNode(ctx context.Context, deleteMachineRequest *driver printLogInitError(message, &err, &description, machine) skipDrain = true } else { - for _, condition := range machine.Status.Conditions { if condition.Type == v1.NodeReady { nodeReadyCondition = condition @@ -1018,16 +1020,17 @@ } } + klog.V(3).Infof("(drainNode) For node %q, machine %q, nodeReadyCondition: %s, readOnlyFileSystemCondition: %s", nodeName, machine.Name, nodeReadyCondition, readOnlyFileSystemCondition) if !isConditionEmpty(nodeReadyCondition) && (nodeReadyCondition.Status != v1.ConditionTrue) && (time.Since(nodeReadyCondition.LastTransitionTime.Time) > nodeNotReadyDuration) { - // If node is in NotReady state over 5 minutes then skip the drain - message := "Skipping drain as machine is NotReady for over 5minutes." + message := "Setting forceDeletePods & forceDeleteMachine to true for drain as machine is NotReady for over 5min" + forceDeleteMachine = true + forceDeletePods = true printLogInitError(message, &err, &description, machine) - skipDrain = true } else if !isConditionEmpty(readOnlyFileSystemCondition) && (readOnlyFileSystemCondition.Status != v1.ConditionFalse) && (time.Since(readOnlyFileSystemCondition.LastTransitionTime.Time) > nodeNotReadyDuration) { - // If node is set to ReadonlyFilesystem over 5 minutes then skip the drain - message := "Skipping drain as machine is in ReadonlyFilesystem for over 5minutes." + message := "Setting forceDeletePods & forceDeleteMachine to true for drain as machine is in ReadonlyFilesystem for over 5min" + forceDeleteMachine = true + forceDeletePods = true printLogInitError(message, &err, &description, machine) - skipDrain = true } } @@ -1106,21 +1109,26 @@ func (c *controller) drainNode(ctx context.Context, deleteMachineRequest *driver c.nodeLister, c.volumeAttachmentHandler, ) + klog.V(3).Infof("(drainNode) Invoking RunDrain, forceDeleteMachine: %t, forceDeletePods: %t, timeOutDuration: %s", forceDeleteMachine, forceDeletePods, timeOutDuration) err = drainOptions.RunDrain(ctx) if err == nil { // Drain successful klog.V(2).Infof("Drain successful for machine %q ,providerID %q, backing node %q. \nBuf:%v \nErrBuf:%v", machine.Name, getProviderID(machine), getNodeName(machine), buf, errBuf) - description = fmt.Sprintf("Drain successful. 
%s", machineutils.InitiateVMDeletion) + if forceDeletePods { + description = fmt.Sprintf("Force Drain successful. %s", machineutils.DelVolumesAttachments) + } else { // regular drain already waits for vol detach and attach for another node. + description = fmt.Sprintf("Drain successful. %s", machineutils.InitiateVMDeletion) + } + err = fmt.Errorf(description) state = v1alpha1.MachineStateProcessing // Return error even when machine object is updated - err = fmt.Errorf("Machine deletion in process. " + description) } else if err != nil && forceDeleteMachine { // Drain failed on force deletion klog.Warningf("Drain failed for machine %q. However, since it's a force deletion shall continue deletion of VM. \nBuf:%v \nErrBuf:%v \nErr-Message:%v", machine.Name, buf, errBuf, err) - description = fmt.Sprintf("Drain failed due to - %s. However, since it's a force deletion shall continue deletion of VM. %s", err.Error(), machineutils.InitiateVMDeletion) + description = fmt.Sprintf("Drain failed due to - %s. However, since it's a force deletion shall continue deletion of VM. %s", err.Error(), machineutils.DelVolumesAttachments) state = v1alpha1.MachineStateProcessing } else { klog.Warningf("Drain failed for machine %q , providerID %q ,backing node %q. \nBuf:%v \nErrBuf:%v \nErr-Message:%v", machine.Name, getProviderID(machine), getNodeName(machine), buf, errBuf, err) @@ -1150,6 +1158,66 @@ func (c *controller) drainNode(ctx context.Context, deleteMachineRequest *driver return machineutils.ShortRetry, err } +// deleteNodeVolAttachments deletes VolumeAttachment(s) for a node before moving to VM deletion stage. +func (c *controller) deleteNodeVolAttachments(ctx context.Context, deleteMachineRequest *driver.DeleteMachineRequest) (machineutils.RetryPeriod, error) { + var ( + description string + state v1alpha1.MachineState + machine = deleteMachineRequest.Machine + nodeName = machine.Labels[v1alpha1.NodeLabelKey] + retryPeriod = machineutils.ShortRetry + ) + node, err := c.nodeLister.Get(nodeName) + if err != nil { + if !apierrors.IsNotFound(err) { + // an error other than NotFound, let us try again later. + return retryPeriod, err + } + // node not found move to vm deletion + description = fmt.Sprintf("Skipping deleteNodeVolAttachments due to - %s. Moving to VM Deletion. %s", err.Error(), machineutils.InitiateVMDeletion) + state = v1alpha1.MachineStateProcessing + retryPeriod = 0 + } else if len(node.Status.VolumesAttached) == 0 { + description = fmt.Sprintf("Node Volumes for node: %s are already detached. Moving to VM Deletion. %s", nodeName, machineutils.InitiateVMDeletion) + state = v1alpha1.MachineStateProcessing + retryPeriod = 0 + } else { + // case: where node.Status.VolumesAttached > 0 + liveNodeVolAttachments, err := getLiveVolumeAttachmentsForNode(c.volumeAttachementLister, nodeName, machine.Name) + if err != nil { + klog.Errorf("(deleteNodeVolAttachments) Error obtaining VolumeAttachment(s) for node %q, machine %q: %s", nodeName, machine.Name, err) + return retryPeriod, err + } + if len(liveNodeVolAttachments) == 0 { + description = fmt.Sprintf("No Live VolumeAttachments for node: %s. Moving to VM Deletion. 
%s", nodeName, machineutils.InitiateVMDeletion) + state = v1alpha1.MachineStateProcessing + } else { + err = deleteVolumeAttachmentsForNode(ctx, c.targetCoreClient.StorageV1().VolumeAttachments(), nodeName, liveNodeVolAttachments) + if err != nil { + klog.Errorf("(deleteNodeVolAttachments) Error deleting volume attachments for node %q, machine %q: %s", nodeName, machine.Name, err) + } else { + klog.V(3).Infof("(deleteNodeVolAttachments) Successfully deleted all volume attachments for node %q, machine %q", nodeName, machine.Name) + } + return retryPeriod, nil + } + } + now := metav1.Now() + klog.V(4).Infof("(deleteVolumeAttachmentsForNode) For node %q, machine %q, set LastOperation.Description: %q", nodeName, machine.Name, description) + err = c.machineStatusUpdate( + ctx, + machine, + v1alpha1.LastOperation{ + Description: description, + State: state, + Type: machine.Status.LastOperation.Type, + LastUpdateTime: now, + }, + machine.Status.CurrentStatus, + machine.Status.LastKnownState, + ) + return retryPeriod, err +} + // deleteVM attempts to delete the VM backed by the machine object func (c *controller) deleteVM(ctx context.Context, deleteMachineRequest *driver.DeleteMachineRequest) (machineutils.RetryPeriod, error) { var ( @@ -1508,6 +1576,34 @@ func (c *controller) tryMarkingMachineFailed(ctx context.Context, machine, clone return machineutils.ShortRetry, err } +func getLiveVolumeAttachmentsForNode(volAttachLister storagelisters.VolumeAttachmentLister, nodeName string, machineName string) ([]*storagev1.VolumeAttachment, error) { + volAttachments, err := volAttachLister.List(labels.NewSelector()) + if err != nil { + return nil, fmt.Errorf("cant list volume attachments for node %q, machine %q: %w", nodeName, machineName, err) + } + nodeVolAttachments := make([]*storagev1.VolumeAttachment, 0, len(volAttachments)) + for _, va := range volAttachments { + if va.Spec.NodeName == nodeName && va.ObjectMeta.DeletionTimestamp == nil { + nodeVolAttachments = append(nodeVolAttachments, va) + } + } + return nodeVolAttachments, nil +} + +func deleteVolumeAttachmentsForNode(ctx context.Context, attachIf storageclient.VolumeAttachmentInterface, nodeName string, volAttachments []*storagev1.VolumeAttachment) error { + klog.V(3).Infof("(deleteVolumeAttachmentsForNode) Deleting #%d VolumeAttachment(s) for node %q", len(volAttachments), nodeName) + var errs []error + var delOpts = metav1.DeleteOptions{} + for _, va := range volAttachments { + err := attachIf.Delete(ctx, va.Name, delOpts) + if err != nil { + errs = append(errs, err) + } + klog.V(4).Infof("(deleteVolumeAttachmentsForNode) Deleted VolumeAttachment %q for node %q", va.Name, nodeName) + } + return errors.Join(errs...) 
+} + func getProviderID(machine *v1alpha1.Machine) string { return machine.Spec.ProviderID } diff --git a/pkg/util/provider/machineutils/utils.go b/pkg/util/provider/machineutils/utils.go index e9fe909f8..8bc546a0f 100644 --- a/pkg/util/provider/machineutils/utils.go +++ b/pkg/util/provider/machineutils/utils.go @@ -30,6 +30,9 @@ const ( // InitiateDrain specifies next step as initiate node drain InitiateDrain = "Initiate node drain" + // DelVolumesAttachments specifies next step as deleting volume attachments + DelVolumesAttachments = "Delete Volume Attachments" + // InitiateVMDeletion specifies next step as initiate VM deletion InitiateVMDeletion = "Initiate VM deletion" diff --git a/pkg/util/provider/metrics/metrics.go b/pkg/util/provider/metrics/metrics.go index 6e73e6724..50033312a 100644 --- a/pkg/util/provider/metrics/metrics.go +++ b/pkg/util/provider/metrics/metrics.go @@ -113,4 +113,4 @@ func init() { registerMachineSubsystemMetrics() registerCloudAPISubsystemMetrics() registerMiscellaneousMetrics() -} \ No newline at end of file +}
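The VolumeAttachment cleanup introduced above boils down to listing the `VolumeAttachment` objects whose `spec.nodeName` matches the drained node, skipping those already being deleted, and deleting the rest through `StorageV1().VolumeAttachments()`. Below is a minimal sketch of that pattern against the fake clientset, assuming a hypothetical `node-1`/`pv-1` fixture and a local `ptr` helper (none of which are part of this change):

```go
// Hedged, self-contained sketch — not MCM code and not one of its tests.
package main

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	nodeName := "node-1"

	// A VolumeAttachment bound to node-1, as the CSI external-attacher would create it.
	va := &storagev1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-va-1"},
		Spec: storagev1.VolumeAttachmentSpec{
			Attacher: "csi.example.com",
			NodeName: nodeName,
			Source:   storagev1.VolumeAttachmentSource{PersistentVolumeName: ptr("pv-1")},
		},
	}

	client := fake.NewSimpleClientset(va)
	vaClient := client.StorageV1().VolumeAttachments()

	// List all VolumeAttachments, keep the live ones that reference the node,
	// and delete them — the same filter-and-delete step the force-drain path runs.
	list, _ := vaClient.List(ctx, metav1.ListOptions{})
	for _, item := range list.Items {
		if item.Spec.NodeName == nodeName && item.DeletionTimestamp == nil {
			if err := vaClient.Delete(ctx, item.Name, metav1.DeleteOptions{}); err != nil {
				fmt.Println("delete failed:", err)
			}
		}
	}

	remaining, _ := vaClient.List(ctx, metav1.ListOptions{})
	fmt.Println("remaining VolumeAttachments:", len(remaining.Items)) // 0
}

func ptr(s string) *string { return &s }
```

This mirrors what `deleteNodeVolAttachments` does before handing the machine over to the `InitiateVMDeletion` step.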