From 3b0a6d300c4277967c89350e901500842016d434 Mon Sep 17 00:00:00 2001
From: Xieql
Date: Sat, 21 Oct 2023 10:21:20 +0800
Subject: [PATCH 1/2] backup: init restore controller

Signed-off-by: Xieql
---
 .../backup_restore_migrate_shared.go          | 146 +++++++++++
 .../backup_restore_migrate_shared_test.go     | 242 +++++++++++++++++-
 pkg/fleet-manager/restore_controller.go       | 174 +++++++++++++
 .../backup/include-ns.yaml                    |   0
 .../backup/label-selector.yaml                |   0
 .../backup/schedule.yaml                      |   0
 .../testdata/restore/custom-policy.yaml       |  18 ++
 .../testdata/restore/minimal.yaml             |  15 ++
 8 files changed, 588 insertions(+), 7 deletions(-)
 create mode 100644 pkg/fleet-manager/restore_controller.go
 rename pkg/fleet-manager/{backup-testdata => testdata}/backup/include-ns.yaml (100%)
 rename pkg/fleet-manager/{backup-testdata => testdata}/backup/label-selector.yaml (100%)
 rename pkg/fleet-manager/{backup-testdata => testdata}/backup/schedule.yaml (100%)
 create mode 100644 pkg/fleet-manager/testdata/restore/custom-policy.yaml
 create mode 100644 pkg/fleet-manager/testdata/restore/minimal.yaml

diff --git a/pkg/fleet-manager/backup_restore_migrate_shared.go b/pkg/fleet-manager/backup_restore_migrate_shared.go
index 63c9f8afc..d70cd4a00 100644
--- a/pkg/fleet-manager/backup_restore_migrate_shared.go
+++ b/pkg/fleet-manager/backup_restore_migrate_shared.go
@@ -15,6 +15,7 @@ package fleet
 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	"sort"
 
@@ -311,3 +312,148 @@ func listResourcesFromClusterClient(ctx context.Context, namespace string, label
 	}
 	return clusterClient.List(ctx, objList, opts)
 }
+
+// fetchRestoreDestinationClusters retrieves the destination clusters for a restore operation.
+// It first fetches the referred backup and then determines the destination clusters based on the restore and backup specifications:
+// If the restore destination is not set, it returns the clusters from the backup destination.
+// If set, it ensures that the restore destination is a subset of the backup destination.
+//
+// Returns:
+// - string: The name of the fleet where the restore's set of fleetClusters resides.
+// - map[ClusterKey]*fleetCluster: A map of cluster keys to fleet clusters.
+// - error: An error object indicating any issues encountered during the operation.
+func (r *RestoreManager) fetchRestoreDestinationClusters(ctx context.Context, restore *backupapi.Restore) (string, map[ClusterKey]*fleetCluster, error) {
+	// Retrieve the referred backup in the current Kurator host cluster
+	key := client.ObjectKey{
+		Name:      restore.Spec.BackupName,
+		Namespace: restore.Namespace,
+	}
+	referredBackup := &backupapi.Backup{}
+	if err := r.Client.Get(ctx, key, referredBackup); err != nil {
+		return "", nil, fmt.Errorf("failed to retrieve the referred backup '%s': %w", restore.Spec.BackupName, err)
+	}
+
+	// Get the base clusters from the referred backup
+	baseClusters, err := fetchDestinationClusters(ctx, r.Client, restore.Namespace, referredBackup.Spec.Destination)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to fetch fleet clusters for the backup '%s': %w", referredBackup.Name, err)
+	}
+
+	// If the restore destination is not set, return the base fleet clusters directly
+	if restore.Spec.Destination == nil {
+		return referredBackup.Spec.Destination.Fleet, baseClusters, nil
+	}
+
+	// If the restore destination is set, try to get the clusters from the restore destination
+	restoreClusters, err := fetchDestinationClusters(ctx, r.Client, restore.Namespace, *restore.Spec.Destination)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to fetch restore clusters for the restore '%s': %w", restore.Name, err)
+	}
+
+	// Check the fleet and clusters between the restore and the referred backup
+	if referredBackup.Spec.Destination.Fleet != restore.Spec.Destination.Fleet {
+		// if we guarantee at most one fleet per namespace, this error can never occur
+		return "", nil, errors.New("the restore destination fleet must be the same as the backup's")
+	}
+
+	// In our design, the restore destination must be a subset of the backup destination.
+	if !isFleetClusterSubset(baseClusters, restoreClusters) {
+		return "", nil, errors.New("the restore clusters must be a subset of the base clusters")
+	}
+
+	return restore.Spec.Destination.Fleet, restoreClusters, nil
+}
+
+// isFleetClusterSubset is a helper function that checks whether one set of clusters is a subset of another.
+func isFleetClusterSubset(baseClusters, subsetClusters map[ClusterKey]*fleetCluster) bool {
+	for key := range subsetClusters {
+		if _, exists := baseClusters[key]; !exists {
+			return false
+		}
+	}
+	return true
+}
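+
+// Illustration: if the backup destination resolved to clusters {member1, member2}
+// and the restore destination resolved to {member1}, the subset check passes,
+// while a destination containing member3 would be rejected. A minimal sketch,
+// assuming ClusterKey's fields are (Kind, Name) as the positional literals in
+// the unit tests suggest:
+//
+//	base := map[ClusterKey]*fleetCluster{{"AttachedCluster", "member1"}: {}, {"AttachedCluster", "member2"}: {}}
+//	sub := map[ClusterKey]*fleetCluster{{"AttachedCluster", "member1"}: {}}
+//	ok := isFleetClusterSubset(base, sub) // ok == true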
+
+// buildVeleroRestoreInstance constructs a Velero Restore instance configured to perform restore operations on the specified cluster.
+func buildVeleroRestoreInstance(restoreSpec *backupapi.RestoreSpec, labels map[string]string, veleroBackupName, veleroRestoreName string) *velerov1.Restore {
+	veleroRestore := &velerov1.Restore{
+		ObjectMeta: generateVeleroResourceObjectMeta(veleroRestoreName, labels),
+		Spec: velerov1.RestoreSpec{
+			BackupName: veleroBackupName,
+		},
+	}
+	if restoreSpec.Policy != nil {
+		veleroRestore.Spec.NamespaceMapping = restoreSpec.Policy.NamespaceMapping
+		veleroRestore.Spec.PreserveNodePorts = restoreSpec.Policy.PreserveNodePorts
+		// note: unlike the backup spec, the Velero restore spec has no namespace-scoped resource filter of its own
+		if restoreSpec.Policy.ResourceFilter != nil {
+			veleroRestore.Spec.IncludedNamespaces = restoreSpec.Policy.ResourceFilter.IncludedNamespaces
+			veleroRestore.Spec.ExcludedNamespaces = restoreSpec.Policy.ResourceFilter.ExcludedNamespaces
+			veleroRestore.Spec.IncludedResources = restoreSpec.Policy.ResourceFilter.IncludedResources
+			veleroRestore.Spec.ExcludedResources = restoreSpec.Policy.ResourceFilter.ExcludedResources
+			veleroRestore.Spec.IncludeClusterResources = restoreSpec.Policy.ResourceFilter.IncludeClusterResources
+			veleroRestore.Spec.LabelSelector = restoreSpec.Policy.ResourceFilter.LabelSelector
+			veleroRestore.Spec.OrLabelSelectors = restoreSpec.Policy.ResourceFilter.OrLabelSelectors
+		}
+		if restoreSpec.Policy.PreserveStatus != nil {
+			veleroRestore.Spec.RestoreStatus = &velerov1.RestoreStatusSpec{
+				IncludedResources: restoreSpec.Policy.PreserveStatus.IncludedResources,
+				ExcludedResources: restoreSpec.Policy.PreserveStatus.ExcludedResources,
+			}
+		}
+	}
+
+	return veleroRestore
+}
+
+// allRestoreCompleted checks if all restore operations are completed by inspecting the phase of each RestoreDetails instance in the provided slice.
+func allRestoreCompleted(clusterDetails []*backupapi.RestoreDetails) bool {
+	for _, detail := range clusterDetails {
+		if detail.RestoreStatusInCluster == nil || detail.RestoreStatusInCluster.Phase != velerov1.RestorePhaseCompleted {
+			return false
+		}
+	}
+	return true
+}
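+
+// allRestoreCompleted feeds the requeue decision in reconcileRestore
+// (see restore_controller.go); a sketch of that logic:
+//
+//	if allRestoreCompleted(restore.Status.Details) {
+//		return ctrl.Result{}, nil // every cluster reports RestorePhaseCompleted
+//	}
+//	return ctrl.Result{RequeueAfter: StatusSyncInterval}, nil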
+
+// syncVeleroRestoreStatus synchronizes the status of Velero restore resources across different clusters.
+// Note: Returns the modified ClusterDetails to capture internal changes due to Go's slice behavior.
+func syncVeleroRestoreStatus(ctx context.Context, destinationClusters map[ClusterKey]*fleetCluster, clusterDetails []*backupapi.RestoreDetails, creatorKind, creatorNamespace, creatorName string) ([]*backupapi.RestoreDetails, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	if clusterDetails == nil {
+		clusterDetails = []*backupapi.RestoreDetails{}
+	}
+
+	// Initialize a map to store the velero restore status of each cluster currently recorded. The combination of detail.ClusterName, detail.ClusterKind, and detail.RestoreNameInCluster uniquely identifies a Velero restore object.
+	statusMap := make(map[string]*backupapi.RestoreDetails)
+	for _, detail := range clusterDetails {
+		key := fmt.Sprintf("%s-%s-%s", detail.ClusterName, detail.ClusterKind, detail.RestoreNameInCluster)
+		statusMap[key] = detail
+	}
+	// Loop through each target cluster to retrieve the status of Velero restore resources using the client associated with the respective target cluster.
+	for clusterKey, clusterAccess := range destinationClusters {
+		name := generateVeleroResourceName(clusterKey.Name, creatorKind, creatorNamespace, creatorName)
+		veleroRestore := &velerov1.Restore{}
+		err := getResourceFromClusterClient(ctx, name, VeleroNamespace, *clusterAccess, veleroRestore)
+		if err != nil {
+			log.Error(err, "failed to get velero restore instance for sync status", "restoreName", name)
+			return nil, err
+		}
+
+		key := fmt.Sprintf("%s-%s-%s", clusterKey.Name, clusterKey.Kind, veleroRestore.Name)
+		if detail, exists := statusMap[key]; exists {
+			detail.RestoreStatusInCluster = &veleroRestore.Status
+		} else {
+			currentRestoreDetails := &backupapi.RestoreDetails{
+				ClusterName:            clusterKey.Name,
+				ClusterKind:            clusterKey.Kind,
+				RestoreNameInCluster:   veleroRestore.Name,
+				RestoreStatusInCluster: &veleroRestore.Status,
+			}
+			clusterDetails = append(clusterDetails, currentRestoreDetails)
+		}
+	}
+
+	return clusterDetails, nil
+}
diff --git a/pkg/fleet-manager/backup_restore_migrate_shared_test.go b/pkg/fleet-manager/backup_restore_migrate_shared_test.go
index 6bc3e5672..3a34553a6 100644
--- a/pkg/fleet-manager/backup_restore_migrate_shared_test.go
+++ b/pkg/fleet-manager/backup_restore_migrate_shared_test.go
@@ -27,19 +27,20 @@ import (
 	backupapi "kurator.dev/kurator/pkg/apis/backups/v1alpha1"
 )
 
-const backupTestDataPath = "backup-testdata/backup/"
+const backupTestDataPath = "testdata/backup/"
+const restoreTestDataPath = "testdata/restore/"
 
 // buildVeleroBackupInstanceForTest is a helper function for testing for buildVeleroBackupInstance, which constructs a Velero Backup instance with a specified TypeMeta.
-func buildVeleroBackupInstanceForTest(backupSpec *backupapi.BackupSpec, labels map[string]string, veleroBackupName string, typeMeta *metav1.TypeMeta) *velerov1.Backup {
+func buildVeleroBackupInstanceForTest(backupSpec *backupapi.BackupSpec, labels map[string]string, veleroBackupName string, typeMeta metav1.TypeMeta) *velerov1.Backup {
 	veleroBackup := buildVeleroBackupInstance(backupSpec, labels, veleroBackupName)
-	veleroBackup.TypeMeta = *typeMeta // set TypeMeta for test
+	veleroBackup.TypeMeta = typeMeta // set TypeMeta for test
 	return veleroBackup
 }
 
 // buildVeleroScheduleInstanceForTest is a helper function for testing buildVeleroScheduleInstance, which constructs a Velero Schedule instance with a specified TypeMeta.
-func buildVeleroScheduleInstanceForTest(backupSpec *backupapi.BackupSpec, labels map[string]string, veleroBackupName string, typeMeta *metav1.TypeMeta) *velerov1.Schedule {
+func buildVeleroScheduleInstanceForTest(backupSpec *backupapi.BackupSpec, labels map[string]string, veleroBackupName string, typeMeta metav1.TypeMeta) *velerov1.Schedule {
 	veleroSchedule := buildVeleroScheduleInstance(backupSpec, labels, veleroBackupName)
-	veleroSchedule.TypeMeta = *typeMeta
+	veleroSchedule.TypeMeta = typeMeta
 	return veleroSchedule
 }
 
@@ -117,7 +118,7 @@ func TestBuildVeleroBackupInstance(t *testing.T) {
 		},
 	}
 
-	typeMeta := &metav1.TypeMeta{
+	typeMeta := metav1.TypeMeta{
 		APIVersion: "velero.io/v1",
 		Kind:       "Backup",
 	}
@@ -187,7 +188,7 @@ func TestBuildVeleroScheduleInstance(t *testing.T) {
 		},
 	}
 
-	typeMeta := &metav1.TypeMeta{
+	typeMeta := metav1.TypeMeta{
 		APIVersion: "velero.io/v1",
 		Kind:       "Schedule",
 	}
@@ -429,3 +430,230 @@ func TestGetCronInterval(t *testing.T) {
 		})
 	}
 }
+
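+// The restore tests below follow the same golden-file pattern as the backup and
+// schedule tests above: the expected Velero object for each case is kept in
+// testdata/restore/<case>.yaml and compared byte-for-byte with the yaml.Marshal
+// output of the constructed instance.
+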
+// buildVeleroRestoreInstanceForTest is a helper function for testing buildVeleroRestoreInstance, which constructs a Velero Restore instance with a specified TypeMeta.
+func buildVeleroRestoreInstanceForTest(restoreSpec *backupapi.RestoreSpec, labels map[string]string, veleroBackupName, veleroRestoreName string, typeMeta metav1.TypeMeta) *velerov1.Restore {
+	veleroRestore := buildVeleroRestoreInstance(restoreSpec, labels, veleroBackupName, veleroRestoreName)
+	veleroRestore.TypeMeta = typeMeta
+	return veleroRestore
+}
+
+func TestBuildVeleroRestoreInstance(t *testing.T) {
+	cases := []struct {
+		name        string
+		description string
+		creatorName string
+		// a velero restore can be created by either a kurator restore or a kurator migrate
+		creatorKind      string
+		creatorLabel     string
+		clusterName      string
+		creatorNamespace string
+		veleroBackupName string
+		restoreSpec      *backupapi.RestoreSpec
+	}{
+		{
+			name: "minimal",
+			description: "Test the minimal restore scenario where the Velero restore instance is created by Kurator 'Restore' with the creator name 'minimal'. " +
+				"The restore targets the 'kurator-member1' cluster using the backup named 'include-ns'.",
+			creatorName:      "minimal",
+			creatorKind:      RestoreKind,
+			creatorLabel:     RestoreNameLabel,
+			clusterName:      "kurator-member1",
+			creatorNamespace: "default",
+			veleroBackupName: "kurator-member1-backup-include-ns",
+			restoreSpec: &backupapi.RestoreSpec{
+				BackupName: "include-ns",
+				Destination: &backupapi.Destination{
+					Fleet: "quickstart",
+					Clusters: []*corev1.ObjectReference{
+						{
+							Kind: "AttachedCluster",
+							Name: "kurator-member1",
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "custom-policy",
+			description: "Test the custom policy restore scenario where resources are filtered based on the 'env: test' label. " +
+				"The Velero restore instance is created by Kurator 'Migrate' with the creator name 'custom-policy', targeting the 'kurator-member1' cluster.",
+			creatorName:      "custom-policy",
+			creatorNamespace: "default",
+			creatorKind:      MigrateKind,
+			creatorLabel:     MigrateNameLabel,
+			clusterName:      "kurator-member1",
+			veleroBackupName: "kurator-member1-migrate-include-ns",
+			restoreSpec: &backupapi.RestoreSpec{
+				BackupName: "include-ns",
+				Destination: &backupapi.Destination{
+					Fleet: "quickstart",
+					Clusters: []*corev1.ObjectReference{
+						{
+							Kind: "AttachedCluster",
+							Name: "kurator-member1",
+						},
+					},
+				},
+				Policy: &backupapi.RestorePolicy{
+					ResourceFilter: &backupapi.ResourceFilter{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								"env": "test",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	typeMeta := metav1.TypeMeta{
+		APIVersion: "velero.io/v1",
+		Kind:       "Restore",
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			// get the expected restore yaml
+			expectedYAML, err := getExpectedRestore(tc.name)
+			assert.NoError(t, err)
+
+			// just for the test; the real fleet name may not be recorded in `tc.restoreSpec.Destination.Fleet`
+			restoreLabels := generateVeleroInstanceLabel(tc.creatorLabel, tc.creatorName, tc.restoreSpec.Destination.Fleet)
+			restoreName := generateVeleroResourceName(tc.clusterName, tc.creatorKind, tc.creatorNamespace, tc.creatorName)
+
+			// get the actual restore yaml
+			actualRestore := buildVeleroRestoreInstanceForTest(tc.restoreSpec, restoreLabels, tc.veleroBackupName, restoreName, typeMeta)
+			actualYAML, err := yaml.Marshal(actualRestore)
+			if err != nil {
+				t.Fatalf("failed to marshal actual output to YAML: %v", err)
+			}
+
+			assert.Equal(t, string(expectedYAML), string(actualYAML))
+		})
+	}
+}
+
+func getExpectedRestore(caseName string) ([]byte, error) {
+	return os.ReadFile(restoreTestDataPath + caseName + ".yaml")
+}
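+
+// Note: restoreName above is composed by generateVeleroResourceName as
+// <cluster>-<kind>-<namespace>-<creator>; for instance, the "minimal" case
+// expects "kurator-member1-restore-default-minimal" (see testdata/restore/minimal.yaml).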
+
+func TestIsFleetClusterSubset(t *testing.T) {
+	tests := []struct {
+		name           string
+		baseClusters   map[ClusterKey]*fleetCluster
+		subsetClusters map[ClusterKey]*fleetCluster
+		wantResult     bool
+	}{
+		{
+			name: "Subset is a true subset of base",
+			baseClusters: map[ClusterKey]*fleetCluster{
+				{"Kind1", "Name1"}: {},
+				{"Kind2", "Name2"}: {},
+			},
+			subsetClusters: map[ClusterKey]*fleetCluster{
+				{"Kind1", "Name1"}: {},
+			},
+			wantResult: true,
+		},
+		{
+			name: "Subset is not a subset of base",
+			baseClusters: map[ClusterKey]*fleetCluster{
+				{"Kind1", "Name1"}: {},
+			},
+			subsetClusters: map[ClusterKey]*fleetCluster{
+				{"Kind2", "Name2"}: {},
+			},
+			wantResult: false,
+		},
+		{
+			name:           "Both base and subset are empty",
+			baseClusters:   map[ClusterKey]*fleetCluster{},
+			subsetClusters: map[ClusterKey]*fleetCluster{},
+			wantResult:     true,
+		},
+		{
+			name:         "Base is empty, but subset is not",
+			baseClusters: map[ClusterKey]*fleetCluster{},
+			subsetClusters: map[ClusterKey]*fleetCluster{
+				{"Kind1", "Name1"}: {},
+			},
+			wantResult: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotResult := isFleetClusterSubset(tt.baseClusters, tt.subsetClusters)
+			assert.Equal(t, tt.wantResult, gotResult)
+		})
+	}
+}
+
+func TestAllRestoreCompleted(t *testing.T) {
+	tests := []struct {
+		name           string
+		restoreDetails []*backupapi.RestoreDetails
+		wantResult     bool
+	}{
+		{
+			name: "All restores are completed",
+			restoreDetails: []*backupapi.RestoreDetails{
+				{
+					RestoreStatusInCluster: &velerov1.RestoreStatus{
+						Phase: velerov1.RestorePhaseCompleted,
+					},
+				},
+				{
+					RestoreStatusInCluster: &velerov1.RestoreStatus{
+						Phase: velerov1.RestorePhaseCompleted,
+					},
+				},
+			},
+			wantResult: true,
+		},
+		{
+			name: "Not all restores are completed",
+			restoreDetails: []*backupapi.RestoreDetails{
+				{
+					RestoreStatusInCluster: &velerov1.RestoreStatus{
+						Phase: velerov1.RestorePhaseCompleted,
+					},
+				},
+				{
+					RestoreStatusInCluster: &velerov1.RestoreStatus{
+						Phase: "InProgress",
+					},
+				},
+			},
+			wantResult: false,
+		},
+		{
+			name:           "No restore details",
+			restoreDetails: []*backupapi.RestoreDetails{},
+			wantResult:     true, // No details means nothing to check for completion
+		},
+		{
+			name: "Some restore details lack a status",
+			restoreDetails: []*backupapi.RestoreDetails{
+				{
+					RestoreStatusInCluster: nil,
+				},
+				{
+					RestoreStatusInCluster: &velerov1.RestoreStatus{
+						Phase: velerov1.RestorePhaseCompleted,
+					},
+				},
+			},
+			wantResult: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotResult := allRestoreCompleted(tt.restoreDetails)
+			assert.Equal(t, tt.wantResult, gotResult)
+		})
+	}
+}
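+
+// Note that allRestoreCompleted treats any phase other than
+// velerov1.RestorePhaseCompleted as unfinished, so a restore that ends in a
+// failed phase keeps the reconciler requeueing. For example (illustrative):
+//
+//	details := []*backupapi.RestoreDetails{
+//		{RestoreStatusInCluster: &velerov1.RestoreStatus{Phase: velerov1.RestorePhasePartiallyFailed}},
+//	}
+//	_ = allRestoreCompleted(details) // false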
diff --git a/pkg/fleet-manager/restore_controller.go b/pkg/fleet-manager/restore_controller.go
new file mode 100644
index 000000000..36233bbf2
--- /dev/null
+++ b/pkg/fleet-manager/restore_controller.go
@@ -0,0 +1,174 @@
+/*
+Copyright Kurator Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fleet
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/pkg/errors"
+	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"sigs.k8s.io/cluster-api/util/patch"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	backupapi "kurator.dev/kurator/pkg/apis/backups/v1alpha1"
+)
+
+// RestoreManager reconciles a Restore object
+type RestoreManager struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *RestoreManager) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&backupapi.Restore{}).
+		WithOptions(options).
+		Complete(r)
+}
+
+func (r *RestoreManager) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	log := ctrl.LoggerFrom(ctx).WithValues("restore", req.NamespacedName)
+
+	restore := &backupapi.Restore{}
+
+	if err := r.Client.Get(ctx, req.NamespacedName, restore); err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("restore does not exist")
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	patchHelper, err := patch.NewHelper(restore, r.Client)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to init patch helper for restore %s", req.NamespacedName)
+	}
+	defer func() {
+		if err := patchHelper.Patch(ctx, restore); err != nil {
+			reterr = utilerrors.NewAggregate([]error{reterr, errors.Wrapf(err, "failed to patch %s %s", restore.Name, req.NamespacedName)})
+		}
+	}()
+
+	if !controllerutil.ContainsFinalizer(restore, RestoreFinalizer) {
+		controllerutil.AddFinalizer(restore, RestoreFinalizer)
+	}
+
+	// Handle deletion
+	if restore.GetDeletionTimestamp() != nil {
+		return r.reconcileDeleteRestore(ctx, restore)
+	}
+
+	// Handle the main reconcile logic
+	return r.reconcileRestore(ctx, restore)
+}
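+
+// Illustrative wiring of this controller (a sketch only; the real registration
+// happens wherever the fleet manager sets up its controllers, and the scheme,
+// context, and options values here are placeholders):
+//
+//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
+//	if err != nil {
+//		// handle error
+//	}
+//	r := &RestoreManager{Client: mgr.GetClient(), Scheme: mgr.GetScheme()}
+//	if err := r.SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
+//		// handle error
+//	}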
+
+// reconcileRestore handles the main reconcile logic for a Restore object.
+func (r *RestoreManager) reconcileRestore(ctx context.Context, restore *backupapi.Restore) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	fleetName, destinationClusters, err := r.fetchRestoreDestinationClusters(ctx, restore)
+	if err != nil {
+		log.Error(err, "failed to fetch destination clusters for restore", "restoreName", restore.Name)
+		return ctrl.Result{}, err
+	}
+
+	// Apply restore resources in target clusters
+	result, err := r.reconcileRestoreResources(ctx, restore, destinationClusters, fleetName)
+	if err != nil {
+		return result, err
+	}
+
+	// Collect the status of the Velero restore resources in target clusters into the current restore
+	restore.Status.Details, err = syncVeleroRestoreStatus(ctx, destinationClusters, restore.Status.Details, RestoreKind, restore.Namespace, restore.Name)
+	if err != nil {
+		log.Error(err, "failed to sync velero restore status for restore", "restoreName", restore.Name)
+		return ctrl.Result{}, err
+	}
+
+	if allRestoreCompleted(restore.Status.Details) {
+		return ctrl.Result{}, nil
+	} else {
+		return ctrl.Result{RequeueAfter: StatusSyncInterval}, nil
+	}
+}
+
+// reconcileRestoreResources converts the restore resources into velero restore resources on the target clusters, and applies those velero restore resources.
+func (r *RestoreManager) reconcileRestoreResources(ctx context.Context, restore *backupapi.Restore, destinationClusters map[ClusterKey]*fleetCluster, fleetName string) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	restoreLabels := generateVeleroInstanceLabel(RestoreNameLabel, restore.Name, fleetName)
+
+	var tasks []func() error
+	for clusterKey, clusterAccess := range destinationClusters {
+		veleroBackupName := generateVeleroResourceName(clusterKey.Name, BackupKind, restore.Namespace, restore.Spec.BackupName)
+		veleroRestoreName := generateVeleroResourceName(clusterKey.Name, RestoreKind, restore.Namespace, restore.Name)
+		veleroRestore := buildVeleroRestoreInstance(&restore.Spec, restoreLabels, veleroBackupName, veleroRestoreName)
+
+		task := newSyncVeleroTaskFunc(ctx, clusterAccess, veleroRestore)
+		tasks = append(tasks, task)
+	}
+
+	g := &multierror.Group{}
+	for _, task := range tasks {
+		g.Go(task)
+	}
+
+	err := g.Wait().ErrorOrNil()
+
+	if err != nil {
+		log.Error(err, "error encountered while syncing velero restore objects")
+		return ctrl.Result{}, fmt.Errorf("encountered errors during processing: %w", err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+func (r *RestoreManager) reconcileDeleteRestore(ctx context.Context, restore *backupapi.Restore) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	shouldRemoveFinalizer := false
+	defer func() {
+		if shouldRemoveFinalizer {
+			controllerutil.RemoveFinalizer(restore, RestoreFinalizer)
+			log.Info("Removed finalizer", "restoreName", restore.Name)
+		}
+	}()
+
+	_, destinationClusters, err := r.fetchRestoreDestinationClusters(ctx, restore)
+	if err != nil {
+		log.Error(err, "failed to fetch destination clusters when deleting restore", "restoreName", restore.Name)
+		shouldRemoveFinalizer = true
+		return ctrl.Result{}, err
+	}
+
+	restoreList := &velerov1.RestoreList{}
+	// Delete all related velero restore instances
+	if err := deleteResourcesInClusters(ctx, VeleroNamespace, RestoreNameLabel, restore.Name, destinationClusters, restoreList); err != nil {
+		log.Error(err, "failed to delete velero restore instances when deleting restore", "restoreName", restore.Name)
+		return ctrl.Result{}, err
+	}
+
+	shouldRemoveFinalizer = true
+
+	return ctrl.Result{}, nil
+}
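+
+// For reference, a minimal user-facing Restore object handled by this controller
+// might look like the following sketch (the apiVersion is an assumption and the
+// names are placeholders; the field casing mirrors backupapi.RestoreSpec):
+//
+//	apiVersion: backup.kurator.dev/v1alpha1
+//	kind: Restore
+//	metadata:
+//	  name: minimal
+//	  namespace: default
+//	spec:
+//	  backupName: include-ns
+//	  destination:
+//	    fleet: quickstart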
diff --git a/pkg/fleet-manager/backup-testdata/backup/include-ns.yaml b/pkg/fleet-manager/testdata/backup/include-ns.yaml
similarity index 100%
rename from pkg/fleet-manager/backup-testdata/backup/include-ns.yaml
rename to pkg/fleet-manager/testdata/backup/include-ns.yaml
diff --git a/pkg/fleet-manager/backup-testdata/backup/label-selector.yaml b/pkg/fleet-manager/testdata/backup/label-selector.yaml
similarity index 100%
rename from pkg/fleet-manager/backup-testdata/backup/label-selector.yaml
rename to pkg/fleet-manager/testdata/backup/label-selector.yaml
diff --git a/pkg/fleet-manager/backup-testdata/backup/schedule.yaml b/pkg/fleet-manager/testdata/backup/schedule.yaml
similarity index 100%
rename from pkg/fleet-manager/backup-testdata/backup/schedule.yaml
rename to pkg/fleet-manager/testdata/backup/schedule.yaml
diff --git a/pkg/fleet-manager/testdata/restore/custom-policy.yaml b/pkg/fleet-manager/testdata/restore/custom-policy.yaml
new file mode 100644
index 000000000..649472410
--- /dev/null
+++ b/pkg/fleet-manager/testdata/restore/custom-policy.yaml
@@ -0,0 +1,18 @@
+apiVersion: velero.io/v1
+kind: Restore
+metadata:
+  creationTimestamp: null
+  labels:
+    fleet.kurator.dev/fleet-name: quickstart
+    fleet.kurator.dev/plugin: backup
+    kurator.dev/migrate-name: custom-policy
+  name: kurator-member1-migrate-default-custom-policy
+  namespace: velero
+spec:
+  backupName: kurator-member1-migrate-include-ns
+  hooks: {}
+  itemOperationTimeout: 0s
+  labelSelector:
+    matchLabels:
+      env: test
+status: {}
diff --git a/pkg/fleet-manager/testdata/restore/minimal.yaml b/pkg/fleet-manager/testdata/restore/minimal.yaml
new file mode 100644
index 000000000..54796065f
--- /dev/null
+++ b/pkg/fleet-manager/testdata/restore/minimal.yaml
@@ -0,0 +1,15 @@
+apiVersion: velero.io/v1
+kind: Restore
+metadata:
+  creationTimestamp: null
+  labels:
+    fleet.kurator.dev/fleet-name: quickstart
+    fleet.kurator.dev/plugin: backup
+    kurator.dev/restore-name: minimal
+  name: kurator-member1-restore-default-minimal
+  namespace: velero
+spec:
+  backupName: kurator-member1-backup-include-ns
+  hooks: {}
+  itemOperationTimeout: 0s
+status: {}
for restore") return ctrl.Result{}, err } @@ -150,13 +150,12 @@ func (r *RestoreManager) reconcileDeleteRestore(ctx context.Context, restore *ba defer func() { if shouldRemoveFinalizer { controllerutil.RemoveFinalizer(restore, RestoreFinalizer) - log.Info("Removed finalizer", "restoreName", restore.Name) } }() _, destinationClusters, err := r.fetchRestoreDestinationClusters(ctx, restore) if err != nil { - log.Error(err, "failed to fetch destination clusters when deleting restore", "restoreName", restore.Name) + log.Error(err, "failed to fetch destination clusters when deleting restore") shouldRemoveFinalizer = true return ctrl.Result{}, err } @@ -164,7 +163,7 @@ func (r *RestoreManager) reconcileDeleteRestore(ctx context.Context, restore *ba restoreList := &velerov1.RestoreList{} // Delete all related velero restore instance if err := deleteResourcesInClusters(ctx, VeleroNamespace, RestoreNameLabel, restore.Name, destinationClusters, restoreList); err != nil { - log.Error(err, "failed to delete velero restore Instances when delete restore", "restoreName", restore.Name) + log.Error(err, "failed to delete velero restore Instances when delete restore") return ctrl.Result{}, err }