Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

allow specifying additional rsync flags #169

Merged
merged 2 commits into from
Mar 28, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 17 additions & 15 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,10 @@ PVCs to refer to the new PVs.

## Preflight Validation

`pvmigrate` will run preflight migration validation to catch any potential failures prior to the migration.
`pvmigrate` will run preflight migration validation to catch any potential failures prior to the migration.

Currently supported validations are:

Currently supported validations are:
- Checking for existence of storage classes
- Checking existing PVC access modes are supported on the destination storage provider

Expand All @@ -27,19 +28,20 @@ pvmigrate --source-sc "source" --dest-sc "destination" --preflight-validation-on

## Flags

| Flag | Type | Required | Default | Description |
|--------------------------|--------|----------|------------------|--------------------------------------------------------------------------------------------------|
| --source-sc | String | ✓ | | storage provider name to migrate from |
| --dest-sc | String | ✓ | | storage provider name to migrate to |
| --namespace | String | | | only migrate PVCs within this namespace |
| --rsync-image | String | | eeacms/rsync:2.3 | the image to use to copy PVCs - must have 'rsync' on the path |
| --set-defaults | Bool | | false | change default storage class from source to dest |
| --verbose-copy | Bool | | false | show output from the rsync command used to copy data between PVCs |
| --skip-source-validation | Bool | | false | migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist |
| --preflight-validation-only | Bool | | false | skip the migration and run preflight validation only |
| --skip-preflight-validation | Bool | | false | skip preflight migration validation on the destination storage provider |
| --pod-ready-timeout | time.Duration | | 60 seconds | length of time to wait (in seconds) for validation pod(s) to go into Ready phase |
| --delete-pv-timeout | time.Duration | | 5 minutes | length of time to wait (in seconds) for backing PV to be removed when the temporary PVC is deleted |
| Flag | Type | Required | Default | Description |
|-----------------------------|---------|----------|------------------|----------------------------------------------------------------------------------------------------|
| --source-sc | String | ✓ | | storage provider name to migrate from |
| --dest-sc | String | ✓ | | storage provider name to migrate to |
| --namespace | String | | | only migrate PVCs within this namespace |
| --rsync-image | String | | eeacms/rsync:2.3 | the image to use to copy PVCs - must have 'rsync' on the path |
| --rsync-flags               | String  |          |                  | a comma-separated list of additional flags to pass to rsync when copying PVCs                      |
| --set-defaults | Bool | | false | change default storage class from source to dest |
| --verbose-copy | Bool | | false | show output from the rsync command used to copy data between PVCs |
| --skip-source-validation | Bool | | false | migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist |
| --preflight-validation-only | Bool | | false | skip the migration and run preflight validation only |
| --skip-preflight-validation | Bool | | false | skip preflight migration validation on the destination storage provider |
| --pod-ready-timeout | Integer | | 60 | length of time to wait (in seconds) for validation pod(s) to go into Ready phase |
| --delete-pv-timeout | Integer | | 300 | length of time to wait (in seconds) for backing PV to be removed when the temporary PVC is deleted |

## Process

Expand Down
8 changes: 8 additions & 0 deletions cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"log"
"os"
"os/signal"
"strings"
"time"

"github.com/replicatedhq/pvmigrate/pkg/migrate"
Expand All @@ -30,9 +31,11 @@ func main() {
var preflightValidationOnly bool
var podReadyTimeout int
var deletePVTimeout int
var rsyncFlags string
flag.StringVar(&options.SourceSCName, "source-sc", "", "storage provider name to migrate from")
flag.StringVar(&options.DestSCName, "dest-sc", "", "storage provider name to migrate to")
flag.StringVar(&options.RsyncImage, "rsync-image", "eeacms/rsync:2.3", "the image to use to copy PVCs - must have 'rsync' on the path")
flag.StringVar(&rsyncFlags, "rsync-flags", "", "additional flags to pass to rsync command")
flag.StringVar(&options.Namespace, "namespace", "", "only migrate PVCs within this namespace")
flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest")
flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs")
Expand All @@ -48,6 +51,11 @@ func main() {
options.PodReadyTimeout = time.Duration(podReadyTimeout) * time.Second
options.DeletePVTimeout = time.Duration(deletePVTimeout) * time.Second

if rsyncFlags != "" {
rsyncFlagsSlice := strings.Split(rsyncFlags, ",")
options.RsyncFlags = rsyncFlagsSlice
}

// setup logger
logger := log.New(os.Stderr, "", 0) // this has no time prefix etc

Expand Down
31 changes: 17 additions & 14 deletions pkg/migrate/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ type Options struct {
SourceSCName string
DestSCName string
RsyncImage string
RsyncFlags []string
Namespace string
SetDefaults bool
VerboseCopy bool
Expand All @@ -68,7 +69,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface,
return fmt.Errorf("failed to scale down pods: %w", err)
}

err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Second)
err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Second, options.RsyncFlags)
if err != nil {
return err
}
Expand Down Expand Up @@ -184,15 +185,15 @@ func swapDefaultStorageClasses(ctx context.Context, w *log.Logger, clientset k8s
return nil
}

func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, waitTime time.Duration) error {
func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, waitTime time.Duration, rsyncFlags []string) error {
// create a pod for each PVC migration, and wait for it to finish
w.Printf("\nCopying data from %s PVCs to %s PVCs\n", sourceSCName, destSCName)
for ns, nsPvcs := range matchingPVCs {
for _, nsPvc := range nsPvcs {
sourcePvcName, destPvcName := nsPvc.claim.Name, newPvcName(nsPvc.claim.Name)
w.Printf("Copying data from %s (%s) to %s in %s\n", sourcePvcName, nsPvc.claim.Spec.VolumeName, destPvcName, ns)

err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, waitTime, nsPvc.getNodeNameRef())
err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, waitTime, nsPvc.getNodeNameRef(), rsyncFlags)
if err != nil {
return fmt.Errorf("failed to copy PVC %s in %s: %w", nsPvc.claim.Name, ns, err)
}
Expand All @@ -201,9 +202,9 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa
return nil
}

func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, waitTime time.Duration, nodeName string) error {
func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, waitTime time.Duration, nodeName string, rsyncFlags []string) error {
w.Printf("Creating pvc migrator pod on node %s\n", nodeName)
createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName)
createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName, rsyncFlags)
if err != nil {
return err
}
Expand Down Expand Up @@ -318,7 +319,7 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac
return nil
}

func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, nodeName string) (*corev1.Pod, error) {
func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, nodeName string, rsyncFlags []string) (*corev1.Pod, error) {
// apply nodeAffinity when migrating to a local volume provisioner
var nodeAffinity *corev1.Affinity
if isDestScLocalVolumeProvisioner && nodeName != "" {
Expand All @@ -341,6 +342,15 @@ func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns s
}
}

podArgs := []string{
"-a", // use the "archive" method to copy files recursively with permissions/ownership/etc
"-v", // show verbose output
"-P", // show progress, and resume aborted/partial transfers
"--delete", // delete files in dest that are not in source
}
podArgs = append(podArgs, rsyncFlags...)
podArgs = append(podArgs, "/source/", "/dest")

createdPod, err := clientset.CoreV1().Pods(ns).Create(ctx, &corev1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
Expand Down Expand Up @@ -381,14 +391,7 @@ func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns s
Command: []string{
"rsync",
},
Args: []string{
"-a", // use the "archive" method to copy files recursively with permissions/ownership/etc
"-v", // show verbose output
"-P", // show progress, and resume aborted/partial transfers
"--delete", // delete files in dest that are not in source
"/source/",
"/dest",
},
Args: podArgs,
VolumeMounts: []corev1.VolumeMount{
{
MountPath: "/source",
Expand Down
82 changes: 80 additions & 2 deletions pkg/migrate/migrate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -930,6 +930,7 @@ func Test_createMigrationPod(t *testing.T) {
destPvcName string
rsyncImage string
nodeName string
rsyncFlags []string
}
tests := []struct {
name string
Expand Down Expand Up @@ -1186,6 +1187,83 @@ func Test_createMigrationPod(t *testing.T) {
isDestScLocalVolumeProvisioner = false
},
},
{
name: "additional rsync flags",
args: args{
ns: "testns",
sourcePvcName: "sourcepvc",
destPvcName: "destpvc",
rsyncImage: "imagename",
nodeName: "node1",
rsyncFlags: []string{"--exclude", "foo", "--no-o", "--no-g"},
},
want: &corev1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "migrate-sourcepvc",
Namespace: "testns",
Labels: map[string]string{
baseAnnotation: "sourcepvc",
},
},
Spec: corev1.PodSpec{
Affinity: nil,
RestartPolicy: corev1.RestartPolicyNever,
Volumes: []corev1.Volume{
{
Name: "source",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "sourcepvc",
},
},
},
{
Name: "dest",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "destpvc",
},
},
},
},
Containers: []corev1.Container{
{
Name: "pvmigrate",
Image: "imagename",
Command: []string{
"rsync",
},
Args: []string{
"-a", // use the "archive" method to copy files recursively with permissions/ownership/etc
"-v", // show verbose output
"-P", // show progress, and resume aborted/partial transfers
"--delete", // delete files in dest that are not in source
"--exclude", "foo",
"--no-o",
"--no-g",
"/source/",
"/dest",
},
VolumeMounts: []corev1.VolumeMount{
{
MountPath: "/source",
Name: "source",
},
{
MountPath: "/dest",
Name: "dest",
},
},
},
},
},
Status: corev1.PodStatus{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand All @@ -1196,7 +1274,7 @@ func Test_createMigrationPod(t *testing.T) {
tt.setGlobalFunc()
}

got, err := createMigrationPod(context.Background(), clientset, tt.args.ns, tt.args.sourcePvcName, tt.args.destPvcName, tt.args.rsyncImage, tt.args.nodeName)
got, err := createMigrationPod(context.Background(), clientset, tt.args.ns, tt.args.sourcePvcName, tt.args.destPvcName, tt.args.rsyncImage, tt.args.nodeName, tt.args.rsyncFlags)

if tt.clearGlobalFunc != nil {
tt.clearGlobalFunc()
Expand Down Expand Up @@ -3142,7 +3220,7 @@ func Test_copyAllPVCs(t *testing.T) {
}
}(testCtx, testlog, clientset, tt.events)

err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Millisecond*10)
err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Millisecond*10, nil)
if tt.wantErr {
req.Error(err)
testlog.Printf("got expected error %q", err.Error())
Expand Down