VM Object graph: Include backend PVC #321

Merged · 4 commits · Jan 22, 2025
12 changes: 10 additions & 2 deletions hack/cluster-deploy-prerequisites.sh
@@ -49,8 +49,16 @@ until _kubectl wait -n kubevirt kv kubevirt --for condition=Available --timeout
sleep 1m
done

- # Patch kubevirt with hotplug feature gate enabled
- _kubectl patch -n kubevirt kubevirt kubevirt --type merge -p '{"spec": {"configuration": { "developerConfiguration": { "featureGates": ["HotplugVolumes"] }}}}'
+ # Get the default storage class to patch vmStateStorageClass
+ # TODO: Improve vmStateStorageClass handling
+ DEFAULT_STORAGE_CLASS=$(_kubectl get storageclass -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}')
+ if [ -n "$DEFAULT_STORAGE_CLASS" ]; then
+     # Patch kubevirt with VM state storage class
+     _kubectl patch -n kubevirt kubevirt kubevirt --type merge -p '{"spec": {"configuration": { "vmStateStorageClass": "'$DEFAULT_STORAGE_CLASS'" }}}'
+ fi
+
+ # Patch kubevirt with hotplug and persistent VM state feature gate enabled
+ _kubectl patch -n kubevirt kubevirt kubevirt --type merge -p '{"spec": {"configuration": { "developerConfiguration": { "featureGates": ["HotplugVolumes", "VMPersistentState"] }}}}'

if [[ "$KUBEVIRT_DEPLOY_CDI" != "false" ]] && [[ $CDI_DV_GC != "0" ]]; then
_kubectl patch cdi cdi --type merge -p '{"spec": {"config": {"dataVolumeTTLSeconds": '"$CDI_DV_GC"'}}}'
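Note (editorial context, not part of the diff above): vmStateStorageClass and the VMPersistentState feature gate control where KubeVirt keeps persistent VM state such as a persistent TPM. A minimal Go sketch of a VM template that opts into this, using the same kvcore types as the tests below; the wrapping package, main function, and printed output are illustrative only:

package main

import (
	"fmt"

	kvcore "kubevirt.io/api/core/v1"
)

func main() {
	persistent := true
	// A VMI spec whose TPM device is persistent. With VMPersistentState enabled
	// and vmStateStorageClass set, KubeVirt stores this state in a "backend" PVC,
	// which the rest of this PR adds to the backup and restore object graphs.
	spec := kvcore.VirtualMachineInstanceSpec{
		Domain: kvcore.DomainSpec{
			Devices: kvcore.Devices{
				TPM: &kvcore.TPMDevice{Persistent: &persistent},
			},
		},
	}
	fmt.Println(*spec.Domain.Devices.TPM.Persistent)
}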
2 changes: 1 addition & 1 deletion hack/config.sh
@@ -17,7 +17,7 @@
KUBEVIRT_MEMORY_SIZE=${KUBEVIRT_MEMORY_SIZE:-9216M}
KUBEVIRT_PROVIDER=${KUBEVIRT_PROVIDER:-k8s-1.29}
KUBEVIRT_DEPLOY_CDI=true
- KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v1.1.1}
+ KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v1.4.0}
KUBEVIRT_DEPLOYMENT_TIMEOUT=${KUBEVIRT_DEPLOYMENT_TIMEOUT:-480}

if [ -f cluster-up/hack/common.sh ]; then
6 changes: 5 additions & 1 deletion pkg/plugin/vm_restore_item_action.go
@@ -87,6 +87,10 @@ func (p *VMRestorePlugin) Execute(input *velero.RestoreItemActionExecuteInput) (
}

output := velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: item})
- output.AdditionalItems = kvgraph.NewVirtualMachineRestoreGraph(vm)
+ output.AdditionalItems, err = kvgraph.NewVirtualMachineRestoreGraph(vm)
+ if err != nil {
+     return nil, errors.WithStack(err)
+ }
+
return output, nil
}
6 changes: 5 additions & 1 deletion pkg/plugin/vmi_restore_item_action.go
@@ -102,7 +102,11 @@ func (p *VMIRestorePlugin) Execute(input *velero.RestoreItemActionExecuteInput)
metadata.SetLabels(labels)

output := velero.NewRestoreItemActionExecuteOutput(input.Item)
- output.AdditionalItems = kvgraph.NewVirtualMachineInstanceRestoreGraph(vmi)
+ output.AdditionalItems, err = kvgraph.NewVirtualMachineInstanceRestoreGraph(vmi)
+ if err != nil {
+     return nil, errors.WithStack(err)
+ }
+
return output, nil
}

36 changes: 31 additions & 5 deletions pkg/util/kvgraph/backup_graph.go
@@ -24,6 +24,7 @@ import (
"github.com/vmware-tanzu/velero/pkg/plugin/velero"

"k8s.io/apimachinery/pkg/runtime"
+ k8serrors "k8s.io/apimachinery/pkg/util/errors"
v1 "kubevirt.io/api/core/v1"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
@@ -69,24 +70,49 @@ func NewVirtualMachineBackupGraph(vm *v1.VirtualMachine) ([]velero.ResourceIdent
if vm.Spec.Preference != nil {
resources = addPreferenceType(*vm.Spec.Preference, vm.GetNamespace(), resources)
}

+ var errs []error
if vm.Status.Created {
resources = addVeleroResource(vm.GetName(), namespace, "virtualmachineinstances", resources)
// Returning full backup even if there was an error retrieving the launcher pod.
- // The caller can decide wether to use the backup without launcher pod or handle the error.
+ // The caller can decide whether to use the backup without launcher pod or handle the error.
resources, err = addLauncherPod(vm.GetName(), vm.GetNamespace(), resources)
+ if err != nil {
+     errs = append(errs, err)
+ }
}

+ resources, err = addCommonVMIObjectGraph(vm.Spec.Template.Spec, vm.GetName(), namespace, true, resources)
+ if err != nil {
+     errs = append(errs, err)
+ }
+ if len(errs) > 0 {
+     return resources, k8serrors.NewAggregate(errs)
+ }
+
- return addCommonVMIObjectGraph(vm.Spec.Template.Spec, namespace, true, resources), err
+ return resources, nil
}

// NewVirtualMachineInstanceBackupGraph returns the backup object graph for a specific VMI
func NewVirtualMachineInstanceBackupGraph(vmi *v1.VirtualMachineInstance) ([]velero.ResourceIdentifier, error) {
var resources []velero.ResourceIdentifier
- var err error
+ var errs []error
// Returning full backup even if there was an error retrieving the launcher pod.
// The caller can decide wether to use the backup without launcher pod or handle the error.
- resources, err = addLauncherPod(vmi.GetName(), vmi.GetNamespace(), resources)
- return addCommonVMIObjectGraph(vmi.Spec, vmi.GetNamespace(), true, resources), err
+ resources, err := addLauncherPod(vmi.GetName(), vmi.GetNamespace(), resources)
+ if err != nil {
+     errs = append(errs, err)
+ }
+
+ resources, err = addCommonVMIObjectGraph(vmi.Spec, vmi.GetName(), vmi.GetNamespace(), true, resources)
+ if err != nil {
+     errs = append(errs, err)
+ }
+ if len(errs) > 0 {
+     return resources, k8serrors.NewAggregate(errs)
+ }
+
+ return resources, nil
}

// NewDataVolumeBackupGraph returns the backup object graph for a specific DataVolume
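Note (editorial sketch, not the PR's code): the extra VM name now threaded into addCommonVMIObjectGraph is what allows the backend PVC to be looked up. The label key "persistent-state-for" and the util.ListPVCs signature are taken from the test stub further down; the helper name addBackendPVC, the selector format, and the import paths are assumptions:

package kvgraph

import (
	"fmt"

	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"kubevirt.io/kubevirt-velero-plugin/pkg/util"
)

// addBackendPVC appends the VM's persistent-state ("backend") PVC, if any,
// to the additional resources. The label key mirrors the test fixture below;
// the helper name and selector format are assumed, not copied from the PR.
func addBackendPVC(vmName, namespace string, resources []velero.ResourceIdentifier) ([]velero.ResourceIdentifier, error) {
	pvcs, err := util.ListPVCs(fmt.Sprintf("persistent-state-for=%s", vmName), namespace)
	if err != nil {
		return resources, err
	}
	for _, pvc := range pvcs.Items {
		resources = append(resources, velero.ResourceIdentifier{
			GroupResource: schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"},
			Namespace:     namespace,
			Name:          pvc.Name,
		})
	}
	return resources, nil
}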
81 changes: 78 additions & 3 deletions pkg/util/kvgraph/backup_graph_test.go
@@ -180,7 +180,7 @@ func TestNewObjectBackupGraph(t *testing.T) {
}

func TestNewVirtualMachineBackupGraph(t *testing.T) {
- getVM := func(created bool) kvcore.VirtualMachine {
+ getVM := func(created, backend bool) kvcore.VirtualMachine {
return kvcore.VirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Namespace: "",
@@ -209,6 +209,13 @@ func TestNewVirtualMachineBackupGraph(t *testing.T) {
},
},
},
+ Domain: kvcore.DomainSpec{
+     Devices: kvcore.Devices{
+         TPM: &kvcore.TPMDevice{
+             Persistent: &backend,
+         },
+     },
+ },
AccessCredentials: []kvcore.AccessCredential{
{
SSHPublicKey: &kvcore.SSHPublicKeyAccessCredential{
@@ -234,7 +241,7 @@ func TestNewVirtualMachineBackupGraph(t *testing.T) {
expected []velero.ResourceIdentifier
}{
{"Should include all related resources",
- getVM(true),
+ getVM(true, false),
[]velero.ResourceIdentifier{
{
GroupResource: schema.GroupResource{Group: "instancetype.kubevirt.io", Resource: "virtualmachineinstancetype"},
@@ -284,7 +291,7 @@ func TestNewVirtualMachineBackupGraph(t *testing.T) {
},
},
{"Should not include vmi and launcher pod",
- getVM(false),
+ getVM(false, false),
[]velero.ResourceIdentifier{
{
GroupResource: schema.GroupResource{Group: "instancetype.kubevirt.io", Resource: "virtualmachineinstancetype"},
@@ -323,6 +330,61 @@ func TestNewVirtualMachineBackupGraph(t *testing.T) {
},
},
},
+ {"Should include backend PVC",
+ getVM(true, true),
+ []velero.ResourceIdentifier{
+ {
+ GroupResource: schema.GroupResource{Group: "instancetype.kubevirt.io", Resource: "virtualmachineinstancetype"},
+ Namespace: "",
+ Name: "test-instancetype",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "apps", Resource: "controllerrevisions"},
+ Namespace: "",
+ Name: "controller-revision-instancetype",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "instancetype.kubevirt.io", Resource: "virtualmachinepreference"},
+ Namespace: "",
+ Name: "test-preference",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "apps", Resource: "controllerrevisions"},
+ Namespace: "",
+ Name: "controller-revision-preference",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "kubevirt.io", Resource: "virtualmachineinstances"},
+ Namespace: "",
+ Name: "test-vm",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "", Resource: "pods"},
+ Namespace: "",
+ Name: "test-vmi-launcher-pod",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "cdi.kubevirt.io", Resource: "datavolumes"},
+ Namespace: "",
+ Name: "test-datavolume",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"},
+ Namespace: "",
+ Name: "test-datavolume",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"},
+ Namespace: "",
+ Name: "backend-pvc",
+ },
+ {
+ GroupResource: schema.GroupResource{Group: "", Resource: "secrets"},
+ Namespace: "",
+ Name: "test-ssh-secret",
+ },
+ },
+ },
}

for _, tc := range testCases {
@@ -343,6 +405,19 @@ func TestNewVirtualMachineBackupGraph(t *testing.T) {
},
}}, nil
}
+ util.ListPVCs = func(labelSelector, ns string) (*v1.PersistentVolumeClaimList, error) {
+ return &v1.PersistentVolumeClaimList{Items: []v1.PersistentVolumeClaim{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: ns,
+ Name: "backend-pvc",
+ Labels: map[string]string{
+ "persistent-state-for": "test-vm",
+ },
+ },
+ },
+ }}, nil
+ }
resources, err := NewVirtualMachineBackupGraph(&tc.vm)
assert.NoError(t, err)
assert.Equal(t, tc.expected, resources)
12 changes: 6 additions & 6 deletions pkg/util/kvgraph/restore_graph.go
@@ -37,32 +37,32 @@ func NewObjectRestoreGraph(item runtime.Unstructured) ([]velero.ResourceIdentifi
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), vm); err != nil {
return []velero.ResourceIdentifier{}, errors.WithStack(err)
}
- return NewVirtualMachineRestoreGraph(vm), nil
+ return NewVirtualMachineRestoreGraph(vm)
case "VirtualMachineInstance":
vmi := new(v1.VirtualMachineInstance)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(item.UnstructuredContent(), vmi); err != nil {
return []velero.ResourceIdentifier{}, errors.WithStack(err)
}
- return NewVirtualMachineInstanceRestoreGraph(vmi), nil
+ return NewVirtualMachineInstanceRestoreGraph(vmi)
default:
// No specific restore graph for the passed object
return []velero.ResourceIdentifier{}, nil
}
}

// NewVirtualMachineRestoreGraph returns the restore object graph for a specific VM
- func NewVirtualMachineRestoreGraph(vm *v1.VirtualMachine) []velero.ResourceIdentifier {
+ func NewVirtualMachineRestoreGraph(vm *v1.VirtualMachine) ([]velero.ResourceIdentifier, error) {
var resources []velero.ResourceIdentifier
if vm.Spec.Instancetype != nil {
resources = addInstanceType(*vm.Spec.Instancetype, vm.GetNamespace(), resources)
}
if vm.Spec.Preference != nil {
resources = addPreferenceType(*vm.Spec.Preference, vm.GetNamespace(), resources)
}
- return addCommonVMIObjectGraph(vm.Spec.Template.Spec, vm.GetNamespace(), false, resources)
+ return addCommonVMIObjectGraph(vm.Spec.Template.Spec, vm.GetName(), vm.GetNamespace(), false, resources)
}

// NewVirtualMachineInstanceRestoreGraph returns the restore object graph for a specific VMI
- func NewVirtualMachineInstanceRestoreGraph(vmi *v1.VirtualMachineInstance) []velero.ResourceIdentifier {
- return addCommonVMIObjectGraph(vmi.Spec, vmi.GetNamespace(), false, []velero.ResourceIdentifier{})
+ func NewVirtualMachineInstanceRestoreGraph(vmi *v1.VirtualMachineInstance) ([]velero.ResourceIdentifier, error) {
+ return addCommonVMIObjectGraph(vmi.Spec, vmi.GetName(), vmi.GetNamespace(), false, []velero.ResourceIdentifier{})
}
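Note (usage sketch, not part of the PR): with the new signatures, callers of the restore graph constructors handle an error return, mirroring the item action changes above. The surrounding main function and the plugin import path are illustrative assumptions:

package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"

	"kubevirt.io/kubevirt-velero-plugin/pkg/util/kvgraph"
)

func main() {
	// A bare VM with an empty template; real callers pass the VM being restored.
	vm := &v1.VirtualMachine{
		Spec: v1.VirtualMachineSpec{
			Template: &v1.VirtualMachineInstanceTemplateSpec{},
		},
	}
	additionalItems, err := kvgraph.NewVirtualMachineRestoreGraph(vm)
	if err != nil {
		// The backup variants return a partial graph plus an aggregated error;
		// callers decide whether to continue with the partial result or fail.
		fmt.Println("failed to build restore graph:", err)
		return
	}
	fmt.Println("items to restore alongside the VM:", len(additionalItems))
}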