Block RT VM with nonroot
Currently RT VMs require the SYS_NICE capability, which the nonroot deployment does not provide, so realtime VMIs are now rejected unless the Root feature gate is enabled.

Signed-off-by: L. Pivarc <[email protected]>
xpivarc committed Oct 26, 2022
1 parent 6932d31 commit c53bed2
Showing 3 changed files with 111 additions and 85 deletions.
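
For context (not part of this commit): asking the kernel for SCHED_FIFO realtime scheduling is gated by CAP_SYS_NICE (or a nonzero RLIMIT_RTPRIO), so a launcher running without that capability cannot give vCPU threads realtime priority. A minimal standalone Go sketch of that kernel behaviour, assuming golang.org/x/sys/unix on Linux:

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// schedParam mirrors the C struct sched_param (a single priority field).
type schedParam struct {
	priority int32
}

func main() {
	// Ask the kernel for a SCHED_FIFO realtime priority on the calling
	// thread -- the same kind of request made for realtime vCPU threads.
	p := schedParam{priority: 1}
	_, _, errno := unix.Syscall(
		unix.SYS_SCHED_SETSCHEDULER,
		0, // pid 0 == the calling thread
		uintptr(unix.SCHED_FIFO),
		uintptr(unsafe.Pointer(&p)),
	)
	if errno != 0 {
		// Without CAP_SYS_NICE (and with RLIMIT_RTPRIO left at 0) this
		// fails with EPERM, which is why a nonroot launcher cannot run
		// realtime VMs.
		fmt.Println("sched_setscheduler:", errno)
		return
	}
	fmt.Println("realtime priority granted")
}
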
@@ -132,7 +132,7 @@ func ValidateVirtualMachineInstanceSpec(field *k8sfield.Path, spec *v1.VirtualMa
if maxNumberOfVolumesExceeded {
return appendNewStatusCauseForMaxNumberOfVolumesExceeded(field, causes)
}

root := config.RootEnabled()
causes = append(causes, validateHostNameNotConformingToDNSLabelRules(field, spec)...)
causes = append(causes, validateSubdomainDNSSubdomainRules(field, spec)...)
causes = append(causes, validateMemoryRequestsNegativeOrNull(field, spec)...)
@@ -149,7 +149,7 @@ func ValidateVirtualMachineInstanceSpec(field *k8sfield.Path, spec *v1.VirtualMa
causes = append(causes, validateCPUIsolatorThread(field, spec)...)
causes = append(causes, validateCPUFeaturePolicies(field, spec)...)
causes = append(causes, validateStartStrategy(field, spec)...)
causes = append(causes, validateRealtime(field, spec)...)
causes = append(causes, validateRealtime(field, spec, !root)...)
causes = append(causes, validateSpecAffinity(field, spec)...)
causes = append(causes, validateSpecTopologySpreadConstraints(field, spec)...)

@@ -1469,8 +1469,18 @@ func validateHostNameNotConformingToDNSLabelRules(field *k8sfield.Path, spec *v1
return causes
}

func validateRealtime(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec) (causes []metav1.StatusCause) {
func validateRealtime(field *k8sfield.Path, spec *v1.VirtualMachineInstanceSpec, nonroot bool) (causes []metav1.StatusCause) {
if spec.Domain.CPU != nil && spec.Domain.CPU.Realtime != nil {
if nonroot {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueRequired,
Message: fmt.Sprintf("%s must be set to false when Root feature gate is not used",
field.Child("domain", "cpu", "realtime").String(),
),
Field: field.Child("domain", "cpu", "dedicatedCpuPlacement").String(),
})
}
causes = append(causes, validateCPURealtime(field, spec)...)
causes = append(causes, validateMemoryRealtime(field, spec)...)
}
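
A rough illustration of how the new parameter changes the outcome (hypothetical, not part of the commit; it assumes it sits in the same package as validateRealtime and reuses that package's v1, k8sfield, and fmt imports):

// Hypothetical helper showing the effect of the nonroot flag.
func exampleRealtimeValidation() {
	spec := &v1.VirtualMachineInstanceSpec{
		Domain: v1.DomainSpec{
			CPU: &v1.CPU{Realtime: &v1.Realtime{}},
		},
	}

	// Root feature gate disabled -> nonroot == true -> the new cause above
	// is appended and the VMI is rejected by the admission webhook.
	for _, c := range validateRealtime(k8sfield.NewPath("fake"), spec, true) {
		fmt.Println(c.Field, c.Message)
	}

	// Root feature gate enabled -> nonroot == false -> only the existing
	// dedicatedCpuPlacement / NUMA / hugepages realtime checks run.
	_ = validateRealtime(k8sfield.NewPath("fake"), spec, false)
}
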
@@ -3648,21 +3648,18 @@ var _ = Describe("Validating VMICreate Admitter", func() {
vmi.Spec.Domain.Memory = &v1.Memory{Hugepages: &v1.Hugepages{PageSize: "2Mi"}}
vmi.Spec.Domain.CPU.NUMA = &v1.NUMA{GuestMappingPassthrough: &v1.NUMAGuestMappingPassthrough{}}
causes := ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("fake"), &vmi.Spec, config)
Expect(causes).ToNot(BeEmpty())
Expect(causes).To(ContainElement(metav1.StatusCause{Type: metav1.CauseTypeFieldValueRequired, Field: "fake.domain.cpu.dedicatedCpuPlacement", Message: "fake.domain.cpu.dedicatedCpuPlacement must be set to true when fake.domain.cpu.realtime is used"}))
})
It("should reject the realtime knob when NUMA Guest Mapping Passthrough is not defined", func() {
vmi.Spec.Domain.CPU.DedicatedCPUPlacement = true
vmi.Spec.Domain.CPU.NUMA = &v1.NUMA{}
causes := ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("fake"), &vmi.Spec, config)
Expect(causes).To(HaveLen(1))
Expect(causes).To(ContainElement(metav1.StatusCause{Type: metav1.CauseTypeFieldValueRequired, Field: "fake.domain.cpu.numa.guestMappingPassthrough", Message: "fake.domain.cpu.numa.guestMappingPassthrough must be defined when fake.domain.cpu.realtime is used"}))
})
It("should reject the realtime knob when NUMA is nil", func() {
vmi.Spec.Domain.CPU.DedicatedCPUPlacement = true
vmi.Spec.Domain.CPU.NUMA = nil
causes := ValidateVirtualMachineInstanceSpec(k8sfield.NewPath("fake"), &vmi.Spec, config)
Expect(causes).To(HaveLen(1))
Expect(causes).To(ContainElement(metav1.StatusCause{Type: metav1.CauseTypeFieldValueRequired, Field: "fake.domain.cpu.numa.guestMappingPassthrough", Message: "fake.domain.cpu.numa.guestMappingPassthrough must be defined when fake.domain.cpu.realtime is used"}))
})
})
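
Dropping ToNot(BeEmpty()) and HaveLen(1) leaves these specs asserting only the specific expected cause, which keeps them passing if the new nonroot check appends an additional cause. A minimal gomega sketch of the difference (illustrative only, not from the commit):

It("illustration: targeted cause assertions tolerate extra causes", func() {
	causes := []metav1.StatusCause{
		{Type: metav1.CauseTypeFieldValueRequired, Field: "fake.domain.cpu.numa.guestMappingPassthrough"},
		{Type: metav1.CauseTypeFieldValueRequired, Field: "fake.domain.cpu.dedicatedCpuPlacement"},
	}
	// HaveLen(1) would fail on this slice, but the targeted check still passes.
	Expect(causes).To(ContainElement(metav1.StatusCause{
		Type:  metav1.CauseTypeFieldValueRequired,
		Field: "fake.domain.cpu.numa.guestMappingPassthrough",
	}))
})
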
177 changes: 98 additions & 79 deletions tests/realtime/realtime.go
@@ -67,88 +67,107 @@ var _ = Describe("[sig-compute-realtime][Serial]Realtime", func() {
var err error
virtClient, err = kubecli.GetKubevirtClient()
Expect(err).ToNot(HaveOccurred())
checks.SkipTestIfNoFeatureGate(virtconfig.NUMAFeatureGate)
checks.SkipTestIfNoFeatureGate(virtconfig.CPUManager)
checks.SkipTestIfNotRealtimeCapable()
})

It("should start the realtime VM when no mask is specified", func() {
const noMask = ""
vmi := newFedoraRealtime(noMask)
byStartingTheVMI(vmi, virtClient)
By("Validating VCPU scheduler placement information")
pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
psOutput, err := exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -u qemu -L -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
)
Expect(err).ToNot(HaveOccurred())
slice := strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(2))
for _, l := range slice {
Expect(parsePriority(l)).To(BeEquivalentTo(1))
}
By("Validating that the memory lock limits are higher than the memory requested")
psOutput, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "grep 'locked memory' /proc/$(ps -u qemu -o pid --noheader|xargs)/limits |tr -s ' '| awk '{print $4\" \"$5}'"},
)
Expect(err).ToNot(HaveOccurred())
limits := strings.Split(strings.TrimSpace(psOutput), " ")
softLimit, err := strconv.ParseInt(limits[0], 10, 64)
Expect(err).ToNot(HaveOccurred())
hardLimit, err := strconv.ParseInt(limits[1], 10, 64)
Expect(err).ToNot(HaveOccurred())
Expect(softLimit).To(Equal(hardLimit))
mustParse := resource.MustParse(memory)
requested, canConvert := mustParse.AsInt64()
Expect(canConvert).To(BeTrue())
Expect(hardLimit).To(BeNumerically(">", requested))
By("checking if the guest is still running")
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Get(vmi.Name, &k8smetav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.Status.Phase).To(Equal(v1.Running))
Expect(console.LoginToFedora(vmi)).To(Succeed())
Context("should start the realtime VM", func() {
BeforeEach(func() {
checks.SkipTestIfNoFeatureGate(virtconfig.NUMAFeatureGate)
checks.SkipTestIfNoFeatureGate(virtconfig.CPUManager)
checks.SkipTestIfNotRealtimeCapable()
})

It("when no mask is specified", func() {
const noMask = ""
vmi := newFedoraRealtime(noMask)
byStartingTheVMI(vmi, virtClient)
By("Validating VCPU scheduler placement information")
pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
psOutput, err := exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -u qemu -L -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
)
Expect(err).ToNot(HaveOccurred())
slice := strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(2))
for _, l := range slice {
Expect(parsePriority(l)).To(BeEquivalentTo(1))
}
By("Validating that the memory lock limits are higher than the memory requested")
psOutput, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "grep 'locked memory' /proc/$(ps -u qemu -o pid --noheader|xargs)/limits |tr -s ' '| awk '{print $4\" \"$5}'"},
)
Expect(err).ToNot(HaveOccurred())
limits := strings.Split(strings.TrimSpace(psOutput), " ")
softLimit, err := strconv.ParseInt(limits[0], 10, 64)
Expect(err).ToNot(HaveOccurred())
hardLimit, err := strconv.ParseInt(limits[1], 10, 64)
Expect(err).ToNot(HaveOccurred())
Expect(softLimit).To(Equal(hardLimit))
mustParse := resource.MustParse(memory)
requested, canConvert := mustParse.AsInt64()
Expect(canConvert).To(BeTrue())
Expect(hardLimit).To(BeNumerically(">", requested))
By("checking if the guest is still running")
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Get(vmi.Name, &k8smetav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.Status.Phase).To(Equal(v1.Running))
Expect(console.LoginToFedora(vmi)).To(Succeed())
})

It("when realtime mask is specified", func() {
vmi := newFedoraRealtime("0-1,^1")
byStartingTheVMI(vmi, virtClient)
pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
By("Validating VCPU scheduler placement information")
psOutput, err := exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -u qemu -L -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
)
Expect(err).ToNot(HaveOccurred())
slice := strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(1))
Expect(parsePriority(slice[0])).To(BeEquivalentTo(1))

By("Validating the VCPU mask matches the scheduler profile for all cores")
psOutput, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -cT -u qemu |grep -i cpu |awk '{print $3\" \" $8}'"},
)
Expect(err).ToNot(HaveOccurred())
slice = strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(2))
Expect(slice[0]).To(Equal("FF 0/KVM"))
Expect(slice[1]).To(Equal("TS 1/KVM"))

By("checking if the guest is still running")
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Get(vmi.Name, &k8smetav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.Status.Phase).To(Equal(v1.Running))
Expect(console.LoginToFedora(vmi)).To(Succeed())
})
})

It("should start the realtime VM when realtime mask is specified", func() {
vmi := newFedoraRealtime("0-1,^1")
byStartingTheVMI(vmi, virtClient)
pod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault)
By("Validating VCPU scheduler placement information")
psOutput, err := exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -u qemu -L -o policy,rtprio,psr|grep FF| awk '{print $2}'"},
)
Expect(err).ToNot(HaveOccurred())
slice := strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(1))
Expect(parsePriority(slice[0])).To(BeEquivalentTo(1))

By("Validating the VCPU mask matches the scheduler profile for all cores")
psOutput, err = exec.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{tests.BinBash, "-c", "ps -cT -u qemu |grep -i cpu |awk '{print $3\" \" $8}'"},
)
Expect(err).ToNot(HaveOccurred())
slice = strings.Split(strings.TrimSpace(psOutput), "\n")
Expect(slice).To(HaveLen(2))
Expect(slice[0]).To(Equal("FF 0/KVM"))
Expect(slice[1]).To(Equal("TS 1/KVM"))

By("checking if the guest is still running")
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Get(vmi.Name, &k8smetav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.Status.Phase).To(Equal(v1.Running))
Expect(console.LoginToFedora(vmi)).To(Succeed())
Context("with NonRoot", func() {
BeforeEach(func() {
if checks.HasFeature(virtconfig.Root) {
Skip("Root feature gate is enabled")
}
})

It("should deny the realtime VM", func() {
vmi := newFedoraRealtime("0-1,^1")
_, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Root feature gate is not used"))
})
})

})
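
For reference (not part of the commit), the grep over /proc/<pid>/limits in the specs above reads QEMU's "Max locked memory" pair, i.e. RLIMIT_MEMLOCK. The same kind of soft/hard limit pair can be queried directly with Getrlimit, shown here for the calling process in a standalone sketch using golang.org/x/sys/unix:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit
	// "Max locked memory" in /proc/<pid>/limits corresponds to RLIMIT_MEMLOCK.
	if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &lim); err != nil {
		panic(err)
	}
	fmt.Printf("soft=%d hard=%d\n", lim.Cur, lim.Max)
	// The test asserts soft == hard and hard > the VMI's requested memory,
	// so that QEMU can mlock all guest RAM for the realtime workload.
}
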