From 986452a6725ea60643f4d54f0c8b81d4a2864b98 Mon Sep 17 00:00:00 2001 From: Rian Quinn Date: Wed, 27 Mar 2019 06:06:39 -0600 Subject: [PATCH] Add vClock/vIRQ support This patch adds support for vIRQs and a vClock for the Boxy hypervisor. --- arch/x86/Kbuild | 2 + arch/x86/Kconfig | 2 + arch/x86/boxy/Kconfig | 13 +++ arch/x86/boxy/Makefile | 1 + arch/x86/boxy/init.c | 84 ++++++++++++++ arch/x86/boxy/quirk.c | 49 ++++++++ arch/x86/boxy/vclock.c | 181 ++++++++++++++++++++++++++++++ arch/x86/boxy/virq.c | 64 +++++++++++ arch/x86/boxy/vmcall.S | 131 +++++++++++++++++++++ arch/x86/entry/entry_64.S | 4 + arch/x86/include/asm/boxy.h | 137 ++++++++++++++++++++++ arch/x86/include/asm/hypervisor.h | 1 + arch/x86/kernel/apic/apic.c | 4 +- arch/x86/kernel/cpu/hypervisor.c | 7 ++ 14 files changed, 678 insertions(+), 2 deletions(-) create mode 100644 arch/x86/boxy/Kconfig create mode 100644 arch/x86/boxy/Makefile create mode 100644 arch/x86/boxy/init.c create mode 100644 arch/x86/boxy/quirk.c create mode 100644 arch/x86/boxy/vclock.c create mode 100644 arch/x86/boxy/virq.c create mode 100644 arch/x86/boxy/vmcall.S create mode 100644 arch/x86/include/asm/boxy.h diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 0038a2d10a7a57..2ee19dd8195de7 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -10,6 +10,8 @@ obj-$(CONFIG_XEN) += xen/ # Hyper-V paravirtualization support obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ +obj-$(CONFIG_BOXY) += boxy/ + obj-y += realmode/ obj-y += kernel/ obj-y += mm/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8689e794a43c84..b63fc2675dccca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -782,6 +782,8 @@ config QUEUED_LOCK_STAT behavior of paravirtualized queued spinlocks and report them on debugfs. +source "arch/x86/boxy/Kconfig" + source "arch/x86/xen/Kconfig" config KVM_GUEST diff --git a/arch/x86/boxy/Kconfig b/arch/x86/boxy/Kconfig new file mode 100644 index 00000000000000..e50297b764931d --- /dev/null +++ b/arch/x86/boxy/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# This Kconfig describes Boxy options +# + +config BOXY + bool "Boxy guest support" + depends on PARAVIRT && X86_64 + ---help--- + This option allows you to run Linux as a Boxy guest virtual machine. + Boxy requires a very specific kernel config so in general, this + option should not be manually selected and instead is enabled by the + Boxy build system with the proper config in place. diff --git a/arch/x86/boxy/Makefile b/arch/x86/boxy/Makefile new file mode 100644 index 00000000000000..9d05417dfd4fea --- /dev/null +++ b/arch/x86/boxy/Makefile @@ -0,0 +1 @@ +obj-y := init.o virq.o vclock.o vmcall.o quirk.o diff --git a/arch/x86/boxy/init.c b/arch/x86/boxy/init.c new file mode 100644 index 00000000000000..18c0c17c58f860 --- /dev/null +++ b/arch/x86/boxy/init.c @@ -0,0 +1,84 @@ +/** + * Copyright (C) 2019 Assured Information Security, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include
+#include
+
+static uint32_t __init boxy_detect(void)
+{
+	uint32_t eax;
+	uint32_t ignore[3];
+
+	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return 0;
+
+	cpuid(CPUID_BAREFLANK_SYN, &eax, &ignore[0], &ignore[1], &ignore[2]);
+
+	/**
+	 * TODO:
+	 *
+	 * We need to add a Boxy-specific CPUID leaf at 0x40000000 that acts
+	 * like VMware and Hyper-V so that we play nice with nested
+	 * virtualization. More importantly, right now we ACK as Bareflank and
+	 * not Boxy, so this code could end up detecting someone else's
+	 * hypervisor.
+	 */
+
+	/**
+	 * TODO:
+	 *
+	 * We need to implement versioning to ensure that we are using a guest
+	 * that actually knows how to talk to the hypervisor.
+	 */
+
+	if (eax == CPUID_BAREFLANK_ACK)
+		return 1;
+
+	return 0;
+}
+
+static void __init boxy_init_platform(void)
+{
+	pv_info.name = "Boxy Hypervisor";
+
+	boxy_virq_init();
+	boxy_vclock_init();
+
+	x86_init.resources.probe_roms = x86_init_noop;
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = boxy_apic_quirk;
+	x86_init.irqs.pre_vector_init = x86_init_noop;
+	x86_init.oem.arch_setup = x86_init_noop;
+	x86_init.oem.banner = x86_init_noop;
+
+	x86_platform.legacy.rtc = 0;
+	x86_platform.legacy.warm_reset = 0;
+	x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
+
+	legacy_pic = &null_legacy_pic;
+}
+
+static bool __init boxy_x2apic_available(void)
+{ return true; }
+
+const __initconst struct hypervisor_x86 x86_hyper_boxy = {
+	.name = "Boxy Hypervisor",
+	.detect = boxy_detect,
+	.type = X86_HYPER_BOXY,
+	.init.init_platform = boxy_init_platform,
+	.init.x2apic_available = boxy_x2apic_available,
+};
diff --git a/arch/x86/boxy/quirk.c b/arch/x86/boxy/quirk.c
new file mode 100644
index 00000000000000..3a1e4e348f3e97
--- /dev/null
+++ b/arch/x86/boxy/quirk.c
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2019 Assured Information Security, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include
+#include
+#include
+
+/**
+ * Quirk Notes
+ *
+ * Like Jailhouse, we require an x2apic, and currently, without the code
+ * below, the kernel will crash as it attempts to read an x2apic register
+ * before the apic variable is set. The code below ensures that we end up
+ * in symmetric IO mode with a physical x2apic while also setting the apic
+ * variable so that the kernel doesn't crash.
+ *
+ * If you enable ACPI, this bug will go away, as ACPI happens to call the
+ * default_acpi_madt_oem_check() function, which sets the apic variable in
+ * the kernel before the init_apic_mappings() function is called.
The crash + * occurs here: + * https://elixir.bootlin.com/linux/latest/source/arch/x86/kernel/apic/apic.c#L1969 + * + * Since we are calling the default_acpi_madt_oem_check() function manually, + * we need to ensure that the apic is configured properly for Boxy guests + * which includes a physical x2apic and symmetric IO mode. + */ + +void __init boxy_apic_quirk(unsigned int early) +{ + x2apic_phys = 1; + smp_found_config = 1; + + default_acpi_madt_oem_check("", ""); +} diff --git a/arch/x86/boxy/vclock.c b/arch/x86/boxy/vclock.c new file mode 100644 index 00000000000000..2a5c30b04fdf53 --- /dev/null +++ b/arch/x86/boxy/vclock.c @@ -0,0 +1,181 @@ +/** + * Copyright (C) 2019 Assured Information Security, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include + +static uint64_t g_tsc_offset = 0; +static uint64_t g_tsc_freq_khz = 0; + +/******************************************************************************/ +/* helpers */ +/******************************************************************************/ + +static uint64_t mul_div(uint64_t x, uint64_t n, uint64_t d) +{ return ((x / d) * n) + (((x % d) * n) / d); } + +static uint64_t tsc_to_nsec(uint64_t tsc) +{ return mul_div(tsc, 1000000, g_tsc_freq_khz); } + +/******************************************************************************/ +/* clock source */ +/******************************************************************************/ + +static u64 boxy_clocksource_read(struct clocksource *cs) +{ return rdtsc_ordered() - g_tsc_offset; } + +static struct clocksource boxy_clocksource = { + .name = "boxy-clocksource", + .read = boxy_clocksource_read, + .rating = 500, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES, + .archdata.vclock_mode = VCLOCK_TSC +}; + +/******************************************************************************/ +/* clock event */ +/******************************************************************************/ + +static int boxy_set_next_event( + unsigned long delta, struct clock_event_device *evt) +{ + if (hypercall_vclock_op__set_next_event(delta) != SUCCESS) { + pr_err("hypercall_vclock_op__set_next_event failed"); + BUG(); + } + + return 0; +} + +static struct clock_event_device boxy_clock_event_device = { + .name = "boxy-clock-event-device", + .features = CLOCK_EVT_FEAT_ONESHOT, + .mult = 1, + .shift = 0, + .rating = 500, + .set_next_event = boxy_set_next_event, +}; + +void boxy_vclock_event_handler(void) +{ boxy_clock_event_device.event_handler(&boxy_clock_event_device); } + +static void __init boxy_setup_percpu_clockev(void) +{ + boxy_clock_event_device.cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register( + &boxy_clock_event_device, g_tsc_freq_khz * 1000, 0, ~0UL); +} + 
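+/**
+ * Note:
+ *
+ * clockevents_config_and_register() above is given the TSC frequency in Hz
+ * (g_tsc_freq_khz * 1000), so the delta that the clockevents core later
+ * passes to boxy_set_next_event() is expressed in TSC ticks. That delta is
+ * handed straight to the hypervisor via
+ * hypercall_vclock_op__set_next_event(), and the hypervisor is expected to
+ * answer with a boxy_virq__vclock_event_handler vIRQ once the deadline
+ * expires, which in turn invokes boxy_vclock_event_handler() above.
+ */
+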
+/******************************************************************************/
+/* pv_ops */
+/******************************************************************************/
+
+static u64 boxy_sched_clock(void)
+{ return tsc_to_nsec(boxy_clocksource_read(0)); }
+
+static u64 boxy_steal_clock(int cpu)
+{
+	/**
+	 * Note:
+	 *
+	 * For now we do not support the steal clock. Timekeeping seems to work
+	 * fine without it, and implementing this would require not only an
+	 * additional VMCall on every sched_clock() call, but it would also
+	 * require the hypervisor to perform timekeeping on every exit and
+	 * entry to account for the time that the VM is actually executing.
+	 */
+
+	return 0;
+}
+
+/******************************************************************************/
+/* x86_platform_ops */
+/******************************************************************************/
+
+static unsigned long tsc_freq_khz(void)
+{ return g_tsc_freq_khz; }
+
+/******************************************************************************/
+/* init functions */
+/******************************************************************************/
+
+static void wallclock_init(void)
+{
+	if (hypercall_vclock_op__reset_host_wallclock() != SUCCESS) {
+		pr_err("hypercall_vclock_op__reset_host_wallclock failed");
+		BUG();
+	}
+
+	if (hypercall_vclock_op__set_guest_wallclock_rtc() != SUCCESS) {
+		pr_err("hypercall_vclock_op__set_guest_wallclock_rtc failed");
+		BUG();
+	}
+
+	if (hypercall_vclock_op__set_guest_wallclock_tsc() != SUCCESS) {
+		pr_err("hypercall_vclock_op__set_guest_wallclock_tsc failed");
+		BUG();
+	}
+}
+
+void __init read_persistent_wall_and_boot_offset(
+	struct timespec64 *wall_time, struct timespec64 *boot_offset)
+{
+	uint64_t ret, tsc;
+	struct timespec64 wallclock;
+
+	ret = hypercall_vclock_op__get_guest_wallclock(
+		&wallclock.tv_sec, &wallclock.tv_nsec, &tsc);
+	if (ret != SUCCESS) {
+		pr_err("hypercall_vclock_op__get_guest_wallclock failed");
+		BUG();
+	}
+
+	*wall_time = wallclock;
+	*boot_offset = ns_to_timespec64(tsc_to_nsec(tsc - g_tsc_offset));
+}
+
+void __init boxy_vclock_init(void)
+{
+	g_tsc_freq_khz = hypercall_vclock_op__get_tsc_freq_khz();
+	if (g_tsc_freq_khz == FAILURE) {
+		pr_err("hypercall_vclock_op__get_tsc_freq_khz failed");
+		BUG();
+	}
+
+	pv_ops.time.sched_clock = boxy_sched_clock;
+	pv_ops.time.steal_clock = boxy_steal_clock;
+
+	x86_init.timers.setup_percpu_clockev = boxy_setup_percpu_clockev;
+	x86_init.timers.timer_init = x86_init_noop;
+	x86_init.timers.wallclock_init = wallclock_init;
+
+	x86_platform.calibrate_tsc = tsc_freq_khz;
+	x86_platform.calibrate_cpu = tsc_freq_khz;
+
+	g_tsc_offset = rdtsc_ordered();
+	clocksource_register_khz(&boxy_clocksource, g_tsc_freq_khz);
+
+	setup_force_cpu_cap(X86_FEATURE_NONSTOP_TSC);
+	setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+}
diff --git a/arch/x86/boxy/virq.c b/arch/x86/boxy/virq.c
new file mode 100644
index 00000000000000..a453156073773a
--- /dev/null
+++ b/arch/x86/boxy/virq.c
@@ -0,0 +1,64 @@
+/**
+ * Copyright (C) 2019 Assured Information Security, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include + +__visible void __irq_entry boxy_virq_handler(struct pt_regs *regs) +{ + uint64_t virq; + + struct pt_regs *old_regs = set_irq_regs(regs); + irq_enter(); + + virq = hypercall_virq_op__get_next_virq(); + if (virq == FAILURE) { + pr_err("hypercall_virq_op__get_next_virq failed"); + BUG(); + } + + switch(virq) { + case boxy_virq__vclock_event_handler: + boxy_vclock_event_handler(); + break; + + default: + pr_err("unknown virq"); + BUG(); + } + + irq_exit(); + set_irq_regs(old_regs); +} + +void __init boxy_virq_init(void) +{ + uint64_t ret; + + ret = hypercall_virq_op__set_hypervisor_callback_vector( + HYPERVISOR_CALLBACK_VECTOR); + if (ret != SUCCESS) { + pr_err("hypercall_virq_op__set_hypervisor_callback_vector failed"); + BUG(); + } + + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, boxy_virq_handler_sym); +} diff --git a/arch/x86/boxy/vmcall.S b/arch/x86/boxy/vmcall.S new file mode 100644 index 00000000000000..5eff880e9dd148 --- /dev/null +++ b/arch/x86/boxy/vmcall.S @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2019 Assured Information Security, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + + .code64 + .intel_syntax noprefix + + .globl asm_vmcall + .type asm_vmcall, @function +asm_vmcall: + + push rbx + + mov r9, rdx + mov r8, rcx + + mov rax, rdi + mov rbx, rsi + mov rcx, r9 + mov rdx, r8 + + vmcall + + pop rbx + ret + + .globl asm_vmcall1 + .type asm_vmcall1, @function +asm_vmcall1: + + push rbx + + mov r8, rdi + + mov rax, [r8] + mov rbx, 0 + mov rcx, 0 + mov rdx, 0 + + vmcall + + mov [r8], rax + + pop rbx + ret + + .globl asm_vmcall2 + .type asm_vmcall2, @function +asm_vmcall2: + + push rbx + + mov r8, rdi + mov r9, rsi + + mov rax, [r8] + mov rbx, [r9] + mov rcx, 0 + mov rdx, 0 + + vmcall + + mov [r8], rax + mov [r9], rbx + + pop rbx + ret + + .globl asm_vmcall3 + .type asm_vmcall3, @function +asm_vmcall3: + + push rbx + + mov r8, rdi + mov r9, rsi + mov r10, rdx + + mov rax, [r8] + mov rbx, [r9] + mov rcx, [r10] + mov rdx, 0 + + vmcall + + mov [r8], rax + mov [r9], rbx + mov [r10], rcx + + pop rbx + ret + + .globl asm_vmcall4 + .type asm_vmcall4, @function +asm_vmcall4: + + push rbx + + mov r8, rdi + mov r9, rsi + mov r10, rdx + mov r11, rcx + + mov rax, [r8] + mov rbx, [r9] + mov rcx, [r10] + mov rdx, [r11] + + vmcall + + mov [r8], rax + mov [r9], rbx + mov [r10], rcx + mov [r11], rdx + + pop rbx + ret diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 1f0efdb7b6294d..1d5599656ed6e5 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1117,6 +1117,10 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ xen_hvm_callback_vector xen_evtchn_do_upcall #endif +#ifdef CONFIG_BOXY +apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ + boxy_virq_handler_sym boxy_virq_handler +#endif #if IS_ENABLED(CONFIG_HYPERV) apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ diff --git a/arch/x86/include/asm/boxy.h b/arch/x86/include/asm/boxy.h new file mode 100644 index 00000000000000..ff209ce0bc076d --- /dev/null +++ b/arch/x86/include/asm/boxy.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_BOXY_H +#define _ASM_X86_BOXY_H + +#include +#include + +#include +#include +#include +#include +#include + +#define SUCCESS 0 +#define FAILURE 0xFFFFFFFFFFFFFFFF +#define SUSPEND 0xFFFFFFFFFFFFFFFE + +#define status_t int64_t + +/* -------------------------------------------------------------------------- */ +/* VMCall Prototypes */ +/* -------------------------------------------------------------------------- */ + +uint64_t asm_vmcall(uint64_t r1, uint64_t r2, uint64_t r3, uint64_t r4); +uint64_t asm_vmcall1(void *r1); +uint64_t asm_vmcall2(void *r1, void *r2); +uint64_t asm_vmcall3(void *r1, void *r2, void *r3); +uint64_t asm_vmcall4(void *r1, void *r2, void *r3, void *r4); + +/* -------------------------------------------------------------------------- */ +/* CPUID */ +/* -------------------------------------------------------------------------- */ + +#define CPUID_BAREFLANK_SYN 0x4BF00000 +#define CPUID_BAREFLANK_ACK 0x4BF00001 + +/* -------------------------------------------------------------------------- */ +/* Virtual IRQs */ +/* -------------------------------------------------------------------------- */ + +void boxy_virq_init(void); +void boxy_virq_handler_sym(void); + +#define boxy_virq__vclock_event_handler 0xBF00000000000201 + +#define hypercall_enum_virq_op__set_hypervisor_callback_vector 0xBF10000000000100 +#define hypercall_enum_virq_op__get_next_virq 0xBF10000000000101 + +static inline status_t +hypercall_virq_op__set_hypervisor_callback_vector(uint64_t vector) +{ + return asm_vmcall( + 
hypercall_enum_virq_op__set_hypervisor_callback_vector, vector, 0, 0); +} + +static inline status_t +hypercall_virq_op__get_next_virq(void) +{ + return asm_vmcall( + hypercall_enum_virq_op__get_next_virq, 0, 0, 0); +} + +/* -------------------------------------------------------------------------- */ +/* Virtual Clock */ +/* -------------------------------------------------------------------------- */ + +void boxy_vclock_init(void); +void boxy_vclock_event_handler(void); + +#define hypercall_enum_vclock_op__get_tsc_freq_khz 0xBF11000000000100 +#define hypercall_enum_vclock_op__set_next_event 0xBF11000000000102 +#define hypercall_enum_vclock_op__reset_host_wallclock 0xBF11000000000103 +#define hypercall_enum_vclock_op__set_host_wallclock_rtc 0xBF11000000000104 +#define hypercall_enum_vclock_op__set_host_wallclock_tsc 0xBF11000000000105 +#define hypercall_enum_vclock_op__set_guest_wallclock_rtc 0xBF11000000000106 +#define hypercall_enum_vclock_op__set_guest_wallclock_tsc 0xBF11000000000107 +#define hypercall_enum_vclock_op__get_guest_wallclock 0xBF11000000000108 + +static inline status_t +hypercall_vclock_op__get_tsc_freq_khz(void) +{ + return asm_vmcall( + hypercall_enum_vclock_op__get_tsc_freq_khz, 0, 0, 0); +} + +static inline status_t +hypercall_vclock_op__set_next_event(uint64_t tsc_delta) +{ + return asm_vmcall( + hypercall_enum_vclock_op__set_next_event, tsc_delta, 0, 0); +} + +static inline status_t +hypercall_vclock_op__reset_host_wallclock(void) +{ + return asm_vmcall( + hypercall_enum_vclock_op__reset_host_wallclock, 0, 0, 0 + ); +} + +static inline status_t +hypercall_vclock_op__set_guest_wallclock_rtc(void) +{ + return asm_vmcall( + hypercall_enum_vclock_op__set_guest_wallclock_rtc, 0, 0, 0 + ); +} + +static inline status_t +hypercall_vclock_op__set_guest_wallclock_tsc(void) +{ + return asm_vmcall( + hypercall_enum_vclock_op__set_guest_wallclock_tsc, 0, 0, 0 + ); +} + +static inline status_t +hypercall_vclock_op__get_guest_wallclock( + int64_t *sec, long *nsec, uint64_t *tsc) +{ + uint64_t op = hypercall_enum_vclock_op__get_guest_wallclock; + + if (sec == 0 || nsec == 0 || tsc == 0) { + return FAILURE; + } + + return asm_vmcall4( + &op, sec, nsec, tsc); +} + +/* -------------------------------------------------------------------------- */ +/* Quirks */ +/* -------------------------------------------------------------------------- */ + +void boxy_apic_quirk(unsigned int early); + +#endif diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 8c5aaba6633f20..9fe84984a3c45a 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -29,6 +29,7 @@ enum x86_hypervisor_type { X86_HYPER_XEN_HVM, X86_HYPER_KVM, X86_HYPER_JAILHOUSE, + X86_HYPER_BOXY, }; #ifdef CONFIG_HYPERVISOR_GUEST diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 8e0d0fdfbc54b5..32b2b7a41ef5d3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1753,13 +1753,13 @@ static __init void try_to_enable_x2apic(int remap_mode) /* IR is required if there is APIC ID > 255 even when running * under KVM */ -/* if (max_physical_apicid > 255 || + if (max_physical_apicid > 255 || !x86_init.hyper.x2apic_available()) { pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); x2apic_disable(); return; } -*/ + /* * without IR all CPUs can be addressed by IOAPIC/MSI * only in physical mode diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 479ca4728de01a..e6f57e1d3a947b 100644 
--- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -33,6 +33,10 @@ extern const struct hypervisor_x86 x86_hyper_xen_hvm; extern const struct hypervisor_x86 x86_hyper_kvm; extern const struct hypervisor_x86 x86_hyper_jailhouse; +#ifdef CONFIG_BOXY +extern const struct hypervisor_x86 x86_hyper_boxy; +#endif + static const __initconst struct hypervisor_x86 * const hypervisors[] = { #ifdef CONFIG_XEN_PV @@ -49,6 +53,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #ifdef CONFIG_JAILHOUSE_GUEST &x86_hyper_jailhouse, #endif +#ifdef CONFIG_BOXY + &x86_hyper_boxy, +#endif }; enum x86_hypervisor_type x86_hyper_type;
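
For reference, the TSC-to-nanoseconds conversion used by arch/x86/boxy/vclock.c can be sanity-checked in isolation with the standalone userspace sketch below (illustration only, not part of the patch). It re-implements mul_div() and tsc_to_nsec() and cross-checks the split quotient/remainder form against full-width arithmetic; the 3 GHz TSC frequency and the tick counts are arbitrary example values, and the unsigned __int128 comparison assumes a GCC/Clang host.

#include <stdint.h>
#include <stdio.h>

/* Same form as the helper in arch/x86/boxy/vclock.c: computes x * n / d
 * without forming the full 128-bit product x * n. */
static uint64_t mul_div(uint64_t x, uint64_t n, uint64_t d)
{
	return ((x / d) * n) + (((x % d) * n) / d);
}

/* ns = ticks * 1,000,000 / tsc_freq_khz, the same formula as vclock.c's
 * tsc_to_nsec(), parameterized on the frequency for this test. */
static uint64_t tsc_to_nsec(uint64_t tsc, uint64_t tsc_freq_khz)
{
	return mul_div(tsc, 1000000, tsc_freq_khz);
}

int main(void)
{
	const uint64_t freq_khz = 3000000;		/* example: 3 GHz TSC */
	const uint64_t one_second = 3000000000ULL;	/* 3e9 ticks at 3 GHz */
	const uint64_t big_delta = (1ULL << 52) - 1;	/* large tick count */
	unsigned __int128 exact =
		(unsigned __int128)big_delta * 1000000 / freq_khz;

	/* One second worth of ticks converts to exactly 1,000,000,000 ns. */
	printf("1s of ticks -> %llu ns\n",
	       (unsigned long long)tsc_to_nsec(one_second, freq_khz));

	/* The quotient/remainder split matches the full-width result. */
	printf("matches 128-bit math: %s\n",
	       tsc_to_nsec(big_delta, freq_khz) == (uint64_t)exact ?
	       "yes" : "no");

	return 0;
}

Built with something like "gcc -O2 -o vclock_check vclock_check.c", it should print 1000000000 ns for the one-second case and confirm that the split form matches the 128-bit result.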