Merge tag 'v6.1.119' into 6.1-main
This is the 6.1.119 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdAlzgACgkQONu9yGCS
# aT42uQ//dtRiaR95GtZ741kjEp3bGCYbSvhQsnCF2O5Ft1ajd7zenog2oZ83Xvnk
# JGZwtRvLrBvYekBFHiftF0i0vCxmLPQVx+GUIqCFU/ZZdITw+n99DH/zfl9soXmh
# 0DpZqNos0TkBdPmw0vYB+mIibAjG5brMwfdrXy5U0T1lrw1EeaUxat05fMBiF5XO
# vSK450RgzGa7h+8jUyK4eR7P+aUjNoUl3XZstFK1YzbVznXomvmXGFB0Xt3KIFEZ
# 5waNFJnYkkB/W9IgrcXNtskPUwh3wJ0RltavQcwnCIeRUCousW+tFPn9OGHbOC9V
# VJGiIuzGIQYEY71Ds7O6CD/3EXWs3fn1qJUU40IvJJvhh5z8G3zJWGXolaIJqLzW
# EkfzXF+mtMzjPm+IR2UrGtVMvDEOi7CdBVGDUrxqRiSrZvBKgu4zbF1ZiDI49hcP
# GuYgcPbEF28W4DoABhbzDMvIkyuyFvBInFHnI04KFlHsGMx14Y6FaMtoOz8g8z+0
# NX43nyL2JiWsSSmiViRJlP2cIdqjZb+c0CdunTo6w/Ho4rnRbaiL3Mggt70CBMCM
# /yUCLNcx0c/sEwb0YF+JNAjqGmhVm+aUZ3CRqjPSN44k4wu6TaokhWnaB9g83KP8
# BvpGK0LWnS2whCHS0SkM96Pp5QYXDoQnRJmw/xo+oYwu4/+dK2s=
# =No10
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri Nov 22 15:37:44 2024 CET
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
frank-w committed Dec 6, 2024
2 parents 3427489 + e4d90d6 commit 6173d4e
Showing 72 changed files with 759 additions and 583 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 118
+SUBLEVEL = 119
 EXTRAVERSION =
 NAME = Curry Ramen
 

8 changes: 6 additions & 2 deletions arch/arm/kernel/head.S
@@ -252,27 +252,31 @@ __create_page_tables:
 	 */
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
+
+	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]		@ Save physical start of kernel (LE)
 #endif
+#endif
 	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
 1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
+#ifndef CONFIG_XIP_KERNEL
 	eor	r3, r3, r7		@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end	@ _pa(kernel_sec_end)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r3, [r5, #4]		@ Save physical end of kernel (BE)
 #else
 	str	r3, [r5]		@ Save physical end of kernel (LE)
 #endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
 	/*
 	 * Map the kernel image separately as it is not located in RAM.
 	 */
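
A note on the BE8/BE32 branches above: kernel_sec_start and kernel_sec_end are 64-bit quantities (phys_addr_t), so on big-endian the 32-bit physical address must land in the low-order word at byte offset 4, not offset 0. A minimal userspace illustration of that layout, with a made-up load address (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t sec_start = 0;
	uint32_t pa = 0x40008000;	/* hypothetical physical load address */

	/* The low 32 bits of a 64-bit slot live at byte offset 0 on
	 * little-endian but at byte offset 4 on big-endian, which is why
	 * head.S uses "str r8, [r5, #4]" in the BE8/BE32 branches. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	memcpy((unsigned char *)&sec_start + 4, &pa, sizeof(pa));
#else
	memcpy(&sec_start, &pa, sizeof(pa));
#endif
	printf("sec_start = 0x%016llx\n", (unsigned long long)sec_start);
	return 0;
}
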
34 changes: 21 additions & 13 deletions arch/arm/mm/mmu.c
@@ -1401,18 +1401,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 		create_mapping(&map);
 	}
 
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
 	/*
 	 * Map the cache flushing regions.
 	 */
@@ -1602,12 +1590,27 @@ static void __init map_kernel(void)
 	 * This will only persist until we turn on proper memory management later on
 	 * and we remap the whole kernel with page granularity.
 	 */
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
 	phys_addr_t kernel_x_start = kernel_sec_start;
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 	phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
 	phys_addr_t kernel_nx_end = kernel_sec_end;
 	struct map_desc map;
 
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#else
 	map.pfn = __phys_to_pfn(kernel_x_start);
 	map.virtual = __phys_to_virt(kernel_x_start);
 	map.length = kernel_x_end - kernel_x_start;
@@ -1617,7 +1620,7 @@ static void __init map_kernel(void)
 	/* If the nx part is small it may end up covered by the tail of the RWX section */
 	if (kernel_x_end == kernel_nx_end)
 		return;
-
+#endif
 	map.pfn = __phys_to_pfn(kernel_nx_start);
 	map.virtual = __phys_to_virt(kernel_nx_start);
 	map.length = kernel_nx_end - kernel_nx_start;
@@ -1762,6 +1765,11 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+#ifdef CONFIG_XIP_KERNEL
+	/* Store the kernel RW RAM region start/end in these variables */
+	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
 	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
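
With this change, the XIP variant of paging_init() no longer relies on head.S saving the section bounds (those variables would live in read-only flash); it recomputes them by section-aligning the RAM bounds. A small standalone sketch of that alignment arithmetic, assuming the usual 1 MiB ARM section size and made-up addresses:

#include <stdio.h>

#define SECTION_SHIFT	20			/* 1 MiB ARM sections (non-LPAE) */
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE - 1))

/* Mirrors round_up(x, SECTION_SIZE) from the patch. */
static unsigned long round_up_section(unsigned long x)
{
	return (x + SECTION_SIZE - 1) & SECTION_MASK;
}

int main(void)
{
	unsigned long phys_offset = 0x60000000;	/* hypothetical CONFIG_PHYS_OFFSET */
	unsigned long pa_end = 0x604c8000;	/* hypothetical __pa(_end) */

	/* kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
	 * kernel_sec_end   = round_up(__pa(_end), SECTION_SIZE); */
	printf("kernel_sec_start = 0x%08lx\n", phys_offset & SECTION_MASK);
	printf("kernel_sec_end   = 0x%08lx\n", round_up_section(pa_end));
	return 0;
}
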
10 changes: 7 additions & 3 deletions arch/arm64/include/asm/mman.h
@@ -3,6 +3,8 @@
 #define __ASM_MMAN_H__
 
 #include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
 #include <linux/types.h>
 #include <uapi/asm/mman.h>
 
@@ -21,19 +23,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
-static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
+						   unsigned long flags)
 {
 	/*
 	 * Only allow MTE on anonymous mappings as these are guaranteed to be
 	 * backed by tags-capable memory. The vm_flags may be overridden by a
 	 * filesystem supporting MTE (RAM-based).
 	 */
-	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+	if (system_supports_mte() &&
+	    ((flags & MAP_ANONYMOUS) || shmem_file(file)))
 		return VM_MTE_ALLOWED;
 
 	return 0;
 }
-#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)
 
 static inline bool arch_validate_prot(unsigned long prot,
 				      unsigned long addr __always_unused)
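
The hook now receives the backing file, letting arm64 grant VM_MTE_ALLOWED to shmem-backed mappings (for example memfds) as well as anonymous ones. A hedged userspace sketch of what this enables on MTE-capable hardware; PROT_MTE is the arm64 UAPI bit, and the memfd name is made up:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20		/* arm64 UAPI value */
#endif

int main(void)
{
	int fd = memfd_create("mte-shmem", 0);

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return 1;
	/* With the change, arch_calc_vm_flag_bits() grants VM_MTE_ALLOWED
	 * to shmem-backed files up front, so a PROT_MTE mapping of a memfd
	 * should pass arch_validate_flags() on MTE-capable hardware. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_SHARED, fd, 0);
	printf("PROT_MTE on memfd: %s\n", p == MAP_FAILED ? "rejected" : "ok");
	return 0;
}
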
1 change: 1 addition & 0 deletions arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
 	select ARCH_SUPPORTS_HUGETLBFS if PA20
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_STACKWALK
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select HAVE_RELIABLE_STACKTRACE
 	select DMA_OPS
11 changes: 10 additions & 1 deletion arch/parisc/include/asm/cache.h
@@ -20,7 +20,16 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN	128
+#else
+#define ARCH_DMA_MINALIGN	32
+#endif
+#define ARCH_KMALLOC_MINALIGN	16	/* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign()	((unsigned)dcache_stride)
+#define cache_line_size()	dcache_stride
+#define dma_get_cache_alignment	cache_line_size
 
 #define __read_mostly __section(".data..read_mostly")
 
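
The reason for splitting ARCH_DMA_MINALIGN from ARCH_KMALLOC_MINALIGN: a kmalloc() buffer used for DMA must not share a cache line with a neighboring object, or a CPU writeback of the neighbor's bytes can overwrite data the device just wrote. The 128/32 values cover the largest PA2.0/PA1.x cache lines. A small sketch of the padding this implies, with the alignment constants taken from the patch and a made-up allocation size:

#include <stdio.h>

#define ARCH_DMA_MINALIGN_PA20	128	/* from the patch (CONFIG_PA20) */
#define ARCH_DMA_MINALIGN_PA1X	32	/* from the patch (older CPUs) */

/* Round an allocation up to the DMA-safe alignment so no other slab
 * object can land in the same cache line as the DMA buffer's tail. */
static unsigned long dma_safe_round(unsigned long size, unsigned long align)
{
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("200-byte DMA buffer occupies %lu bytes on PA2.0, %lu on PA1.x\n",
	       dma_safe_round(200, ARCH_DMA_MINALIGN_PA20),
	       dma_safe_round(200, ARCH_DMA_MINALIGN_PA1X));
	return 0;
}
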
29 changes: 18 additions & 11 deletions arch/x86/kvm/lapic.c
@@ -2453,19 +2453,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (apic->apicv_active) {
-		/* irr_pending is always true when apicv is activated. */
-		apic->irr_pending = true;
+	/*
+	 * When APICv is enabled, KVM must always search the IRR for a pending
+	 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
+	 * isn't running.  If APICv is disabled, KVM _should_ search the IRR
+	 * for a pending IRQ.  But KVM currently doesn't ensure *all* hardware,
+	 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
+	 * the IRR at this time could race with IRQ delivery from hardware that
+	 * still sees APICv as being enabled.
+	 *
+	 * FIXME: Ensure other vCPUs and devices observe the change in APICv
+	 *        state prior to updating KVM's metadata caches, so that KVM
+	 *        can safely search the IRR and set irr_pending accordingly.
+	 */
+	apic->irr_pending = true;
+
+	if (apic->apicv_active)
 		apic->isr_count = 1;
-	} else {
-		/*
-		 * Don't clear irr_pending, searching the IRR can race with
-		 * updates from the CPU as APICv is still active from hardware's
-		 * perspective.  The flag will be cleared as appropriate when
-		 * KVM injects the interrupt.
-		 */
+	else
 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
-	}
 
 	apic->highest_isr_cache = -1;
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
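
For reference, count_vectors() above amounts to a population count over the 256-bit in-service register. A simplified standalone model (the real KVM helper walks the APIC register page, reading one 32-bit bank per 16-byte stride):

#include <stdint.h>
#include <stdio.h>

#define APIC_VECTORS	256
#define APIC_REG_BITS	32

/* Simplified model: count set bits across the 8 x 32-bit ISR banks. */
static int count_vectors(const uint32_t isr[APIC_VECTORS / APIC_REG_BITS])
{
	int i, count = 0;

	for (i = 0; i < APIC_VECTORS / APIC_REG_BITS; i++)
		count += __builtin_popcount(isr[i]);
	return count;
}

int main(void)
{
	uint32_t isr[APIC_VECTORS / APIC_REG_BITS] = { 0 };

	isr[1] = 1u << 5;	/* pretend vector 37 (32 * 1 + 5) is in service */
	printf("in-service vectors: %d\n", count_vectors(isr));
	return 0;
}
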
30 changes: 25 additions & 5 deletions arch/x86/kvm/vmx/nested.c
@@ -1126,11 +1126,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
-	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
-	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
-	 * full TLB flush from the guest's perspective.  This is required even
-	 * if VPID is disabled in the host as KVM may need to synchronize the
-	 * MMU in response to the guest TLB flush.
+	 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+	 * same VPID as the host, and so architecturally, linear and combined
+	 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit.  KVM
+	 * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
+	 * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01.  This
+	 * is required if VPID is disabled in KVM, as a TLB flush (there are no
+	 * VPIDs) still occurs from L1's perspective, and KVM may need to
+	 * synchronize the MMU in response to the guest TLB flush.
 	 *
 	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
 	 * EPT is a special snowflake, as guest-physical mappings aren't
@@ -2196,6 +2199,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
 
 	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
 
+	/*
+	 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+	 * same VPID as the host.  Emulate this behavior by using vpid01 for L2
+	 * if VPID is disabled in vmcs12.  Note, if VPID is disabled, VM-Enter
+	 * and VM-Exit are architecturally required to flush VPID=0, but *only*
+	 * VPID=0.  I.e. using vpid02 would be ok (so long as KVM emulates the
+	 * required flushes), but doing so would cause KVM to over-flush.  E.g.
+	 * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
+	 * and then runs L2 X again, then KVM can and should retain TLB entries
+	 * for VPID12=1.
+	 */
 	if (enable_vpid) {
 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
 			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
@@ -5758,6 +5772,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
+	/*
+	 * Always flush the effective vpid02, i.e. never flush the current VPID
+	 * and never explicitly flush vpid01.  INVVPID targets a VPID, not a
+	 * VMCS, and so whether or not the current vmcs12 has VPID enabled is
+	 * irrelevant (and there may not be a loaded vmcs12).
+	 */
 	vpid02 = nested_get_vpid02(vcpu);
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
6 changes: 4 additions & 2 deletions arch/x86/kvm/vmx/vmx.c
@@ -209,9 +209,11 @@ module_param(ple_window_shrink, uint, 0444);
 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
 module_param(ple_window_max, uint, 0444);
 
-/* Default is SYSTEM mode, 1 for host-guest mode */
+/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
 int __read_mostly pt_mode = PT_MODE_SYSTEM;
+#ifdef CONFIG_BROKEN
 module_param(pt_mode, int, S_IRUGO);
+#endif
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
@@ -3098,7 +3100,7 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
 
 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
 		return nested_get_vpid02(vcpu);
 	return to_vmx(vcpu)->vpid;
 }
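
The one-line vmx_get_current_vpid() fix encodes the rule spelled out in the nested.c comments: vpid02 is only "current" when L2 runs with VPID enabled in vmcs12; otherwise L2 shares L1's vpid01. A toy standalone model of that decision (names are illustrative, not KVM API):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: which VPID should a TLB flush target? */
static int current_vpid(bool guest_mode, bool vmcs12_has_vpid,
			int vpid01, int vpid02)
{
	if (guest_mode && vmcs12_has_vpid)
		return vpid02;	/* L2 runs with its own VPID */
	return vpid01;		/* L1, or L2 sharing L1's VPID=0 */
}

int main(void)
{
	/* Before the fix, guest mode alone selected vpid02, so flushes for
	 * an L2 with VPID disabled missed the vpid01 entries it actually uses. */
	printf("L2 with VPID12 disabled -> flush vpid0%d\n",
	       current_vpid(true, false, 1, 2));
	return 0;
}
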
6 changes: 4 additions & 2 deletions arch/x86/mm/ioremap.c
@@ -650,7 +650,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			memunmap(data);
 			return true;
 		}
@@ -712,7 +713,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			early_memunmap(data, sizeof(*data));
 			return true;
 		}
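
The fix accounts for struct setup_data being a 16-byte header (next, type, len) followed by len payload bytes, so the old upper bound stopped sizeof(struct setup_data) short of the element's true end. A standalone sketch contrasting the two checks, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* Same layout as the x86 boot protocol's struct setup_data header. */
struct setup_data_hdr {
	uint64_t next;
	uint32_t type;
	uint32_t len;		/* length of the payload only */
};

static int overlaps(uint64_t phys_addr, uint64_t paddr, uint32_t len,
		    int include_header)
{
	uint64_t end = paddr + len;

	if (include_header)
		end += sizeof(struct setup_data_hdr);	/* 16 bytes */
	return phys_addr > paddr && phys_addr < end;
}

int main(void)
{
	/* hypothetical element at 0x1000 with a 0x20-byte payload */
	uint64_t paddr = 0x1000, probe = 0x1028;

	/* probe falls in the payload tail: the old check misses it because
	 * it ignored the 16-byte header, the fixed check catches it */
	printf("old: %d, fixed: %d\n",
	       overlaps(probe, paddr, 0x20, 0),
	       overlaps(probe, paddr, 0x20, 1));
	return 0;
}
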