x86/static-call: provide a way to do very early static-call updates
Add static_call_update_early() for updating static-call targets in
very early boot.

This will be needed for support of Xen guest-type-specific hypercall
functions.

This is part of XSA-466 / CVE-2024-53241.

Reported-by: Andrew Cooper <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Co-developed-by: Peter Zijlstra <[email protected]>
Co-developed-by: Josh Poimboeuf <[email protected]>
jgross1 committed Dec 13, 2024
1 parent dda014b commit 0ef8047
Showing 6 changed files with 55 additions and 15 deletions.
15 changes: 15 additions & 0 deletions arch/x86/include/asm/static_call.h
@@ -65,4 +65,19 @@

extern bool __static_call_fixup(void *tramp, u8 op, void *dest);

extern void __static_call_update_early(void *tramp, void *func);

#define static_call_update_early(name, _func)				\
({									\
	typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);			\
	if (static_call_initialized) {					\
		__static_call_update(&STATIC_CALL_KEY(name),		\
				     STATIC_CALL_TRAMP_ADDR(name), __F);\
	} else {							\
		WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);		\
		__static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
					   __F);			\
	}								\
})

#endif /* _ASM_STATIC_CALL_H */
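
A minimal usage sketch (illustration only, not part of this commit): an early-boot caller can retarget a static call before static_call_init() has run. The static call name "hypercall" and the function xen_hypercall_hvm below are assumed for the example.

/*
 * Hypothetical early-boot caller. It runs while system_state is still
 * SYSTEM_BOOTING and early_boot_irqs_disabled is true, so the early
 * (pre-static_call_init) branch of the macro above is taken and the
 * trampoline is patched directly.
 */
static void __init xen_setup_hypercall(void)
{
	static_call_update_early(hypercall, xen_hypercall_hvm);
}
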
6 changes: 3 additions & 3 deletions arch/x86/include/asm/sync_core.h
@@ -8,7 +8,7 @@
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
static __always_inline void iret_to_self(void)
{
asm volatile (
"pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
static __always_inline void iret_to_self(void)
{
unsigned int tmp;

@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
* Like all of Linux's memory ordering operations, this is a
* compiler barrier as well.
*/
static inline void sync_core(void)
static __always_inline void sync_core(void)
{
/*
* The SERIALIZE instruction is the most straightforward way to
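
For context (an assumption drawn from the static_call.c hunk below, not stated in the commit message): sync_core() is now called from the noinstr __static_call_update_early(), and noinstr code must not emit calls to functions outside .noinstr.text, hence the switch to __always_inline. A rough kernel-context sketch of that constraint, with made-up names:

/*
 * Illustration only. A noinstr function may only use helpers that are
 * guaranteed to be inlined; an out-of-line call would make objtool warn
 * about a call leaving the .noinstr.text section.
 */
static __always_inline void patch_helper(void)
{
	/* body is inlined into the noinstr caller below */
}

noinstr void early_patch_example(void)
{
	patch_helper();		/* OK: no out-of-line call is emitted */
}
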
9 changes: 9 additions & 0 deletions arch/x86/kernel/static_call.c
@@ -172,6 +172,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);

noinstr void __static_call_update_early(void *tramp, void *func)
{
	BUG_ON(system_state != SYSTEM_BOOTING);
	BUG_ON(!early_boot_irqs_disabled);
	BUG_ON(static_call_initialized);
	__text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
	sync_core();
}

#ifdef CONFIG_MITIGATION_RETHUNK
/*
* This is called by apply_returns() to fix up static call trampolines,
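
Conceptually, the __text_gen_insn() call above writes a 5-byte near jump at the trampoline whose 32-bit displacement is measured from the end of the instruction. A simplified standalone sketch of that encoding (illustration only, not the kernel helper):

#include <stdint.h>
#include <string.h>

/*
 * Illustration only -- not the kernel's __text_gen_insn(). Emits
 * "jmp rel32" at tramp targeting func: opcode 0xe9 followed by a
 * little-endian 32-bit displacement relative to the next instruction.
 */
static void emit_jmp32(void *tramp, void *func)
{
	unsigned char *insn = tramp;
	int32_t rel = (int32_t)((uintptr_t)func - ((uintptr_t)tramp + 5));

	insn[0] = 0xe9;				/* JMP rel32 opcode */
	memcpy(&insn[1], &rel, sizeof(rel));	/* 4-byte displacement */
}
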
37 changes: 26 additions & 11 deletions include/linux/compiler.h
@@ -216,28 +216,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,

#endif /* __KERNEL__ */

/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
* @off: the address of the 32-bit offset value
*/
static inline void *offset_to_ptr(const int *off)
{
return (void *)((unsigned long)off + *off);
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_64BIT
#define ARCH_SEL(a,b) a
#else
#define ARCH_SEL(a,b) b
#endif

/*
* Force the compiler to emit 'sym' as a symbol, so that we can reference
* it from inline assembler. Necessary in case 'sym' could be inlined
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
#define ___ADDRESSABLE(sym, __attrs)					\
	static void * __used __attrs					\
	__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;

#define __ADDRESSABLE(sym) \
	___ADDRESSABLE(sym, __section(".discard.addressable"))

/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
* @off: the address of the 32-bit offset value
*/
static inline void *offset_to_ptr(const int *off)
{
return (void *)((unsigned long)off + *off);
}
#define __ADDRESSABLE_ASM(sym)						\
	.pushsection .discard.addressable,"aw";				\
	.align ARCH_SEL(8,4);						\
	ARCH_SEL(.quad, .long) __stringify(sym);			\
	.popsection;

#endif /* __ASSEMBLY__ */
#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))

#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
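
A hedged sketch of how the new assembler-side macro can be used from inline asm (the symbol name below is made up): concatenating __ADDRESSABLE_ASM_STR() into the asm string records the symbol in .discard.addressable, so it stays visible to the toolchain even though its only reference is inside the asm text.

/*
 * Illustration only: my_target_func is a hypothetical symbol. The macro
 * expands to a string of .pushsection/.popsection directives (each one
 * ';'-terminated), so it can simply be prepended to the instruction text.
 */
#define CALL_MY_TARGET()					\
	asm volatile (__ADDRESSABLE_ASM_STR(my_target_func)	\
		      "call my_target_func"			\
		      : : : "memory")
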
1 change: 1 addition & 0 deletions include/linux/static_call.h
@@ -138,6 +138,7 @@
#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

extern int static_call_initialized;
/*
* Either @site or @tramp can be NULL.
*/
2 changes: 1 addition & 1 deletion kernel/static_call_inline.c
@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
extern struct static_call_tramp_key __start_static_call_tramp_key[],
__stop_static_call_tramp_key[];

static int static_call_initialized;
int static_call_initialized;

/*
* Must be called before early_initcall() to be effective.
