Skip to content

Commit

Permalink
Merge tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Browse files Browse the repository at this point in the history

Pull LoongArch updates from Huacai Chen:

 - Make -mstrict-align configurable

 - Add kernel relocation and KASLR support

 - Add single kernel image implementation for kdump

 - Add hardware breakpoints/watchpoints support

 - Add kprobes/kretprobes/kprobes_on_ftrace support

 - Add LoongArch support for some selftests.

* tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (23 commits)
  selftests/ftrace: Add LoongArch kprobe args string tests support
  selftests/seccomp: Add LoongArch selftesting support
  tools: Add LoongArch build infrastructure
  samples/kprobes: Add LoongArch support
  LoongArch: Mark some assembler symbols as non-kprobe-able
  LoongArch: Add kprobes on ftrace support
  LoongArch: Add kretprobes support
  LoongArch: Add kprobes support
  LoongArch: Simulate branch and PC* instructions
  LoongArch: ptrace: Add hardware single step support
  LoongArch: ptrace: Add function argument access API
  LoongArch: ptrace: Expose hardware breakpoints to debuggers
  LoongArch: Add hardware breakpoints/watchpoints support
  LoongArch: kdump: Add crashkernel=YM handling
  LoongArch: kdump: Add single kernel image implementation
  LoongArch: Add support for kernel address space layout randomization (KASLR)
  LoongArch: Add support for kernel relocation
  LoongArch: Add la_abs macro implementation
  LoongArch: Add JUMP_VIRT_ADDR macro implementation to avoid using la.abs
  LoongArch: Use la.pcrel instead of la.abs when it's trivially possible
  ...
  • Loading branch information
torvalds committed Mar 1, 2023
2 parents 64e8516 + 8883bf8 commit a8356cd
Show file tree
Hide file tree
Showing 47 changed files with 2,665 additions and 130 deletions.
65 changes: 56 additions & 9 deletions arch/loongarch/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -94,15 +94,21 @@ config LOONGARCH
select HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GENERIC_VDSO
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PCI
Expand Down Expand Up @@ -441,6 +447,24 @@ config ARCH_IOREMAP
protection support. However, you can enable LoongArch DMW-based
ioremap() for better performance.

config ARCH_STRICT_ALIGN
bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
default y
help
Not all LoongArch cores support h/w unaligned access; the -mstrict-align
build parameter can be used to prevent unaligned accesses.

CPUs with h/w unaligned access support:
Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.

CPUs without h/w unaligned access support:
Loongson-2K500/2K1000.

This option is enabled by default so that the kernel is able to run
on all LoongArch systems. You can disable it manually if you only
need to run the kernel on systems with h/w unaligned access support,
in order to optimise for performance.

config KEXEC
bool "Kexec system call"
select KEXEC_CORE
Expand All @@ -454,6 +478,7 @@ config KEXEC

config CRASH_DUMP
bool "Build kdump crash kernel"
select RELOCATABLE
help
Generate crash dump after being started by kexec. This should
be normally only set in special crash dump kernels which are
Expand All @@ -463,16 +488,38 @@ config CRASH_DUMP

For more details see Documentation/admin-guide/kdump/kdump.rst

config PHYSICAL_START
hex "Physical address where the kernel is loaded"
default "0x90000000a0000000"
depends on CRASH_DUMP
config RELOCATABLE
bool "Relocatable kernel"
help
This gives the XKPRANGE address where the kernel is loaded.
If you plan to use kernel for capturing the crash dump change
this value to start of the reserved region (the "X" value as
specified in the "crashkernel=YM@XM" command line boot parameter
passed to the panic-ed kernel).
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required, so as to relocate
the kernel binary at runtime to a different virtual address from
its link address.

config RANDOMIZE_BASE
bool "Randomize the address of the kernel (KASLR)"
depends on RELOCATABLE
help
Randomizes the physical and virtual address at which the
kernel image is loaded, as a security feature that
deters exploit attempts relying on knowledge of the location
of kernel internals.

The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.

If unsure, say N.

config RANDOMIZE_BASE_MAX_OFFSET
hex "Maximum KASLR offset" if EXPERT
depends on RANDOMIZE_BASE
range 0x0 0x10000000
default "0x01000000"
help
When KASLR is active, this provides the maximum offset that will
be applied to the kernel image. It should be set according to the
amount of physical RAM available in the target system.

This is limited by the size of the lower address memory, 256MB.

config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
Expand Down
14 changes: 10 additions & 4 deletions arch/loongarch/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -71,14 +71,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif

ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif

cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)

ifndef CONFIG_PHYSICAL_START
load-y = 0x9000000000200000
else
load-y = $(CONFIG_PHYSICAL_START)
endif
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)

drivers-$(CONFIG_PCI) += arch/loongarch/pci/
Expand All @@ -91,10 +92,15 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
# instead of .eh_frame so we don't discard them.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables

ifdef CONFIG_ARCH_STRICT_ALIGN
# Don't emit unaligned accesses.
# Not all LoongArch cores support unaligned access, and as kernel we can't
# rely on others to provide emulation for these accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
else
# Optimise for performance on hardware that supports unaligned access.
KBUILD_CFLAGS += $(call cc-option,-mno-strict-align)
endif

KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)

Expand Down
1 change: 1 addition & 0 deletions arch/loongarch/configs/loongson3_defconfig
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ CONFIG_HOTPLUG_CPU=y
CONFIG_NR_CPUS=64
CONFIG_NUMA=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
CONFIG_ACPI=y
Expand Down
2 changes: 2 additions & 0 deletions arch/loongarch/include/asm/addrspace.h
Original file line number Diff line number Diff line change
Expand Up @@ -125,4 +125,6 @@ extern unsigned long vm_map_base;
#define ISA_IOSIZE SZ_16K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)

#define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS)

#endif /* _ASM_ADDRSPACE_H */
10 changes: 10 additions & 0 deletions arch/loongarch/include/asm/asm.h
Original file line number Diff line number Diff line change
Expand Up @@ -188,4 +188,14 @@
#define PTRLOG 3
#endif

/*
 * Annotate a function as being unsuitable for kprobes.
 *
 * Records the symbol's address in the "_kprobe_blacklist" section; the
 * kprobes core consumes this section to refuse probing the listed
 * symbols.  Expands to nothing when kprobes support is compiled out.
 * .quad is used because LoongArch kernel symbols are 64-bit addresses.
 */
#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(name) \
.pushsection "_kprobe_blacklist", "aw"; \
.quad name; \
.popsection
#else
#define _ASM_NOKPROBE(name)
#endif

#endif /* __ASM_ASM_H */
17 changes: 17 additions & 0 deletions arch/loongarch/include/asm/asmmacro.h
Original file line number Diff line number Diff line change
Expand Up @@ -274,4 +274,21 @@
nor \dst, \src, zero
.endm

/*
 * Load the absolute (link-time) address of \sym into \reg.
 *
 * Non-relocatable kernels can simply use la.abs.  Relocatable (PIE)
 * kernels instead emit a four-instruction placeholder that builds a
 * 64-bit immediate out of all-zero fields (lu12i.w/ori/lu32i.d/lu52i.d),
 * and record a (offset, symbol) pair in the ".la_abs" section.
 * NOTE(review): the placeholder is presumably patched with the symbol's
 * runtime address by the boot-time relocation code — confirm against the
 * kernel relocation implementation.
 */
.macro la_abs reg, sym
#ifndef CONFIG_RELOCATABLE
la.abs \reg, \sym
#else
766:
lu12i.w \reg, 0
ori \reg, \reg, 0
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
768:
/* Distance from this record back to the instruction sequence above. */
.dword 768b-766b
/* The symbol whose absolute address must be materialised there. */
.dword \sym
.popsection
#endif
.endm

#endif /* _ASM_ASMMACRO_H */
2 changes: 1 addition & 1 deletion arch/loongarch/include/asm/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@

#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */
#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */
#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit3-issue */
#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */
#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */
#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */

Expand Down
145 changes: 145 additions & 0 deletions arch/loongarch/include/asm/hw_breakpoint.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware breakpoint/watchpoint definitions for LoongArch.
 *
 * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
 */
#ifndef __ASM_HW_BREAKPOINT_H
#define __ASM_HW_BREAKPOINT_H

#include <asm/loongarch.h>

#ifdef __KERNEL__

/* Breakpoint: instruction-execution trigger (type field = 0). */
#define LOONGARCH_BREAKPOINT_EXECUTE (0 << 0)

/* Watchpoints: data-access triggers; LOAD and STORE may be combined. */
#define LOONGARCH_BREAKPOINT_LOAD (1 << 0)
#define LOONGARCH_BREAKPOINT_STORE (1 << 1)

/*
 * Decoded form of a watch-register control word; see
 * encode_ctrl_reg()/decode_ctrl_reg() for the bit layout.
 */
struct arch_hw_breakpoint_ctrl {
u32 __reserved : 28,
len : 2, /* length encoding (LOONGARCH_BREAKPOINT_LEN_*) */
type : 2; /* access type (EXECUTE/LOAD/STORE bits above) */
};

/* Per-breakpoint architectural state: target address, mask and control. */
struct arch_hw_breakpoint {
u64 address;
u64 mask;
struct arch_hw_breakpoint_ctrl ctrl;
};

/*
 * Lengths.
 * Note the encoding is inverted with respect to the byte count:
 * 1 byte -> 0b11 ... 8 bytes -> 0b00.
 */
#define LOONGARCH_BREAKPOINT_LEN_1 0b11
#define LOONGARCH_BREAKPOINT_LEN_2 0b10
#define LOONGARCH_BREAKPOINT_LEN_4 0b01
#define LOONGARCH_BREAKPOINT_LEN_8 0b00

/*
 * Limits.
 * Changing these will require modifications to the register accessors.
 */
#define LOONGARCH_MAX_BRP 8
#define LOONGARCH_MAX_WRP 8

/*
 * Virtual debug register bases: indices into a flat per-task array of
 * debug registers, laid out as ADDR[8], MASK[8], CTRL[8], ASID[8].
 */
#define CSR_CFG_ADDR 0
#define CSR_CFG_MASK (CSR_CFG_ADDR + LOONGARCH_MAX_BRP)
#define CSR_CFG_CTRL (CSR_CFG_MASK + LOONGARCH_MAX_BRP)
#define CSR_CFG_ASID (CSR_CFG_CTRL + LOONGARCH_MAX_WRP)

/* Debug register names, pasted into CSR identifiers by the macros below. */
#define LOONGARCH_CSR_NAME_ADDR ADDR
#define LOONGARCH_CSR_NAME_MASK MASK
#define LOONGARCH_CSR_NAME_CTRL CTRL
#define LOONGARCH_CSR_NAME_ASID ASID

/*
 * Accessor macros for the debug registers.
 * T selects the register bank: T == 0 pastes the IB* (instruction
 * breakpoint) CSR name, otherwise the DB* (data watchpoint) CSR name.
 * N is the register index, REG one of the names above.
 * NOTE(review): IB/DB semantics inferred from naming — confirm against
 * the CSR definitions in asm/loongarch.h.
 */
#define LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL) \
do { \
if (T == 0) \
VAL = csr_read64(LOONGARCH_CSR_##IB##N##REG); \
else \
VAL = csr_read64(LOONGARCH_CSR_##DB##N##REG); \
} while (0)

#define LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL) \
do { \
if (T == 0) \
csr_write64(VAL, LOONGARCH_CSR_##IB##N##REG); \
else \
csr_write64(VAL, LOONGARCH_CSR_##DB##N##REG); \
} while (0)

/* Masks extracting the implemented-register count from FWPC/MWPC. */
#define CSR_FWPC_NUM 0x3f
#define CSR_MWPC_NUM 0x3f

/* NOTE(review): presumably enables the watch for privilege levels 1-4
 * (bits [4:1]) — confirm against the CSR control-register layout. */
#define CTRL_PLV_ENABLE 0x1e

/* Bit positions of the load/store enable bits in the MWPnCFG3 register. */
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9

/* Field widths (as masks) of the type and size fields; see decode_ctrl_reg(). */
#define MWPnCFG3_Type_mask 0x3
#define MWPnCFG3_Size_mask 0x3

/*
 * Pack an arch_hw_breakpoint_ctrl into its 32-bit control-register
 * encoding: access type in bits [9:8], length encoding in bits [11:10].
 * Inverse of decode_ctrl_reg().
 */
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
{
	u32 val = ctrl.type << 8;

	val |= ctrl.len << 10;

	return val;
}

/*
 * Unpack a 32-bit control-register value into *ctrl: access type from
 * bits [9:8], length encoding from bits [11:10].
 * Inverse of encode_ctrl_reg().
 */
static inline void decode_ctrl_reg(u32 reg, struct arch_hw_breakpoint_ctrl *ctrl)
{
	ctrl->type = (reg >> 8) & MWPnCFG3_Type_mask;
	ctrl->len = (reg >> 10) & MWPnCFG3_Size_mask;
}

/* Forward declarations to avoid pulling in heavyweight headers. */
struct task_struct;
struct notifier_block;
struct perf_event;
struct perf_event_attr;

/*
 * Arch hooks required by the generic hw_breakpoint framework.
 * NOTE(review): implementations are presumably in
 * arch/loongarch/kernel/hw_breakpoint.c — confirm.
 */
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);

/* Install/remove a breakpoint in the hardware debug registers. */
extern int arch_install_hw_breakpoint(struct perf_event *bp);
extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
extern int hw_breakpoint_slots(int type);
extern void hw_breakpoint_pmu_read(struct perf_event *bp);

/* Exception entry points for breakpoint/watchpoint traps. */
void breakpoint_handler(struct pt_regs *regs);
void watchpoint_handler(struct pt_regs *regs);

/*
 * Thread lifecycle hooks: copy debug state on fork and reload it on
 * context switch.  Compiled to empty inline stubs when hardware
 * breakpoint support is disabled so callers need no #ifdefs.
 */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern void ptrace_hw_copy_thread(struct task_struct *task);
extern void hw_breakpoint_thread_switch(struct task_struct *next);
#else
static inline void ptrace_hw_copy_thread(struct task_struct *task)
{
}
static inline void hw_breakpoint_thread_switch(struct task_struct *next)
{
}
#endif

/* Determine number of BRP registers available.
 * Reads the implemented instruction-breakpoint count from the FWPC CSR. */
static inline int get_num_brps(void)
{
return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
}

/* Determine number of WRP registers available.
 * Reads the implemented data-watchpoint count from the MWPC CSR. */
static inline int get_num_wrps(void)
{
return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
}

#endif /* __KERNEL__ */
#endif /* __ASM_HW_BREAKPOINT_H */
Loading

0 comments on commit a8356cd

Please sign in to comment.