Merge tag 'mm-hotfixes-stable-2023-03-24-17-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "21 hotfixes, 8 of which are cc:stable. 11 are for MM, the remainder
  are for other subsystems"

* tag 'mm-hotfixes-stable-2023-03-24-17-09' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
  mm: mmap: remove newline at the end of the trace
  mailmap: add entries for Richard Leitner
  kcsan: avoid passing -g for test
  kfence: avoid passing -g for test
  mm: kfence: fix using kfence_metadata without initialization in show_object()
  lib: dhry: fix unstable smp_processor_id(_) usage
  mailmap: add entry for Enric Balletbo i Serra
  mailmap: map Sai Prakash Ranjan's old address to his current one
  mailmap: map Rajendra Nayak's old address to his current one
  Revert "kasan: drop skip_kasan_poison variable in free_pages_prepare"
  mailmap: add entry for Tobias Klauser
  kasan, powerpc: don't rename memintrinsics if compiler adds prefixes
  mm/ksm: fix race with VMA iteration and mm_struct teardown
  kselftest: vm: fix unused variable warning
  mm: fix error handling for map_deny_write_exec
  mm: deduplicate error handling for map_deny_write_exec
  checksyscalls: ignore fstat to silence build warning on LoongArch
  nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy()
  test_maple_tree: add more testing for mas_empty_area()
  maple_tree: fix mas_skip_node() end slot detection
  ...
torvalds committed Mar 25, 2023
2 parents 90c8ce3 + d0072ca commit 65aca32
Showing 19 changed files with 140 additions and 53 deletions.
11 changes: 11 additions & 0 deletions .mailmap
@@ -133,6 +133,8 @@ Dmitry Safonov <[email protected]> <[email protected]>
Domen Puncer <[email protected]>
Douglas Gilbert <[email protected]>
Ed L. Cashin <[email protected]>
Enric Balletbo i Serra <[email protected]> <[email protected]>
Enric Balletbo i Serra <[email protected]> <[email protected]>
Erik Kaneda <[email protected]> <[email protected]>
Eugen Hristev <[email protected]> <[email protected]>
Evgeniy Polyakov <[email protected]>
@@ -379,6 +381,7 @@ Quentin Monnet <[email protected]> <[email protected]>
Quentin Perret <[email protected]> <[email protected]>
Rafael J. Wysocki <[email protected]> <[email protected]>
Rajeev Nandan <[email protected]> <[email protected]>
Rajendra Nayak <[email protected]> <[email protected]>
Rajesh Shah <[email protected]>
Ralf Baechle <[email protected]>
Ralf Wildenhues <[email protected]>
@@ -387,6 +390,9 @@ Rémi Denis-Courmont <[email protected]>
Ricardo Ribalda <[email protected]> <[email protected]>
Ricardo Ribalda <[email protected]> Ricardo Ribalda Delgado <[email protected]>
Ricardo Ribalda <[email protected]> <[email protected]>
Richard Leitner <[email protected]> <[email protected]>
Richard Leitner <[email protected]> <[email protected]>
Richard Leitner <[email protected]> <[email protected]>
Robert Foss <[email protected]> <[email protected]>
Roman Gushchin <[email protected]> <[email protected]>
Roman Gushchin <[email protected]> <[email protected]>
@@ -397,6 +403,7 @@ Ross Zwisler <[email protected]> <[email protected]>
Rudolf Marek <[email protected]>
Rui Saraiva <[email protected]>
Sachin P Sant <[email protected]>
Sai Prakash Ranjan <[email protected]> <[email protected]>
Sakari Ailus <[email protected]> <[email protected]>
Sam Ravnborg <[email protected]>
Sankeerth Billakanti <[email protected]> <[email protected]>
@@ -437,6 +444,10 @@ Thomas Graf <[email protected]>
Thomas Körper <[email protected]> <[email protected]>
Thomas Pedersen <[email protected]>
Tiezhu Yang <[email protected]> <[email protected]>
Tobias Klauser <[email protected]> <[email protected]>
Tobias Klauser <[email protected]> <[email protected]>
Tobias Klauser <[email protected]> <[email protected]>
Tobias Klauser <[email protected]> <[email protected]>
Todor Tomov <[email protected]> <[email protected]>
Tony Luck <[email protected]>
TripleX Chung <[email protected]> <[email protected]>
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)
#define _GLOBAL_KASAN(fn) _GLOBAL(__##fn)
#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(__##fn)
#define EXPORT_SYMBOL_KASAN(fn) EXPORT_SYMBOL(__##fn)
15 changes: 11 additions & 4 deletions arch/powerpc/include/asm/string.h
@@ -30,11 +30,17 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
void memcpy_flushcache(void *dest, const void *src, size_t size);

#ifdef CONFIG_KASAN
/* __mem variants are used by KASAN to implement instrumented memintrinsics. */
#ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
#define __memset memset
#define __memcpy memcpy
#define __memmove memmove
#else /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
void *__memset(void *s, int c, __kernel_size_t count);
void *__memcpy(void *to, const void *from, __kernel_size_t n);
void *__memmove(void *to, const void *from, __kernel_size_t n);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
#ifndef __SANITIZE_ADDRESS__
/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
@@ -46,8 +52,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
#endif /* !__SANITIZE_ADDRESS__ */
#endif /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
#endif /* CONFIG_KASAN */

#ifdef CONFIG_PPC64
#ifndef CONFIG_KASAN
9 changes: 7 additions & 2 deletions arch/powerpc/kernel/prom_init_check.sh
@@ -13,8 +13,13 @@
# If you really need to reference something from prom_init.o add
# it to the list below:

grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
if [ $? -eq 0 ]
has_renamed_memintrinsics()
{
grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
}

if has_renamed_memintrinsics
then
MEM_FUNCS="__memcpy __memset"
else
2 changes: 1 addition & 1 deletion fs/nilfs2/ioctl.c
@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
return -EINVAL;

buf = (void *)__get_free_pages(GFP_NOFS, 0);
buf = (void *)get_zeroed_page(GFP_NOFS);
if (unlikely(!buf))
return -ENOMEM;
maxmembs = PAGE_SIZE / argv->v_size;
4 changes: 2 additions & 2 deletions include/trace/events/mmap.h
@@ -35,7 +35,7 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset = info->align_offset;
),

TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
__entry->total_vm, __entry->flags, __entry->length,
@@ -110,7 +110,7 @@ TRACE_EVENT(exit_mmap,
__entry->mt = &mm->mm_mt;
),

TP_printk("mt_mod %p, DESTROY\n",
TP_printk("mt_mod %p, DESTROY",
__entry->mt
)
);
2 changes: 1 addition & 1 deletion kernel/kcsan/Makefile
@@ -16,6 +16,6 @@ obj-y := core.o debugfs.o report.o
KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o

CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
6 changes: 4 additions & 2 deletions lib/dhry_run.c
@@ -31,6 +31,7 @@ MODULE_PARM_DESC(iterations,

static void dhry_benchmark(void)
{
unsigned int cpu = get_cpu();
int i, n;

if (iterations > 0) {
@@ -45,9 +46,10 @@ static void dhry_benchmark(void)
}

report:
put_cpu();
if (n >= 0)
pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n",
smp_processor_id(), n, n / DHRY_VAX);
pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
n, n / DHRY_VAX);
else if (n == -EAGAIN)
pr_err("Please increase the number of iterations\n");
else
24 changes: 5 additions & 19 deletions lib/maple_tree.c
@@ -5099,35 +5099,21 @@ static inline bool mas_rewind_node(struct ma_state *mas)
*/
static inline bool mas_skip_node(struct ma_state *mas)
{
unsigned char slot, slot_count;
unsigned long *pivots;
enum maple_type mt;
if (mas_is_err(mas))
return false;

mt = mte_node_type(mas->node);
slot_count = mt_slots[mt] - 1;
do {
if (mte_is_root(mas->node)) {
slot = mas->offset;
if (slot > slot_count) {
if (mas->offset >= mas_data_end(mas)) {
mas_set_err(mas, -EBUSY);
return false;
}
} else {
mas_ascend(mas);
slot = mas->offset;
mt = mte_node_type(mas->node);
slot_count = mt_slots[mt] - 1;
}
} while (slot > slot_count);

mas->offset = ++slot;
pivots = ma_pivots(mas_mn(mas), mt);
if (slot > 0)
mas->min = pivots[slot - 1] + 1;

if (slot <= slot_count)
mas->max = pivots[slot];
} while (mas->offset >= mas_data_end(mas));

mas->offset++;
return true;
}

48 changes: 48 additions & 0 deletions lib/test_maple_tree.c
@@ -2670,6 +2670,49 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
rcu_read_unlock();
}

static noinline void check_empty_area_fill(struct maple_tree *mt)
{
const unsigned long max = 0x25D78000;
unsigned long size;
int loop, shift;
MA_STATE(mas, mt, 0, 0);

mt_set_non_kernel(99999);
for (shift = 12; shift <= 16; shift++) {
loop = 5000;
size = 1 << shift;
while (loop--) {
mas_set(&mas, 0);
mas_lock(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
MT_BUG_ON(mt, mas.last != mas.index + size - 1);
mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
mas_unlock(&mas);
mas_reset(&mas);
}
}

/* No space left. */
size = 0x1000;
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
rcu_read_unlock();

/* Fill a depth 3 node to the maximum */
for (unsigned long i = 629440511; i <= 629440800; i += 6)
mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
/* Make space in the second-last depth 4 node */
mtree_erase(mt, 631668735);
/* Make space in the last depth 4 node */
mtree_erase(mt, 629506047);
mas_reset(&mas);
/* Search from just after the gap in the second-last depth 4 */
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
rcu_read_unlock();
mt_set_non_kernel(0);
}

static DEFINE_MTREE(tree);
static int maple_tree_seed(void)
{
@@ -2926,6 +2969,11 @@ static int maple_tree_seed(void)
check_empty_area_window(&tree);
mtree_destroy(&tree);

mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_empty_area_fill(&tree);
mtree_destroy(&tree);


#if defined(BENCH)
skip:
#endif
2 changes: 1 addition & 1 deletion mm/kfence/Makefile
@@ -2,5 +2,5 @@

obj-y := core.o report.o

CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
10 changes: 8 additions & 2 deletions mm/kfence/core.c
@@ -726,10 +726,14 @@ static const struct seq_operations objects_sops = {
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int __init kfence_debugfs_init(void)
static int kfence_debugfs_init(void)
{
struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
struct dentry *kfence_dir;

if (!READ_ONCE(kfence_enabled))
return 0;

kfence_dir = debugfs_create_dir("kfence", NULL);
debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
return 0;
@@ -883,6 +887,8 @@ static int kfence_init_late(void)
}

kfence_init_enable();
kfence_debugfs_init();

return 0;
}

11 changes: 9 additions & 2 deletions mm/ksm.c
@@ -988,9 +988,15 @@ static int unmerge_and_remove_all_rmap_items(void)

mm = mm_slot->slot.mm;
mmap_read_lock(mm);

/*
* Exit right away if mm is exiting to avoid lockdep issue in
* the maple tree
*/
if (ksm_test_exit(mm))
goto mm_exiting;

for_each_vma(vmi, vma) {
if (ksm_test_exit(mm))
break;
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
continue;
err = unmerge_ksm_pages(vma,
@@ -999,6 +1005,7 @@ static int unmerge_and_remove_all_rmap_items(void)
goto error;
}

mm_exiting:
remove_trailing_rmap_items(&mm_slot->rmap_list);
mmap_read_unlock(mm);

7 changes: 1 addition & 6 deletions mm/mmap.c
@@ -2621,12 +2621,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,

if (map_deny_write_exec(vma, vma->vm_flags)) {
error = -EACCES;
if (file)
goto close_and_free_vma;
else if (vma->vm_file)
goto unmap_and_free_vma;
else
goto free_vma;
goto close_and_free_vma;
}

/* Allow architectures to sanity-check the vm_flags */
2 changes: 1 addition & 1 deletion mm/mprotect.c
@@ -805,7 +805,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,

if (map_deny_write_exec(vma, newflags)) {
error = -EACCES;
goto out;
break;
}

/* Allow architectures to sanity-check the new flags */
3 changes: 2 additions & 1 deletion mm/page_alloc.c
@@ -1398,6 +1398,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
bool init = want_init_on_free();

VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1470,7 +1471,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
if (!should_skip_kasan_poison(page, fpi_flags)) {
if (!skip_kasan_poison) {
kasan_poison_pages(page, order, init);

/* Memory is already initialized if KASAN did it internally. */
