From a63c78c3493c5dff9186c1fd2dbdf93b03f0ac3e Mon Sep 17 00:00:00 2001 From: Dominique Martinet Date: Thu, 23 May 2024 20:31:38 +0900 Subject: [PATCH 01/48] 9p: v9fs_fid_find: also lookup by inode if not found dentry [ Upstream commit 38d222b3163f7b7d737e5d999ffc890a12870e36 ] It's possible for v9fs_fid_find "find by dentry" branch to not turn up anything despite having an entry set (because e.g. uid doesn't match), in which case the calling code will generally make an extra lookup to the server. In this case we might have had better luck looking by inode, so fall back to look up by inode if we have one and the lookup by dentry failed. Message-Id: <20240523210024.1214386-1-asmadeus@codewreck.org> Reviewed-by: Christian Schoenebeck Signed-off-by: Dominique Martinet Signed-off-by: Sasha Levin --- fs/9p/fid.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/9p/fid.c b/fs/9p/fid.c index de009a33e0e26..f84412290a30c 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -131,10 +131,9 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) } } spin_unlock(&dentry->d_lock); - } else { - if (dentry->d_inode) - ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any); } + if (!ret && dentry->d_inode) + ret = v9fs_fid_find_inode(dentry->d_inode, false, uid, any); return ret; } From 0d6c0b3b6f3fa056e82eec1a4b9321a01019ae44 Mon Sep 17 00:00:00 2001 From: Pedro Falcato Date: Wed, 7 Aug 2024 10:47:25 +0100 Subject: [PATCH 02/48] 9p: Avoid creating multiple slab caches with the same name [ Upstream commit 79efebae4afc2221fa814c3cae001bede66ab259 ] In the spirit of [1], avoid creating multiple slab caches with the same name. Instead, add the dev_name into the mix. [1]: https://lore.kernel.org/all/20240807090746.2146479-1-pedro.falcato@gmail.com/ Signed-off-by: Pedro Falcato Reported-by: syzbot+3c5d43e97993e1fa612b@syzkaller.appspotmail.com Message-ID: <20240807094725.2193423-1-pedro.falcato@gmail.com> Signed-off-by: Dominique Martinet Signed-off-by: Sasha Levin --- net/9p/client.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/9p/client.c b/net/9p/client.c index b05f73c291b4b..e7ea6c5c7463d 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -978,6 +978,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) int err; struct p9_client *clnt; char *client_id; + char *cache_name; clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); if (!clnt) @@ -1034,15 +1035,22 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) if (err) goto close_trans; + cache_name = kasprintf(GFP_KERNEL, "9p-fcall-cache-%s", dev_name); + if (!cache_name) { + err = -ENOMEM; + goto close_trans; + } + /* P9_HDRSZ + 4 is the smallest packet header we can have that is * followed by data accessed from userspace by read */ clnt->fcall_cache = - kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize, + kmem_cache_create_usercopy(cache_name, clnt->msize, 0, 0, P9_HDRSZ + 4, clnt->msize - (P9_HDRSZ + 4), NULL); + kfree(cache_name); return clnt; close_trans: From 68ec5395bc2485533acb1fa7de4a7c2f45db14a1 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Sep 2024 14:08:44 -0700 Subject: [PATCH 03/48] selftests/bpf: Verify that sync_linked_regs preserves subreg_def [ Upstream commit a41b3828ec056a631ad22413d4560017fed5c3bd ] This test was added because of a bug in verifier.c:sync_linked_regs(), upon range propagation it destroyed subreg_def marks for registers. 
The test is written in a way to return an upper half of a register
that is affected by range propagation and must have its subreg_def
preserved. This gives a return value of 0 and leads to undefined
return value if subreg_def mark is not preserved.

Signed-off-by: Eduard Zingerman
Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Acked-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20240924210844.1758441-2-eddyz87@gmail.com
Signed-off-by: Sasha Levin
---
 .../selftests/bpf/progs/verifier_scalar_ids.c | 67 +++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index 13b29a7faa71a..d24d3a36ec144 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -656,4 +656,71 @@ __naked void two_old_ids_one_cur_id(void)
 	: __clobber_all);
 }
 
+SEC("socket")
+/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
+__flag(BPF_F_TEST_RND_HI32)
+__success
+/* This test was added because of a bug in verifier.c:sync_linked_regs(),
+ * upon range propagation it destroyed subreg_def marks for registers.
+ * The subreg_def mark is used to decide whether zero extension instructions
+ * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it
+ * also causes generation of statements to randomize upper halves of
+ * read registers.
+ *
+ * The test is written in a way to return an upper half of a register
+ * that is affected by range propagation and must have its subreg_def
+ * preserved. This gives a return value of 0 and leads to undefined
+ * return value if subreg_def mark is not preserved.
+ */
+__retval(0)
+/* Check that verifier believes r1/r0 are zero at exit */
+__log_level(2)
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+__msg("from 3 to 4")
+__msg("4: (77) r1 >>= 32 ; R1_w=0")
+__msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0")
+__msg("6: (95) exit")
+/* Verify that statements to randomize upper half of r1 had not been
+ * generated.
+ */
+__xlated("call unknown")
+__xlated("r0 &= 2147483647")
+__xlated("w1 = w0")
+/* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm
+ * are the only CI archs that do not need zero extension for subregs.
+ */
+#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
+__xlated("w1 = w1")
+#endif
+__xlated("if w0 < 0xa goto pc+0")
+__xlated("r1 >>= 32")
+__xlated("r0 = r1")
+__xlated("exit")
+__naked void linked_regs_and_subreg_def(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
+	 * assign same IDs to registers.
+	 */
+	"r0 &= 0x7fffffff;"
+	/* link w1 and w0 via ID */
+	"w1 = w0;"
+	/* 'if' statement propagates range info from w0 to w1,
+	 * but should not affect w1->subreg_def property.
+	 */
+	"if w0 < 10 goto +0;"
+	/* r1 is read here, on archs that require subreg zero
+	 * extension this would cause zext patch generation.
+	 */
+	"r1 >>= 32;"
+	"r0 = r1;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";

From 4b3441089235dd881638c140fda8c3339162126d Mon Sep 17 00:00:00 2001
From: Sergey Matsievskiy
Date: Wed, 25 Sep 2024 21:44:15 +0300
Subject: [PATCH 04/48] irqchip/ocelot: Fix trigger register address

[ Upstream commit 9e9c4666abb5bb444dac37e2d7eb5250c8d52a45 ]

Controllers, supported by this driver, have two sets of registers:
* (main) interrupt registers control peripheral interrupt sources.
* device interrupt registers configure per-device (network interface)
  interrupts and act as an extra stage before the main interrupt
  registers.

In the driver unmask code, device trigger registers are used in the
mask calculation of the main interrupt sticky register, mixing two
kinds of registers. Use the main interrupt trigger register instead.

Signed-off-by: Sergey Matsievskiy
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20240925184416.54204-2-matsievskiysv@gmail.com
Signed-off-by: Sasha Levin
---
 drivers/irqchip/irq-mscc-ocelot.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 4d0c3532dbe73..c19ab379e8c5e 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -37,7 +37,7 @@ static struct chip_props ocelot_props = {
 	.reg_off_ena_clr	= 0x1c,
 	.reg_off_ena_set	= 0x20,
 	.reg_off_ident		= 0x38,
-	.reg_off_trigger	= 0x5c,
+	.reg_off_trigger	= 0x4,
 	.n_irq			= 24,
 };
 
@@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = {
 	.reg_off_ena_clr	= 0x1c,
 	.reg_off_ena_set	= 0x20,
 	.reg_off_ident		= 0x38,
-	.reg_off_trigger	= 0x5c,
+	.reg_off_trigger	= 0x4,
 	.n_irq			= 29,
 };

From 975cb1d2121511584695d0e47fdb90e6782da007 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke
Date: Wed, 2 Oct 2024 13:51:41 +0900
Subject: [PATCH 05/48] nvme: tcp: avoid race between queue_lock lock and destroy

[ Upstream commit 782373ba27660ba7d330208cf5509ece6feb4545 ]

Commit 76d54bf20cdc ("nvme-tcp: don't access released socket during
error recovery") added a mutex_lock() call for the queue->queue_lock in
nvme_tcp_get_address(). However, the mutex_lock() races with
mutex_destroy() in nvme_tcp_free_queue(), and causes the WARN below.
DEBUG_LOCKS_WARN_ON(lock->magic != lock) WARNING: CPU: 3 PID: 34077 at kernel/locking/mutex.c:587 __mutex_lock+0xcf0/0x1220 Modules linked in: nvmet_tcp nvmet nvme_tcp nvme_fabrics iw_cm ib_cm ib_core pktcdvd nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ip_set nf_tables qrtr sunrpc ppdev 9pnet_virtio 9pnet pcspkr netfs parport_pc parport e1000 i2c_piix4 i2c_smbus loop fuse nfnetlink zram bochs drm_vram_helper drm_ttm_helper ttm drm_kms_helper xfs drm sym53c8xx floppy nvme scsi_transport_spi nvme_core nvme_auth serio_raw ata_generic pata_acpi dm_multipath qemu_fw_cfg [last unloaded: ib_uverbs] CPU: 3 UID: 0 PID: 34077 Comm: udisksd Not tainted 6.11.0-rc7 #319 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-2.fc40 04/01/2014 RIP: 0010:__mutex_lock+0xcf0/0x1220 Code: 08 84 d2 0f 85 c8 04 00 00 8b 15 ef b6 c8 01 85 d2 0f 85 78 f4 ff ff 48 c7 c6 20 93 ee af 48 c7 c7 60 91 ee af e8 f0 a7 6d fd <0f> 0b e9 5e f4 ff ff 48 b8 00 00 00 00 00 fc ff df 4c 89 f2 48 c1 RSP: 0018:ffff88811305f760 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffff88812c652058 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000001 RBP: ffff88811305f8b0 R08: 0000000000000001 R09: ffffed1075c36341 R10: ffff8883ae1b1a0b R11: 0000000000010498 R12: 0000000000000000 R13: 0000000000000000 R14: dffffc0000000000 R15: ffff88812c652058 FS: 00007f9713ae4980(0000) GS:ffff8883ae180000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fcd78483c7c CR3: 0000000122c38000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? __warn.cold+0x5b/0x1af ? __mutex_lock+0xcf0/0x1220 ? report_bug+0x1ec/0x390 ? handle_bug+0x3c/0x80 ? exc_invalid_op+0x13/0x40 ? asm_exc_invalid_op+0x16/0x20 ? __mutex_lock+0xcf0/0x1220 ? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] ? __pfx___mutex_lock+0x10/0x10 ? __lock_acquire+0xd6a/0x59e0 ? nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] nvme_tcp_get_address+0xc2/0x1e0 [nvme_tcp] ? __pfx_nvme_tcp_get_address+0x10/0x10 [nvme_tcp] nvme_sysfs_show_address+0x81/0xc0 [nvme_core] dev_attr_show+0x42/0x80 ? __asan_memset+0x1f/0x40 sysfs_kf_seq_show+0x1f0/0x370 seq_read_iter+0x2cb/0x1130 ? rw_verify_area+0x3b1/0x590 ? __mutex_lock+0x433/0x1220 vfs_read+0x6a6/0xa20 ? lockdep_hardirqs_on+0x78/0x100 ? __pfx_vfs_read+0x10/0x10 ksys_read+0xf7/0x1d0 ? __pfx_ksys_read+0x10/0x10 ? __x64_sys_openat+0x105/0x1d0 do_syscall_64+0x93/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? __pfx_ksys_read+0x10/0x10 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on_prepare+0x16d/0x400 ? do_syscall_64+0x9f/0x180 ? lockdep_hardirqs_on+0x78/0x100 ? do_syscall_64+0x9f/0x180 ? 
do_syscall_64+0x9f/0x180 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7f9713f55cfa Code: 55 48 89 e5 48 83 ec 20 48 89 55 e8 48 89 75 f0 89 7d f8 e8 e8 74 f8 ff 48 8b 55 e8 48 8b 75 f0 41 89 c0 8b 7d f8 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 2e 44 89 c7 48 89 45 f8 e8 42 75 f8 ff 48 8b RSP: 002b:00007ffd7f512e70 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 RAX: ffffffffffffffda RBX: 000055c38f316859 RCX: 00007f9713f55cfa RDX: 0000000000000fff RSI: 00007ffd7f512eb0 RDI: 0000000000000011 RBP: 00007ffd7f512e90 R08: 0000000000000000 R09: 00000000ffffffff R10: 0000000000000000 R11: 0000000000000246 R12: 000055c38f317148 R13: 0000000000000000 R14: 00007f96f4004f30 R15: 000055c3b6b623c0 The WARN is observed when the blktests test case nvme/014 is repeated with tcp transport. It is rare, and 200 times repeat is required to recreate in some test environments. To avoid the WARN, check the NVME_TCP_Q_LIVE flag before locking queue->queue_lock. The flag is cleared long time before the lock gets destroyed. Signed-off-by: Hannes Reinecke Signed-off-by: Shin'ichiro Kawasaki Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/tcp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index f1d62d74426f0..be04c5f3856d2 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2444,10 +2444,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len = nvmf_get_address(ctrl, buf, size); + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return len; + mutex_lock(&queue->queue_lock); - if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) - goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) @@ -2455,7 +2456,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ? "," : "", &src_addr); } -done: + mutex_unlock(&queue->queue_lock); return len; From f49a9d86c4cd2f901544f46e5a77e8009d2a18d0 Mon Sep 17 00:00:00 2001 From: SurajSonawane2415 Date: Mon, 7 Oct 2024 16:44:16 +0530 Subject: [PATCH 06/48] block: Fix elevator_get_default() checking for NULL q->tag_set [ Upstream commit b402328a24ee7193a8ab84277c0c90ae16768126 ] elevator_get_default() and elv_support_iosched() both check for whether or not q->tag_set is non-NULL, however it's not possible for them to be NULL. This messes up some static checkers, as the checking of tag_set isn't consistent. Remove the checks, which both simplifies the logic and avoids checker errors. 
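
For reference, the helper ends up in this shape after the simplification
(a condensed excerpt of the patched code, relying on the invariant that
every blk-mq queue has a tag_set):

    static inline bool elv_support_iosched(struct request_queue *q)
    {
    	/* only blk-mq queues reach the dereference, and every
    	 * blk-mq queue carries a non-NULL tag_set
    	 */
    	if (!queue_is_mq(q) ||
    	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
    		return false;
    	return true;
    }
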
Signed-off-by: SurajSonawane2415 Link: https://lore.kernel.org/r/20241007111416.13814-1-surajsonawane0215@gmail.com Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- block/elevator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/elevator.c b/block/elevator.c index 5ff093cb3cf8f..ba072d8f660e6 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -558,7 +558,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); static inline bool elv_support_iosched(struct request_queue *q) { if (!queue_is_mq(q) || - (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))) + (q->tag_set->flags & BLK_MQ_F_NO_SCHED)) return false; return true; } @@ -569,7 +569,7 @@ static inline bool elv_support_iosched(struct request_queue *q) */ static struct elevator_type *elevator_get_default(struct request_queue *q) { - if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) + if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) return NULL; if (q->nr_hw_queues != 1 && From 6e306b87c035b9b187c416e0565d83f9ebff8a62 Mon Sep 17 00:00:00 2001 From: Stefan Blum Date: Sun, 6 Oct 2024 10:12:23 +0200 Subject: [PATCH 07/48] HID: multitouch: Add support for B2402FVA track point [ Upstream commit 1a5cbb526ec4b885177d06a8bc04f38da7dbb1d9 ] By default the track point does not work on the Asus Expertbook B2402FVA. From libinput record i got the ID of the track point device: evdev: # Name: ASUE1201:00 04F3:32AE # ID: bus 0x18 vendor 0x4f3 product 0x32ae version 0x100 I found that the track point is functional, when i set the MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU class for the reported device. Signed-off-by: Stefan Blum Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-multitouch.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index e7199ae2e3d91..7584e5a3aafeb 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2020,6 +2020,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x3148) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ELAN, 0x32ae) }, + /* Elitegroup panel */ { .driver_data = MT_CLS_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, From ded2b3b2bc508890b66c06fd4b6842448814a202 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Mon, 7 Oct 2024 12:08:03 +0800 Subject: [PATCH 08/48] HID: multitouch: Add quirk for HONOR MagicBook Art 14 touchpad [ Upstream commit 7a5ab8071114344f62a8b1e64ed3452a77257d76 ] The behavior of HONOR MagicBook Art 14 touchpad is not consistent after reboots, as sometimes it reports itself as a touchpad, and sometimes as a mouse. Similarly to GLO-GXXX it is possible to call MT_QUIRK_FORCE_GET_FEATURE as a workaround to force set feature in mt_set_input_mode() for such special touchpad device. 
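
The resulting device-table entry ties the touchpad's I2C IDs to the
quirk class; a sketch of the entry added by the diff below, with the
assumption spelled out that the MT_CLS_VTL class carries
MT_QUIRK_FORCE_GET_FEATURE:

    /* HONOR MagicBook Art 14 touchpad: I2C vendor 0x35cc, product
     * 0x0104. MT_CLS_VTL is assumed to include
     * MT_QUIRK_FORCE_GET_FEATURE, which forces the set-feature in
     * mt_set_input_mode().
     */
    { .driver_data = MT_CLS_VTL,
      HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
    	     0x35cc, 0x0104) },
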
[jkosina@suse.com: reword changelog a little bit] Link: https://gitlab.freedesktop.org/libinput/libinput/-/issues/1040 Signed-off-by: Wentao Guan Signed-off-by: WangYuli Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-multitouch.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 7584e5a3aafeb..c2d79b2d6cdd2 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2093,6 +2093,11 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 0x347d, 0x7853) }, + /* HONOR MagicBook Art 14 touchpad */ + { .driver_data = MT_CLS_VTL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 0x35cc, 0x0104) }, + /* Ilitek dual touch panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, From 8902a52239627c3e6fdc6aec407fad630346cc5d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 8 Oct 2024 16:21:17 +0100 Subject: [PATCH 09/48] iommu/arm-smmu: Clarify MMU-500 CPRE workaround [ Upstream commit 0dfe314cdd0d378f96bb9c6bdc05c8120f48606d ] CPRE workarounds are implicated in at least 5 MMU-500 errata, some of which remain unfixed. The comment and warning message have proven to be unhelpfully misleading about this scope, so reword them to get the point across with less risk of going out of date or confusing users. Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/dfa82171b5248ad7cf1f25592101a6eec36b8c9a.1728400877.git.robin.murphy@arm.com Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 9dc772f2cbb27..99030e6b16e7a 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -130,7 +130,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu) /* * Disable MMU-500's not-particularly-beneficial next-page - * prefetcher for the sake of errata #841119 and #826419. + * prefetcher for the sake of at least 5 known errata. */ for (i = 0; i < smmu->num_context_banks; ++i) { reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); @@ -138,7 +138,7 @@ int arm_mmu500_reset(struct arm_smmu_device *smmu) arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg); reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR); if (reg & ARM_MMU500_ACTLR_CPRE) - dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n"); + dev_warn_once(smmu->dev, "Failed to disable prefetcher for errata workarounds, check SACR.CACHE_LOCK\n"); } return 0; From e04e648058024d8a42bfaaf3da70428eae674634 Mon Sep 17 00:00:00 2001 From: Greg Joyce Date: Mon, 7 Oct 2024 14:33:24 -0500 Subject: [PATCH 10/48] nvme: disable CC.CRIME (NVME_CC_CRIME) [ Upstream commit 0ce96a6708f34280a536263ee5c67e20c433dcce ] Disable NVME_CC_CRIME so that CSTS.RDY indicates that the media is ready and able to handle commands without returning NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. 
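
The mechanics reduce to one cleared bit in nvme_enable_ctrl() plus
always using the media-ready timeout, condensed here from the diff
below:

    /* never request CRIME: CSTS.RDY should mean the media is ready */
    ctrl->ctrl_config &= ~NVME_CC_CRIME;

    /* with CRIME off, the controller-ready-with-media-wait timeout
     * (CRTO.CRWMT) is the one that applies
     */
    ready_timeout = NVME_CRTO_CRWMT(crto);
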
Signed-off-by: Greg Joyce Reviewed-by: Nilay Shroff Tested-by: Nilay Shroff Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 82509f3679373..e25206c7de80c 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2250,8 +2250,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) - ctrl->ctrl_config |= NVME_CC_CRIME; + /* + * Setting CRIME results in CSTS.RDY before the media is ready. This + * makes it possible for media related commands to return the error + * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is + * restructured to handle retries, disable CC.CRIME. + */ + ctrl->ctrl_config &= ~NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2286,10 +2291,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) * devices are known to get this wrong. Use the larger of the * two values. */ - if (ctrl->ctrl_config & NVME_CC_CRIME) - ready_timeout = NVME_CRTO_CRIMT(crto); - else - ready_timeout = NVME_CRTO_CRWMT(crto); + ready_timeout = NVME_CRTO_CRWMT(crto); if (ready_timeout < timeout) dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", From d22f177935dd874d59887d0d3f1d7b054fc7124b Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Tue, 8 Oct 2024 17:07:35 -0400 Subject: [PATCH 11/48] bpf: use kvzmalloc to allocate BPF verifier environment [ Upstream commit 434247637c66e1be2bc71a9987d4c3f0d8672387 ] The kzmalloc call in bpf_check can fail when memory is very fragmented, which in turn can lead to an OOM kill. Use kvzmalloc to fall back to vmalloc when memory is too fragmented to allocate an order 3 sized bpf verifier environment. Admittedly this is not a very common case, and only happens on systems where memory has already been squeezed close to the limit, but this does not seem like much of a hot path, and it's a simple enough fix. 
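
As a general pattern, a minimal sketch (not part of the patch itself):
kvzalloc() attempts a kmalloc first and transparently falls back to
vmalloc when memory is too fragmented, and such allocations must be
released with kvfree(), which dispatches to kfree() or vfree() as
appropriate:

    #include <linux/slab.h>	/* kvzalloc()/kvfree() on recent kernels */

    struct bpf_verifier_env *env;

    env = kvzalloc(sizeof(*env), GFP_KERNEL); /* kmalloc, else vmalloc */
    if (!env)
    	return -ENOMEM;
    /* ... use env ... */
    kvfree(env); /* correct for either underlying allocation */
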
Signed-off-by: Rik van Riel Reviewed-by: Shakeel Butt Link: https://lore.kernel.org/r/20241008170735.16766766@imladris.surriel.com Signed-off-by: Alexei Starovoitov Signed-off-by: Sasha Levin --- kernel/bpf/verifier.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 67eb55a354bcc..4f19a091571bb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -20230,7 +20230,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ - env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); + env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; @@ -20450,6 +20450,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: - kfree(env); + kvfree(env); return ret; } From 83394e7d94216eeada68f8de46d6baef3daadc8f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 6 Oct 2024 09:18:37 +0800 Subject: [PATCH 12/48] crypto: api - Fix liveliness check in crypto_alg_tested [ Upstream commit b81e286ba154a4e0f01a94d99179a97f4ba3e396 ] As algorithm testing is carried out without holding the main crypto lock, it is always possible for the algorithm to go away during the test. So before crypto_alg_tested updates the status of the tested alg, it checks whether it's still on the list of all algorithms. This is inaccurate because it may be off the main list but still on the list of algorithms to be removed. Updating the algorithm status is safe per se as the larval still holds a reference to it. However, killing spawns of other algorithms that are of lower priority is clearly a deficiency as it adds unnecessary churn. Fix the test by checking whether the algorithm is dead. Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- crypto/algapi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 85bc279b4233f..b3a6086042530 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -396,7 +396,7 @@ void crypto_alg_tested(const char *name, int err) q->cra_flags |= CRYPTO_ALG_DEAD; alg = test->adult; - if (list_empty(&alg->cra_list)) + if (crypto_is_dead(alg)) goto complete; if (err == -ECANCELED) From 839c22a2134aa6099dbc3a5b5933c69ccd472b96 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 9 Oct 2024 16:38:48 +0800 Subject: [PATCH 13/48] crypto: marvell/cesa - Disable hash algorithms [ Upstream commit e845d2399a00f866f287e0cefbd4fc7d8ef0d2f7 ] Disable cesa hash algorithms by lowering the priority because they appear to be broken when invoked in parallel. This allows them to still be tested for debugging purposes. 
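
Lowering cra_priority acts as a soft disable because, when several
implementations register the same cra_name, the crypto API resolves that
name to the highest-priority one, while the cra_driver_name stays
directly addressable. A sketch under that assumption (error handling
with IS_ERR() omitted):

    #include <crypto/hash.h>

    /* "sha256" resolves to the highest-priority provider; with
     * mv-sha256 now at priority 0, another implementation wins.
     */
    struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);

    /* the cesa code can still be exercised explicitly for debugging */
    struct crypto_ahash *dbg = crypto_alloc_ahash("mv-sha256", 0, 0);
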
Reported-by: Klaus Kudielka Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/marvell/cesa/hash.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index 8d84ad45571c7..f150861ceaf69 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = { .base = { .cra_name = "md5", .cra_driver_name = "mv-md5", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = { .base = { .cra_name = "sha1", .cra_driver_name = "mv-sha1", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = { .base = { .cra_name = "sha256", .cra_driver_name = "mv-sha256", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = { .base = { .cra_name = "hmac(md5)", .cra_driver_name = "mv-hmac-md5", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = { .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "mv-hmac-sha1", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, @@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = { .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "mv-hmac-sha256", - .cra_priority = 300, + .cra_priority = 0, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, From 87791a733a3bc352e3eff94f6db76033ed53ca31 Mon Sep 17 00:00:00 2001 From: Julian Vetter Date: Thu, 10 Oct 2024 14:46:01 +0200 Subject: [PATCH 14/48] sound: Make CONFIG_SND depend on INDIRECT_IOMEM instead of UML [ Upstream commit ad6639f143a0b42d7fb110ad14f5949f7c218890 ] When building for the UM arch and neither INDIRECT_IOMEM=y, nor HAS_IOMEM=y is selected, it will fall back to the implementations from asm-generic/io.h for IO memcpy. But these fall-back functions just do a memcpy. So, instead of depending on UML, add dependency on 'HAS_IOMEM || INDIRECT_IOMEM'. Reviewed-by: Yann Sionneau Signed-off-by: Julian Vetter Link: https://patch.msgid.link/20241010124601.700528-1-jvetter@kalrayinc.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/Kconfig b/sound/Kconfig index 4c036a9a420ab..8b40205394fe0 100644 --- a/sound/Kconfig +++ b/sound/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig SOUND tristate "Sound card support" - depends on HAS_IOMEM || UML + depends on HAS_IOMEM || INDIRECT_IOMEM help If you have a sound card in your computer, i.e. if it can say more than an occasional beep, say Y. 
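
For background on why the old "|| UML" clause was unsafe: with neither
HAS_IOMEM nor INDIRECT_IOMEM providing a real implementation, the
asm-generic fallbacks degrade IO copies to a plain memcpy() on addresses
that are not actually memory-mapped IO. Roughly, as a paraphrase of the
asm-generic/io.h fallback rather than a verbatim copy:

    /* approximate shape of the generic fallback */
    static inline void memcpy_toio(volatile void __iomem *to,
    			       const void *from, size_t count)
    {
    	memcpy((void __force *)to, from, count);
    }
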
From c6db3a111e2dd3cc53f36ae9c5869c79b6c6435a Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Thu, 8 Aug 2024 15:06:34 -0500 Subject: [PATCH 15/48] drm/vmwgfx: Limit display layout ioctl array size to VMWGFX_NUM_DISPLAY_UNITS [ Upstream commit 28a5dfd4f615539fb22fb6d5c219c199c14e6eb6 ] Currently the array size is only limited by the largest kmalloc size which is incorrect. This change will also return a more specific error message than ENOMEM to userspace. Signed-off-by: Ian Forbes Reviewed-by: Zack Rusin Reviewed-by: Martin Krastev Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240808200634.1074083-1-ian.forbes@broadcom.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 +++- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3 --- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index ac3d7ff3f5bb9..def98d868deb4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -61,7 +61,7 @@ #define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) -#define VMWGFX_MAX_DISPLAYS 16 +#define VMWGFX_NUM_DISPLAY_UNITS 8 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 #define VMWGFX_MIN_INITIAL_WIDTH 1280 @@ -81,7 +81,7 @@ #define VMWGFX_NUM_GB_CONTEXT 256 #define VMWGFX_NUM_GB_SHADER 20000 #define VMWGFX_NUM_GB_SURFACE 32768 -#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS #define VMWGFX_NUM_DXCONTEXT 256 #define VMWGFX_NUM_DXQUERY 512 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 11f7c0e5420e0..33f73d559be72 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2327,7 +2327,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; - void __user *user_rects; + const void __user *user_rects; struct drm_vmw_rect *rects; struct drm_rect *drm_rects; unsigned rects_size; @@ -2339,6 +2339,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; + } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) { + return -E2BIG; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 19a843da87b78..ec86f92517a14 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -198,9 +198,6 @@ struct vmw_kms_dirty { s32 unit_y2; }; -#define VMWGFX_NUM_DISPLAY_UNITS 8 - - #define vmw_framebuffer_to_vfb(x) \ container_of(x, struct vmw_framebuffer, base) #define vmw_framebuffer_to_vfbs(x) \ From 3406bfc813a9bbd9c3055795e985f527b7852e8c Mon Sep 17 00:00:00 2001 From: Showrya M N Date: Mon, 7 Oct 2024 18:28:36 +0530 Subject: [PATCH 16/48] RDMA/siw: Add sendpage_ok() check to disable MSG_SPLICE_PAGES [ Upstream commit 4e1e3dd88a4cedd5ccc1a3fc3d71e03b70a7a791 ] While running ISER over SIW, the initiator machine encounters a warning from skb_splice_from_iter() indicating that a slab page is being used in send_page. 
To address this, it is better to add a sendpage_ok() check within the
driver itself, and if it returns 0, then the MSG_SPLICE_PAGES flag
should be disabled before entering the network stack.

A similar issue has been discussed for NVMe in this thread:
https://lore.kernel.org/all/20240530142417.146696-1-ofir.gal@volumez.com/

WARNING: CPU: 0 PID: 5342 at net/core/skbuff.c:7140 skb_splice_from_iter+0x173/0x320
Call Trace:
 tcp_sendmsg_locked+0x368/0xe40
 siw_tx_hdt+0x695/0xa40 [siw]
 siw_qp_sq_process+0x102/0xb00 [siw]
 siw_sq_resume+0x39/0x110 [siw]
 siw_run_sq+0x74/0x160 [siw]
 kthread+0xd2/0x100
 ret_from_fork+0x34/0x40
 ret_from_fork_asm+0x1a/0x30

Link: https://patch.msgid.link/r/20241007125835.89942-1-showrya@chelsio.com
Signed-off-by: Showrya M N
Signed-off-by: Potnuri Bharat Teja
Signed-off-by: Jason Gunthorpe
Signed-off-by: Sasha Levin
---
 drivers/infiniband/sw/siw/siw_qp_tx.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 60b6a41359611..feae920784be8 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -337,6 +337,8 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
 			msg.msg_flags &= ~MSG_MORE;
 
 		tcp_rate_check_app_limited(sk);
+		if (!sendpage_ok(page[i]))
+			msg.msg_flags &= ~MSG_SPLICE_PAGES;
 		bvec_set_page(&bvec, page[i], bytes, offset);
 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
 

From 4a57f42e5ed42cb8f1beb262c4f6d3e698939e4e Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Tue, 15 Oct 2024 07:30:17 -0700
Subject: [PATCH 17/48] nvme-multipath: defer partition scanning

[ Upstream commit 1f021341eef41e77a633186e9be5223de2ce5d48 ]

We need to suppress the partition scan from occurring within the
controller's scan_work context. If a path error occurs here, the IO
will wait until a path becomes available or all paths are torn down,
but that action also occurs within scan_work, so it would deadlock.
Defer the partition scan to a different context that does not block
scan_work.

Reported-by: Hannes Reinecke
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
Signed-off-by: Sasha Levin
---
 drivers/nvme/host/multipath.c | 33 +++++++++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h      |  1 +
 2 files changed, 34 insertions(+)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 37ea0fa421da8..ede2a14dad8be 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -499,6 +499,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
 	return ret;
 }
 
+static void nvme_partition_scan_work(struct work_struct *work)
+{
+	struct nvme_ns_head *head =
+		container_of(work, struct nvme_ns_head, partition_scan_work);
+
+	if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+					     &head->disk->state)))
+		return;
+
+	mutex_lock(&head->disk->open_mutex);
+	bdev_disk_changed(head->disk, false);
+	mutex_unlock(&head->disk->open_mutex);
+}
+
 static void nvme_requeue_work(struct work_struct *work)
 {
 	struct nvme_ns_head *head =
@@ -525,6 +539,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	bio_list_init(&head->requeue_list);
 	spin_lock_init(&head->requeue_lock);
 	INIT_WORK(&head->requeue_work, nvme_requeue_work);
+	INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
 
 	/*
 	 * Add a multipath node if the subsystems supports multiple controllers.
 	 */
@@ -540,6 +555,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 		return -ENOMEM;
 	head->disk->fops = &nvme_ns_head_ops;
 	head->disk->private_data = head;
+
+	/*
+	 * We need to suppress the partition scan from occurring within the
+	 * controller's scan_work context. If a path error occurs here, the IO
+	 * will wait until a path becomes available or all paths are torn down,
+	 * but that action also occurs within scan_work, so it would deadlock.
+	 * Defer the partition scan to a different context that does not block
+	 * scan_work.
+	 */
+	set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
 	sprintf(head->disk->disk_name, "nvme%dn%d",
 			ctrl->subsys->instance, head->instance);
 
@@ -589,6 +614,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 			return;
 		}
 		nvme_add_ns_head_cdev(head);
+		kblockd_schedule_work(&head->partition_scan_work);
 	}
 
 	mutex_lock(&head->lock);
@@ -889,6 +915,12 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 	kblockd_schedule_work(&head->requeue_work);
 	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
 		nvme_cdev_del(&head->cdev, &head->cdev_device);
+		/*
+		 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+		 * to allow multipath to fail all I/O.
+		 */
+		synchronize_srcu(&head->srcu);
+		kblockd_schedule_work(&head->requeue_work);
 		del_gendisk(head->disk);
 	}
 }
@@ -900,6 +932,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
 	flush_work(&head->requeue_work);
+	flush_work(&head->partition_scan_work);
 	put_disk(head->disk);
 }
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 799f8a2bb0b4f..14a867245c29f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -476,6 +476,7 @@ struct nvme_ns_head {
 	struct bio_list		requeue_list;
 	spinlock_t		requeue_lock;
 	struct work_struct	requeue_work;
+	struct work_struct	partition_scan_work;
 	struct mutex		lock;
 	unsigned long		flags;
 #define NVME_NSHEAD_DISK_LIVE	0

From 4c332037fcbb9bb53c46ba4f156951429acc4d97 Mon Sep 17 00:00:00 2001
From: Philip Yang
Date: Fri, 4 Oct 2024 16:28:07 -0400
Subject: [PATCH 18/48] drm/amdkfd: Accounting pdd vram_usage for svm

[ Upstream commit 68d26c10ef503175df3142db6fcd75dd94860592 ]

Process device data pdd->vram_usage is read by rocm-smi via sysfs, this
is currently missing the svm_bo usage accounting, so "rocm-smi
--showpids" per process VRAM usage report is incorrect.

Add pdd->vram_usage accounting when svm_bo allocation and release,
change to atomic64_t type because it is updated outside process mutex
now.
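
The conversion follows the standard recipe for a counter that escapes
its lock, distilled from the diff below: WRITE_ONCE()/READ_ONCE() under
p->mutex become lock-free atomic64 operations, so the svm_bo release
path can update the counter without taking the process mutex:

    u64 vram;

    /* writers, no longer required to hold p->mutex */
    atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
    atomic64_sub(amdgpu_bo_size(bo), &pdd->vram_usage);

    /* reader (the vram_* sysfs attribute) */
    vram = atomic64_read(&pdd->vram_usage);
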
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher (cherry picked from commit 98c0b0efcc11f2a5ddf3ce33af1e48eedf808b04) Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 +++--- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 26 ++++++++++++++++++++++++ 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 19d46be639429..8669677662d0c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1164,7 +1164,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) size >>= 1; - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); + atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage); } mutex_unlock(&p->mutex); @@ -1235,7 +1235,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, kfd_process_device_remove_obj_handle( pdd, GET_IDR_HANDLE(args->handle)); - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); + atomic64_sub(size, &pdd->vram_usage); err_unlock: err_pdd: @@ -2352,7 +2352,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { bo_bucket->restored_offset = offset; /* Update the VRAM usage count */ - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); + atomic64_add(bo_bucket->size, &pdd->vram_usage); } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 67204c3dfbb8f..27c9d5c43765a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -765,7 +765,7 @@ struct kfd_process_device { enum kfd_pdd_bound bound; /* VRAM usage */ - uint64_t vram_usage; + atomic64_t vram_usage; struct attribute attr_vram; char vram_filename[MAX_SYSFS_FILENAME_LEN]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 43f520b379670..6c90231e0aec2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -306,7 +306,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, } else if (strncmp(attr->name, "vram_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_vram); - return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); + return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage)); } else if (strncmp(attr->name, "sdma_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_sdma); @@ -1589,7 +1589,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; pdd->runtime_inuse = false; - pdd->vram_usage = 0; + atomic64_set(&pdd->vram_usage, 0); pdd->sdma_past_activity_counter = 0; pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index ce76d45549984..6b7c6f45a80a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -391,6 +391,27 @@ static void svm_range_bo_release(struct kref *kref) spin_lock(&svm_bo->list_lock); } spin_unlock(&svm_bo->list_lock); + + if 
(mmget_not_zero(svm_bo->eviction_fence->mm)) { + struct kfd_process_device *pdd; + struct kfd_process *p; + struct mm_struct *mm; + + mm = svm_bo->eviction_fence->mm; + /* + * The forked child process takes svm_bo device pages ref, svm_bo could be + * released after parent process is gone. + */ + p = kfd_lookup_process_by_mm(mm); + if (p) { + pdd = kfd_get_process_device_data(svm_bo->node, p); + if (pdd) + atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage); + kfd_unref_process(p); + } + mmput(mm); + } + if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) /* We're not in the eviction worker. Signal the fence. */ dma_fence_signal(&svm_bo->eviction_fence->base); @@ -518,6 +539,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { + struct kfd_process_device *pdd; struct amdgpu_bo_param bp; struct svm_range_bo *svm_bo; struct amdgpu_bo_user *ubo; @@ -609,6 +631,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, list_add(&prange->svm_bo_list, &svm_bo->range_list); spin_unlock(&svm_bo->list_lock); + pdd = svm_range_get_pdd_by_node(prange, node); + if (pdd) + atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage); + return 0; reserve_bo_failed: From bee372110e6992dd8ff30e270c4a56c35f227978 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 20 Sep 2024 19:35:20 +1000 Subject: [PATCH 19/48] powerpc/powernv: Free name on error in opal_event_init() [ Upstream commit cf8989d20d64ad702a6210c11a0347ebf3852aa7 ] In opal_event_init() if request_irq() fails name is not freed, leading to a memory leak. The code only runs at boot time, there's no way for a user to trigger it, so there's no security impact. Fix the leak by freeing name in the error path. Reported-by: 2639161967 <2639161967@qq.com> Closes: https://lore.kernel.org/linuxppc-dev/87wmjp3wig.fsf@mail.lhotse Signed-off-by: Michael Ellerman Link: https://patch.msgid.link/20240920093520.67997-1-mpe@ellerman.id.au Signed-off-by: Sasha Levin --- arch/powerpc/platforms/powernv/opal-irqchip.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 56a1f7ce78d2c..d92759c21fae9 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -282,6 +282,7 @@ int __init opal_event_init(void) name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); + kfree(name); continue; } } From dd5d32f74fc1a48302a40cf3e259dd3d4b8e78f0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 12 Oct 2024 22:35:23 +0200 Subject: [PATCH 20/48] net: phy: mdio-bcm-unimac: Add BCM6846 support [ Upstream commit 906b77ca91c7e9833b4e47bedb6bec76be71d497 ] Add Unimac mdio compatible string for the special BCM6846 variant. This variant has a few extra registers compared to other versions. 
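
Binding is driven purely by the compatible string, so the driver side is
one more of_device_id entry; the table reads like this after the diff
below (excerpt):

    static const struct of_device_id unimac_mdio_ids[] = {
    	{ .compatible = "brcm,asp-v2.1-mdio", },
    	{ .compatible = "brcm,asp-v2.0-mdio", },
    	{ .compatible = "brcm,bcm6846-mdio", },	/* BCM6846 variant */
    	{ .compatible = "brcm,genet-mdio-v5", },
    	/* ... */
    	{ /* sentinel */ },
    };
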
Suggested-by: Florian Fainelli Link: https://lore.kernel.org/linux-devicetree/b542b2e8-115c-4234-a464-e73aa6bece5c@broadcom.com/ Signed-off-by: Linus Walleij Link: https://patch.msgid.link/20241012-bcm6846-mdio-v1-2-c703ca83e962@linaro.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/mdio/mdio-bcm-unimac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index 6b26a0803696d..a29838be335c9 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -336,6 +336,7 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops, static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,asp-v2.1-mdio", }, { .compatible = "brcm,asp-v2.0-mdio", }, + { .compatible = "brcm,bcm6846-mdio", }, { .compatible = "brcm,genet-mdio-v5", }, { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, From e4f9fffbb1dc94ba81de45d9f29829267526cc0f Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Wed, 16 Oct 2024 08:33:14 +0530 Subject: [PATCH 21/48] nvme-loop: flush off pending I/O while shutting down loop controller [ Upstream commit c199fac88fe7c749f88a0653e9f621b9f5a71cf1 ] While shutting down loop controller, we first quiesce the admin/IO queue, delete the admin/IO tag-set and then at last destroy the admin/IO queue. However it's quite possible that during the window between quiescing and destroying of the admin/IO queue, some admin/IO request might sneak in and if that happens then we could potentially encounter a hung task because shutdown operation can't forward progress until any pending I/O is flushed off. This commit helps ensure that before destroying the admin/IO queue, we unquiesce the admin/IO queue so that any outstanding requests, which are added after the admin/IO queue is quiesced, are now flushed to its completion. Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/target/loop.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 48d5df054cd02..bd61a1b82c4cd 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; + /* + * It's possible that some requests might have been added + * after admin queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. + */ + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } ctrl->ctrl.queue_count = 1; + /* + * It's possible that some requests might have been added + * after io queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. 
+ */ + nvme_unquiesce_io_queues(&ctrl->ctrl); } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) From 1a1bcca5c9efd2c72c8d2fcbadf2d673cceb2ea7 Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Wed, 16 Oct 2024 08:33:15 +0530 Subject: [PATCH 22/48] nvme: make keep-alive synchronous operation [ Upstream commit d06923670b5a5f609603d4a9fee4dec02d38de9c ] The nvme keep-alive operation, which executes at a periodic interval, could potentially sneak in while shutting down a fabric controller. This may lead to a race between the fabric controller admin queue destroy code path (invoked while shutting down controller) and hw/hctx queue dispatcher called from the nvme keep-alive async request queuing operation. This race could lead to the kernel crash shown below: Call Trace: autoremove_wake_function+0x0/0xbc (unreliable) __blk_mq_sched_dispatch_requests+0x114/0x24c blk_mq_sched_dispatch_requests+0x44/0x84 blk_mq_run_hw_queue+0x140/0x220 nvme_keep_alive_work+0xc8/0x19c [nvme_core] process_one_work+0x200/0x4e0 worker_thread+0x340/0x504 kthread+0x138/0x140 start_kernel_thread+0x14/0x18 While shutting down fabric controller, if nvme keep-alive request sneaks in then it would be flushed off. The nvme_keep_alive_end_io function is then invoked to handle the end of the keep-alive operation which decrements the admin->q_usage_counter and assuming this is the last/only request in the admin queue then the admin->q_usage_counter becomes zero. If that happens then blk-mq destroy queue operation (blk_mq_destroy_ queue()) which could be potentially running simultaneously on another cpu (as this is the controller shutdown code path) would forward progress and deletes the admin queue. So, now from this point onward we are not supposed to access the admin queue resources. However the issue here's that the nvme keep-alive thread running hw/hctx queue dispatch operation hasn't yet finished its work and so it could still potentially access the admin queue resource while the admin queue had been already deleted and that causes the above crash. This fix helps avoid the observed crash by implementing keep-alive as a synchronous operation so that we decrement admin->q_usage_counter only after keep-alive command finished its execution and returns the command status back up to its caller (blk_execute_rq()). This would ensure that fabric shutdown code path doesn't destroy the fabric admin queue until keep-alive request finished execution and also keep-alive thread is not running hw/hctx queue dispatch operation. 
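
The mechanical core of the fix is the switch from the asynchronous to
the synchronous block-layer execution API, condensed here from the diff
below; blk_execute_rq() does not return until the command completes, so
the admin queue's q_usage_counter stays elevated for the entire
dispatch:

    blk_status_t status;

    nvme_init_request(rq, &ctrl->ka_cmd);
    rq->timeout = ctrl->kato * HZ;
    status = blk_execute_rq(rq, false);	/* blocks until completion */
    nvme_keep_alive_finish(rq, status, ctrl);
    blk_mq_free_request(rq);	/* freed only after completion */
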
Reviewed-by: Christoph Hellwig Signed-off-by: Nilay Shroff Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e25206c7de80c..b3c5460c6d768 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1178,10 +1178,9 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) nvme_keep_alive_work_period(ctrl)); } -static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, - blk_status_t status) +static void nvme_keep_alive_finish(struct request *rq, + blk_status_t status, struct nvme_ctrl *ctrl) { - struct nvme_ctrl *ctrl = rq->end_io_data; unsigned long flags; bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); @@ -1199,13 +1198,11 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, delay = 0; } - blk_mq_free_request(rq); - if (status) { dev_err(ctrl->device, "failed nvme_keep_alive_end_io error=%d\n", status); - return RQ_END_IO_NONE; + return; } ctrl->ka_last_check_time = jiffies; @@ -1217,7 +1214,6 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); - return RQ_END_IO_NONE; } static void nvme_keep_alive_work(struct work_struct *work) @@ -1226,6 +1222,7 @@ static void nvme_keep_alive_work(struct work_struct *work) struct nvme_ctrl, ka_work); bool comp_seen = ctrl->comp_seen; struct request *rq; + blk_status_t status; ctrl->ka_last_check_time = jiffies; @@ -1248,9 +1245,9 @@ static void nvme_keep_alive_work(struct work_struct *work) nvme_init_request(rq, &ctrl->ka_cmd); rq->timeout = ctrl->kato * HZ; - rq->end_io = nvme_keep_alive_end_io; - rq->end_io_data = ctrl; - blk_execute_rq_nowait(rq, false); + status = blk_execute_rq(rq, false); + nvme_keep_alive_finish(rq, status, ctrl); + blk_mq_free_request(rq); } static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) From e8c71494181153a134c96da28766a57bd1eac8cb Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Sat, 2 Nov 2024 14:24:38 -0700 Subject: [PATCH 23/48] smb: client: Fix use-after-free of network namespace. [ Upstream commit ef7134c7fc48e1441b398e55a862232868a6f0a7 ] Recently, we got a customer report that CIFS triggers oops while reconnecting to a server. [0] The workload runs on Kubernetes, and some pods mount CIFS servers in non-root network namespaces. The problem rarely happened, but it was always while the pod was dying. The root cause is wrong reference counting for network namespace. CIFS uses kernel sockets, which do not hold refcnt of the netns that the socket belongs to. That means CIFS must ensure the socket is always freed before its netns; otherwise, use-after-free happens. The repro steps are roughly: 1. mount CIFS in a non-root netns 2. drop packets from the netns 3. destroy the netns 4. unmount CIFS We can reproduce the issue quickly with the script [1] below and see the splat [2] if CONFIG_NET_NS_REFCNT_TRACKER is enabled. When the socket is TCP, it is hard to guarantee the netns lifetime without holding refcnt due to async timers. Let's hold netns refcnt for each socket as done for SMC in commit 9744d2bf1976 ("smc: Fix use-after-free in tcp_write_timer_handler()."). 
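
Concretely, the conversion applied to the freshly created kernel socket
in generic_ip_connect() (excerpted from the diff below) is:

    sk = server->ssocket->sk;
    /* drop the untracked reference kernel sockets start with ... */
    __netns_tracker_free(net, &sk->ns_tracker, false);
    /* ... and take a real, tracked one so the netns cannot be freed
     * while the socket is alive
     */
    sk->sk_net_refcnt = 1;
    get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
    sock_inuse_add(net, 1);
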
Note that we need to move put_net() from cifs_put_tcp_session() to
clean_demultiplex_info(); otherwise, __sock_create() still could touch a
freed netns while cifsd tries to reconnect from
cifs_demultiplex_thread().

Also, maybe_get_net() cannot be put just before __sock_create() because
the code is not under RCU and there is a small chance that the same
address happened to be reallocated to another netns.

[0]:
CIFS: VFS: \\XXXXXXXXXXX has not responded in 15 seconds. Reconnecting...
CIFS: Serverclose failed 4 times, giving up
Unable to handle kernel paging request at virtual address 14de99e461f84a07
Mem abort info:
  ESR = 0x0000000096000004
  EC = 0x25: DABT (current EL), IL = 32 bits
  SET = 0, FnV = 0
  EA = 0, S1PTW = 0
  FSC = 0x04: level 0 translation fault
Data abort info:
  ISV = 0, ISS = 0x00000004
  CM = 0, WnR = 0
[14de99e461f84a07] address between user and kernel address ranges
Internal error: Oops: 0000000096000004 [#1] SMP
Modules linked in: cls_bpf sch_ingress nls_utf8 cifs cifs_arc4 cifs_md4
 dns_resolver tcp_diag inet_diag veth xt_state xt_connmark
 nf_conntrack_netlink xt_nat xt_statistic xt_MASQUERADE xt_mark
 xt_addrtype ipt_REJECT nf_reject_ipv4 nft_chain_nat nf_nat xt_conntrack
 nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 xt_comment nft_compat
 nf_tables nfnetlink overlay nls_ascii nls_cp437 sunrpc vfat fat
 aes_ce_blk aes_ce_cipher ghash_ce sm4_ce_cipher sm4 sm3_ce sm3 sha3_ce
 sha512_ce sha512_arm64 sha1_ce ena button sch_fq_codel loop fuse
 configfs dmi_sysfs sha2_ce sha256_arm64 dm_mirror dm_region_hash dm_log
 dm_mod dax efivarfs
CPU: 5 PID: 2690970 Comm: cifsd Not tainted 6.1.103-109.184.amzn2023.aarch64 #1
Hardware name: Amazon EC2 r7g.4xlarge/, BIOS 1.0 11/1/2018
pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : fib_rules_lookup+0x44/0x238
lr : __fib_lookup+0x64/0xbc
sp : ffff8000265db790
x29: ffff8000265db790 x28: 0000000000000000 x27: 000000000000bd01
x26: 0000000000000000 x25: ffff000b4baf8000 x24: ffff00047b5e4580
x23: ffff8000265db7e0 x22: 0000000000000000 x21: ffff00047b5e4500
x20: ffff0010e3f694f8 x19: 14de99e461f849f7 x18: 0000000000000000
x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000
x14: 0000000000000000 x13: 0000000000000000 x12: 3f92800abd010002
x11: 0000000000000001 x10: ffff0010e3f69420 x9 : ffff800008a6f294
x8 : 0000000000000000 x7 : 0000000000000006 x6 : 0000000000000000
x5 : 0000000000000001 x4 : ffff001924354280 x3 : ffff8000265db7e0
x2 : 0000000000000000 x1 : ffff0010e3f694f8 x0 : ffff00047b5e4500
Call trace:
 fib_rules_lookup+0x44/0x238
 __fib_lookup+0x64/0xbc
 ip_route_output_key_hash_rcu+0x2c4/0x398
 ip_route_output_key_hash+0x60/0x8c
 tcp_v4_connect+0x290/0x488
 __inet_stream_connect+0x108/0x3d0
 inet_stream_connect+0x50/0x78
 kernel_connect+0x6c/0xac
 generic_ip_connect+0x10c/0x6c8 [cifs]
 __reconnect_target_unlocked+0xa0/0x214 [cifs]
 reconnect_dfs_server+0x144/0x460 [cifs]
 cifs_reconnect+0x88/0x148 [cifs]
 cifs_readv_from_socket+0x230/0x430 [cifs]
 cifs_read_from_socket+0x74/0xa8 [cifs]
 cifs_demultiplex_thread+0xf8/0x704 [cifs]
 kthread+0xd0/0xd4
Code: aa0003f8 f8480f13 eb18027f 540006c0 (b9401264)

[1]:
CIFS_CRED="/root/cred.cifs"
CIFS_USER="Administrator"
CIFS_PASS="Password"
CIFS_IP="X.X.X.X"
CIFS_PATH="//${CIFS_IP}/Users/Administrator/Desktop/CIFS_TEST"
CIFS_MNT="/mnt/smb"
DEV="enp0s3"

cat <<EOF > ${CIFS_CRED}
username=${CIFS_USER}
password=${CIFS_PASS}
domain=EXAMPLE.COM
EOF

unshare -n bash -c "
mkdir -p ${CIFS_MNT}
ip netns attach root 1
ip link add eth0 type veth peer veth0 netns root
ip link set eth0 up
ip -n root link set veth0 up
ip addr add 192.168.0.2/24 dev eth0 ip -n root addr add 192.168.0.1/24 dev veth0 ip route add default via 192.168.0.1 dev eth0 ip netns exec root sysctl net.ipv4.ip_forward=1 ip netns exec root iptables -t nat -A POSTROUTING -s 192.168.0.2 -o ${DEV} -j MASQUERADE mount -t cifs ${CIFS_PATH} ${CIFS_MNT} -o vers=3.0,sec=ntlmssp,credentials=${CIFS_CRED},rsize=65536,wsize=65536,cache=none,echo_interval=1 touch ${CIFS_MNT}/a.txt ip netns exec root iptables -t nat -D POSTROUTING -s 192.168.0.2 -o ${DEV} -j MASQUERADE " umount ${CIFS_MNT} [2]: ref_tracker: net notrefcnt@000000004bbc008d has 1/1 users at sk_alloc (./include/net/net_namespace.h:339 net/core/sock.c:2227) inet_create (net/ipv4/af_inet.c:326 net/ipv4/af_inet.c:252) __sock_create (net/socket.c:1576) generic_ip_connect (fs/smb/client/connect.c:3075) cifs_get_tcp_session.part.0 (fs/smb/client/connect.c:3160 fs/smb/client/connect.c:1798) cifs_mount_get_session (fs/smb/client/trace.h:959 fs/smb/client/connect.c:3366) dfs_mount_share (fs/smb/client/dfs.c:63 fs/smb/client/dfs.c:285) cifs_mount (fs/smb/client/connect.c:3622) cifs_smb3_do_mount (fs/smb/client/cifsfs.c:949) smb3_get_tree (fs/smb/client/fs_context.c:784 fs/smb/client/fs_context.c:802 fs/smb/client/fs_context.c:794) vfs_get_tree (fs/super.c:1800) path_mount (fs/namespace.c:3508 fs/namespace.c:3834) __x64_sys_mount (fs/namespace.c:3848 fs/namespace.c:4057 fs/namespace.c:4034 fs/namespace.c:4034) do_syscall_64 (arch/x86/entry/common.c:52 arch/x86/entry/common.c:83) entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130) Fixes: 26abe14379f8 ("net: Modify sk_alloc to not reference count the netns of kernel sockets.") Signed-off-by: Kuniyuki Iwashima Acked-by: Tom Talpey Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/smb/client/connect.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index e325e06357ffb..1df0a6edcc216 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -1054,6 +1054,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server) */ } + put_net(cifs_net_ns(server)); kfree(server->leaf_fullpath); kfree(server); @@ -1649,8 +1650,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) /* srv_count can never go negative */ WARN_ON(server->srv_count < 0); - put_net(cifs_net_ns(server)); - list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); @@ -3077,13 +3076,22 @@ generic_ip_connect(struct TCP_Server_Info *server) if (server->ssocket) { socket = server->ssocket; } else { - rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, + struct net *net = cifs_net_ns(server); + struct sock *sk; + + rc = __sock_create(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket, 1); if (rc < 0) { cifs_server_dbg(VFS, "Error %d creating socket\n", rc); return rc; } + sk = server->ssocket->sk; + __netns_tracker_free(net, &sk->ns_tracker, false); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); + /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ cifs_dbg(FYI, "Socket created\n"); socket = server->ssocket; From 5a526388d0ac5ab5a2eb2b3dd6ef9d1a1866b5d7 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Mon, 4 Nov 2024 04:24:40 -0800 Subject: [PATCH 24/48] nvme/host: Fix RCU list traversal to use SRCU primitive [ Upstream commit 6d1c69945ce63a9fba22a4abf646cf960d878782 ] The code currently uses list_for_each_entry_rcu() while holding an SRCU lock, triggering false positive warnings with CONFIG_PROVE_RCU=y enabled: drivers/nvme/host/core.c:3770 RCU-list traversed in non-reader section!! While the list is properly protected by SRCU lock, the code uses the wrong list traversal primitive. Replace list_for_each_entry_rcu() with list_for_each_entry_srcu() to correctly indicate SRCU-based protection and eliminate the false warning. Fixes: be647e2c76b2 ("nvme: use srcu for iterating namespace list") Signed-off-by: Breno Leitao Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index b3c5460c6d768..965ca7d7a3de2 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3544,7 +3544,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; @@ -4555,7 +4556,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mark_disk_dead(ns->disk); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4567,7 +4569,8 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_unfreeze_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); @@ -4580,7 +4583,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; @@ -4596,7 +4600,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_freeze_queue_wait(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4609,7 +4614,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl) set_bit(NVME_CTRL_FROZEN, &ctrl->flags); srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_freeze_queue_start(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4657,7 +4663,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = 
srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_sync_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } From 6fbf6ff7549e65e150dede357c47445b149c4cc6 Mon Sep 17 00:00:00 2001 From: Yuan Can Date: Thu, 17 Oct 2024 09:38:12 +0800 Subject: [PATCH 25/48] vDPA/ifcvf: Fix pci_read_config_byte() return code handling [ Upstream commit 7f8825b2a78ac392d3fbb3a2e65e56d9e39d75e9 ] ifcvf_init_hw() uses pci_read_config_byte(), which returns PCIBIOS_* codes. The error handling, however, assumes the codes are normal errnos because it checks for < 0. Convert the error check to a plain non-zero check. Fixes: 5a2414bc454e ("virtio: Intel IFC VF driver for VDPA") Signed-off-by: Yuan Can Message-Id: <20241017013812.129952-1-yuancan@huawei.com> Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Acked-by: Zhu Lingshan Signed-off-by: Sasha Levin --- drivers/vdpa/ifcvf/ifcvf_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 060f837a4f9f7..3b09476e007c8 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -109,7 +109,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) u32 i; ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); - if (ret < 0) { + if (ret) { IFCVF_ERR(pdev, "Failed to read PCI capability list\n"); return -EIO; } From a078a480ff3f43d74d8a024ae10c3c7daf6db149 Mon Sep 17 00:00:00 2001 From: Zijian Zhang Date: Wed, 6 Nov 2024 00:37:42 +0000 Subject: [PATCH 26/48] bpf: Add sk_is_inet and IS_ICSK check in tls_sw_has_ctx_tx/rx [ Upstream commit 44d0469f79bd3d0b3433732877358df7dc6b17b1 ] With the introduction of support for vsock and unix sockets in sockmap, tls_sw_has_ctx_tx/rx cannot presume the socket passed in must be IS_ICSK. vsock and af_unix sockets have vsock_sock and unix_sock instead of inet_connection_sock. For these sockets, tls_get_ctx may return an invalid pointer and cause a page fault in tls_sw_ctx_rx. BUG: unable to handle page fault for address: 0000000000040030 Workqueue: vsock-loopback vsock_loopback_work RIP: 0010:sk_psock_strp_data_ready+0x23/0x60 Call Trace: ? __die+0x81/0xc3 ? no_context+0x194/0x350 ? do_page_fault+0x30/0x110 ? async_page_fault+0x3e/0x50 ? sk_psock_strp_data_ready+0x23/0x60 virtio_transport_recv_pkt+0x750/0x800 ? update_load_avg+0x7e/0x620 vsock_loopback_work+0xd0/0x100 process_one_work+0x1a7/0x360 worker_thread+0x30/0x390 ? create_worker+0x1a0/0x1a0 kthread+0x112/0x130 ?
__kthread_cancel_work+0x40/0x40 ret_from_fork+0x1f/0x40 v2: - Add IS_ICSK check v3: - Update the commits in Fixes Fixes: 634f1a7110b4 ("vsock: support sockmap") Fixes: 94531cfcbe79 ("af_unix: Add unix_stream_proto for sockmap") Signed-off-by: Zijian Zhang Acked-by: Stanislav Fomichev Acked-by: Jakub Kicinski Reviewed-by: Cong Wang Acked-by: Stefano Garzarella Link: https://lore.kernel.org/r/20241106003742.399240-1-zijianzhang@bytedance.com Signed-off-by: Martin KaFai Lau Signed-off-by: Sasha Levin --- include/net/tls.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index 2ad28545b15f0..6c642ea180504 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -395,8 +395,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx) static inline bool tls_sw_has_ctx_tx(const struct sock *sk) { - struct tls_context *ctx = tls_get_ctx(sk); + struct tls_context *ctx; + + if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk)) + return false; + ctx = tls_get_ctx(sk); if (!ctx) return false; return !!tls_sw_ctx_tx(ctx); @@ -404,8 +408,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) static inline bool tls_sw_has_ctx_rx(const struct sock *sk) { - struct tls_context *ctx = tls_get_ctx(sk); + struct tls_context *ctx; + + if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk)) + return false; + ctx = tls_get_ctx(sk); if (!ctx) return false; return !!tls_sw_ctx_rx(ctx); From 77c523dfb0535d10df809426da2d2097e5b14d9c Mon Sep 17 00:00:00 2001 From: Jiawei Ye Date: Fri, 8 Nov 2024 08:18:52 +0000 Subject: [PATCH 27/48] bpf: Fix mismatched RCU unlock flavour in bpf_out_neigh_v6 [ Upstream commit fb86c42a2a5d44e849ddfbc98b8d2f4f40d36ee3 ] In the bpf_out_neigh_v6 function, rcu_read_lock() is used to begin an RCU read-side critical section. However, when unlocking, one branch incorrectly uses a different RCU unlock flavour rcu_read_unlock_bh() instead of rcu_read_unlock(). This mismatch in RCU locking flavours can lead to unexpected behavior and potential concurrency issues. This possible bug was identified using a static analysis tool developed by myself, specifically designed to detect RCU-related issues. This patch corrects the mismatched unlock flavour by replacing the incorrect rcu_read_unlock_bh() with the appropriate rcu_read_unlock(), ensuring that the RCU critical section is properly exited. This change prevents potential synchronization issues and aligns with proper RCU usage patterns. 
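For reference, the pairing rule the fix restores is simply that each RCU read-side flavour must be exited with its own unlock (minimal illustration, not code from this file):

    rcu_read_lock();
    /* read-side critical section */
    rcu_read_unlock();          /* pairs with rcu_read_lock() */

    rcu_read_lock_bh();
    /* read-side critical section, softirqs also disabled */
    rcu_read_unlock_bh();       /* only pairs with rcu_read_lock_bh() */

Since the commit named in the Fixes tag switched the neighbour code from rcu_bh to standard RCU, bpf_out_neigh_v6() enters the section with rcu_read_lock(), so the _bh unlock on this error path was a leftover.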
Fixes: 09eed1192cec ("neighbour: switch to standard rcu, instead of rcu_bh") Signed-off-by: Jiawei Ye Acked-by: Yonghong Song Link: https://lore.kernel.org/r/tencent_CFD3D1C3D68B45EA9F52D8EC76D2C4134306@qq.com Signed-off-by: Martin KaFai Lau Signed-off-by: Sasha Levin --- net/core/filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index a2467a7c01f9e..f9d05eff80b17 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2233,7 +2233,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, rcu_read_unlock(); return ret; } - rcu_read_unlock_bh(); + rcu_read_unlock(); if (dst) IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); out_drop: From 34ec7bcee3d863d415e7007cbed4974e6c73351c Mon Sep 17 00:00:00 2001 From: Christian Heusel Date: Thu, 10 Oct 2024 15:32:11 +0200 Subject: [PATCH 28/48] ASoC: amd: yc: Add quirk for ASUS Vivobook S15 M3502RA [ Upstream commit 182fff3a2aafe4e7f3717a0be9df2fe2ed1a77de ] As reported the builtin microphone doesn't work on the ASUS Vivobook model S15 OLED M3502RA. Therefore add a quirk for it to make it work. Link: https://bugzilla.kernel.org/show_bug.cgi?id=219345 Signed-off-by: Christian Heusel Link: https://patch.msgid.link/20241010-bugzilla-219345-asus-vivobook-v1-1-3bb24834e2c3@heusel.eu Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/amd/yc/acp6x-mach.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index e027bc1d35f4f..2d766d988eb2e 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -339,6 +339,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "M7600RE"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "M3502RA"), + } + }, { .driver_data = &acp6x_card, .matches = { From ebf63d5c82e7592c3885190089e15274668ff26e Mon Sep 17 00:00:00 2001 From: Ilya Dudikov Date: Wed, 16 Oct 2024 10:40:37 +0700 Subject: [PATCH 29/48] ASoC: amd: yc: Fix non-functional mic on ASUS E1404FA [ Upstream commit b0867999e3282378a0b26a7ad200233044d31eca ] ASUS Vivobook E1404FA needs a quirks-table entry for the internal microphone to function properly. Signed-off-by: Ilya Dudikov Link: https://patch.msgid.link/20241016034038.13481-1-ilyadud25@gmail.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/amd/yc/acp6x-mach.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 2d766d988eb2e..08f823cd88699 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -325,6 +325,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "E1404FA"), + } + }, { .driver_data = &acp6x_card, .matches = { From 1cb5bfc5bfc651982b6203c224d49b7ddacf28bc Mon Sep 17 00:00:00 2001 From: Alessandro Zanni Date: Thu, 17 Oct 2024 14:05:51 +0200 Subject: [PATCH 30/48] fs: Fix uninitialized value issue in from_kuid and from_kgid [ Upstream commit 15f34347481648a567db67fb473c23befb796af5 ] ocfs2_setattr() uses attr->ia_mode, attr->ia_uid and attr->ia_gid in a trace point even though ATTR_MODE, ATTR_UID and ATTR_GID aren't set. 
Avoid the uninitialized reads by checking ATTR_MODE, ATTR_UID and ATTR_GID in attr->ia_valid before using each field, and by passing 0 to the trace point for any attribute whose flag is not set. Reported-by: syzbot+6c55f725d1bdc8c52058@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=6c55f725d1bdc8c52058 Signed-off-by: Alessandro Zanni Link: https://lore.kernel.org/r/20241017120553.55331-1-alessandro.zanni87@gmail.com Reviewed-by: Jan Kara Signed-off-by: Christian Brauner Signed-off-by: Sasha Levin --- fs/ocfs2/file.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index aa39d5d2d94f1..e4acb795d1190 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1128,9 +1128,12 @@ int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry, trace_ocfs2_setattr(inode, dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, dentry->d_name.len, dentry->d_name.name, - attr->ia_valid, attr->ia_mode, - from_kuid(&init_user_ns, attr->ia_uid), - from_kgid(&init_user_ns, attr->ia_gid)); + attr->ia_valid, + attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0, + attr->ia_valid & ATTR_UID ? + from_kuid(&init_user_ns, attr->ia_uid) : 0, + attr->ia_valid & ATTR_GID ? + from_kgid(&init_user_ns, attr->ia_gid) : 0); /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) From 24e8cc49c03e1749a6faf6539df4297aaab281d3 Mon Sep 17 00:00:00 2001 From: Kenneth Albanowski Date: Fri, 4 Oct 2024 10:24:29 -0700 Subject: [PATCH 31/48] HID: multitouch: Add quirk for Logitech Bolt receiver w/ Casa touchpad [ Upstream commit 526748b925185e95f1415900ee13c2469d4b64cc ] The Logitech Casa Touchpad does not reliably send touch release signals when communicating through the Logitech Bolt wireless-to-USB receiver. Adjust the device class to add MT_QUIRK_NOT_SEEN_MEANS_UP so that no touches become stuck; MT_QUIRK_FORCE_MULTI_INPUT is not needed, but harmless. Linux does not have information on which devices are connected to the Bolt receiver, so we have to enable this for the entire device.
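Conceptually, MT_QUIRK_NOT_SEEN_MEANS_UP maps onto the input core's INPUT_MT_DROP_UNUSED handling: any slot not updated in the current frame is reported as released when the frame is synced. A simplified sketch of the mechanism (illustration only; see hid-multitouch and input_mt_sync_frame() for the real code):

    /* hid-multitouch, simplified: the quirk selects the MT flag */
    if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
            mt_flags |= INPUT_MT_DROP_UNUSED;

    /* the input core then drops stale contacts at frame sync */
    input_mt_sync_frame(input);   /* releases slots not seen this frame */

This is why missing release reports from the Casa touchpad can no longer leave contacts stuck.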
Signed-off-by: Kenneth Albanowski Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-ids.h | 1 + drivers/hid/hid-multitouch.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d4f6066dbbc59..1a05e22685895 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -868,6 +868,7 @@ #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539 #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a +#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548 #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index c2d79b2d6cdd2..bf9cad7112592 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2140,6 +2140,10 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) }, + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LOGITECH, + USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) }, /* MosArt panels */ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, From bff14c38ed514a2dc363b029b8a17de97dbae970 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 10 Oct 2024 11:45:12 +0200 Subject: [PATCH 32/48] HID: lenovo: Add support for Thinkpad X1 Tablet Gen 3 keyboard [ Upstream commit 51268879eb2bfc563a91cdce69362d9dbf707e7e ] The Thinkpad X1 Tablet Gen 3 keyboard has the same Lenovo specific quirks as the original Thinkpad X1 Tablet keyboard. Add the PID for the "Thinkpad X1 Tablet Gen 3 keyboard" to the hid-lenovo driver to fix the FnLock, Mute and media buttons not working. 
Suggested-by: Izhar Firdaus Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2315395 Signed-off-by: Hans de Goede Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-lenovo.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index f86c1ea83a037..a4062f617ba20 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev, return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); default: return 0; @@ -583,6 +584,7 @@ static ssize_t attr_fn_lock_store(struct device *dev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); if (ret) return ret; @@ -777,6 +779,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, return lenovo_event_cptkbd(hdev, field, usage, value); case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: return 0; @@ -1059,6 +1062,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); break; } @@ -1289,6 +1293,7 @@ static int lenovo_probe(struct hid_device *hdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_probe_tp10ubkbd(hdev); break; default: @@ -1375,6 +1380,7 @@ static void lenovo_remove(struct hid_device *hdev) break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB3: lenovo_remove_tp10ubkbd(hdev); break; } @@ -1424,6 +1430,8 @@ static const struct hid_device_id lenovo_devices[] = { */ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, { } }; From e01cac3d62845e71c84b4c89be32b2c2eb0c83cc Mon Sep 17 00:00:00 2001 From: Cyan Yang Date: Fri, 20 Sep 2024 00:01:26 +0800 Subject: [PATCH 33/48] RISCV: KVM: use raw_spinlock for critical section in imsic [ Upstream commit 3ec4350d4efb5ccb6bd0e11d9cf7f2be4f47297d ] The external interrupt update procedure in imsic is already protected by a spinlock. But since that section must not be preempted in any case, switch to a raw_spinlock so preemption stays disabled even when PREEMPT_RT is enabled.
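The practical difference (an illustrative sketch, not taken from this driver): with PREEMPT_RT, a spinlock_t becomes a sleeping lock whose critical section can be preempted, while a raw_spinlock_t always spins with preemption disabled:

    raw_spinlock_t lock;
    unsigned long flags;

    raw_spin_lock_init(&lock);
    raw_spin_lock_irqsave(&lock, flags);
    /* critical section: not preemptible, even on PREEMPT_RT */
    raw_spin_unlock_irqrestore(&lock, flags);

The conversion below is mechanical: the init, lock and unlock calls all move to their raw_* variants.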
Signed-off-by: Cyan Yang Reviewed-by: Yong-Xuan Wang Reviewed-by: Anup Patel Message-ID: <20240919160126.44487-1-cyan.yang@sifive.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/riscv/kvm/aia_imsic.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c index e808723a85f1b..c1585444f856e 100644 --- a/arch/riscv/kvm/aia_imsic.c +++ b/arch/riscv/kvm/aia_imsic.c @@ -55,7 +55,7 @@ struct imsic { /* IMSIC SW-file */ struct imsic_mrif *swfile; phys_addr_t swfile_pa; - spinlock_t swfile_extirq_lock; + raw_spinlock_t swfile_extirq_lock; }; #define imsic_vs_csr_read(__c) \ @@ -622,7 +622,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) * interruptions between reading topei and updating pending status. */ - spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); + raw_spin_lock_irqsave(&imsic->swfile_extirq_lock, flags); if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) && imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis)) @@ -630,7 +630,7 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu) else kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT); - spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); + raw_spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags); } static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear, @@ -1051,7 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu) } imsic->swfile = page_to_virt(swfile_page); imsic->swfile_pa = page_to_phys(swfile_page); - spin_lock_init(&imsic->swfile_extirq_lock); + raw_spin_lock_init(&imsic->swfile_extirq_lock); /* Setup IO device */ kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops); From 4f885fa649836905e2bf665baea2c5fc8a11f0e8 Mon Sep 17 00:00:00 2001 From: Jack Yu Date: Mon, 21 Oct 2024 06:15:44 +0000 Subject: [PATCH 34/48] ASoC: rt722-sdca: increase clk_stop_timeout to fix clock stop issue [ Upstream commit 038fa6ddf5d22694f61ff7a7a53c8887c6b08c45 ] clk_stop_timeout should be increased to 900ms to fix clock stop issue. Signed-off-by: Jack Yu Link: https://patch.msgid.link/cd26275d9fc54374a18dc016755cb72d@realtek.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/rt722-sdca-sdw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c index 32578a212642e..91314327d9eee 100644 --- a/sound/soc/codecs/rt722-sdca-sdw.c +++ b/sound/soc/codecs/rt722-sdca-sdw.c @@ -253,7 +253,7 @@ static int rt722_sdca_read_prop(struct sdw_slave *slave) } /* set the timeout values */ - prop->clk_stop_timeout = 200; + prop->clk_stop_timeout = 900; /* wake-up event */ prop->wake_capable = 1; From f198c09fe2ca7eced1b0c338bbedba05b77be175 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Mon, 21 Oct 2024 22:11:18 +0800 Subject: [PATCH 35/48] LoongArch: Use "Exception return address" to comment ERA [ Upstream commit b69269c870ece1bc7d2e3e39ca76f4602f2cb0dd ] The information contained in the comment for LOONGARCH_CSR_ERA is even less informative than the macro itself, which can cause confusion for junior developers. Let's use the full English term. 
Signed-off-by: Yanteng Si Signed-off-by: Huacai Chen Signed-off-by: Sasha Levin --- arch/loongarch/include/asm/loongarch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 33531d432b492..23232c7bdb9ff 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -242,7 +242,7 @@ #define CSR_ESTAT_IS_WIDTH 14 #define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) -#define LOONGARCH_CSR_ERA 0x6 /* ERA */ +#define LOONGARCH_CSR_ERA 0x6 /* Exception return address */ #define LOONGARCH_CSR_BADV 0x7 /* Bad virtual address */ From f08621233573a38bd0e25b08447ec6fa1e4311c2 Mon Sep 17 00:00:00 2001 From: Shengjiu Wang Date: Mon, 14 Oct 2024 13:38:33 +0800 Subject: [PATCH 36/48] ASoC: fsl_micfil: Add sample rate constraint [ Upstream commit b9a8ecf81066e01e8a3de35517481bc5aa0439e5 ] On some platforms, for example i.MX93, there is only one audio PLL source, so some sample rate can't be supported. If the PLL source is used for 8kHz series rates, then 11kHz series rates can't be supported. So add constraints according to the frequency of available clock sources, then alsa-lib will help to convert the unsupported rate for the driver. Signed-off-by: Shengjiu Wang Link: https://patch.msgid.link/1728884313-6778-1-git-send-email-shengjiu.wang@nxp.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/fsl/fsl_micfil.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index 9407179af5d57..8478a4ac59f9d 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -28,6 +28,13 @@ #define MICFIL_OSR_DEFAULT 16 +#define MICFIL_NUM_RATES 7 +#define MICFIL_CLK_SRC_NUM 3 +/* clock source ids */ +#define MICFIL_AUDIO_PLL1 0 +#define MICFIL_AUDIO_PLL2 1 +#define MICFIL_CLK_EXT3 2 + enum quality { QUALITY_HIGH, QUALITY_MEDIUM, @@ -45,9 +52,12 @@ struct fsl_micfil { struct clk *mclk; struct clk *pll8k_clk; struct clk *pll11k_clk; + struct clk *clk_src[MICFIL_CLK_SRC_NUM]; struct snd_dmaengine_dai_dma_data dma_params_rx; struct sdma_peripheral_config sdmacfg; struct snd_soc_card *card; + struct snd_pcm_hw_constraint_list constraint_rates; + unsigned int constraint_rates_list[MICFIL_NUM_RATES]; unsigned int dataline; char name[32]; int irq[MICFIL_IRQ_LINES]; @@ -475,12 +485,34 @@ static int fsl_micfil_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai); + unsigned int rates[MICFIL_NUM_RATES] = {8000, 11025, 16000, 22050, 32000, 44100, 48000}; + int i, j, k = 0; + u64 clk_rate; if (!micfil) { dev_err(dai->dev, "micfil dai priv_data not set\n"); return -EINVAL; } + micfil->constraint_rates.list = micfil->constraint_rates_list; + micfil->constraint_rates.count = 0; + + for (j = 0; j < MICFIL_NUM_RATES; j++) { + for (i = 0; i < MICFIL_CLK_SRC_NUM; i++) { + clk_rate = clk_get_rate(micfil->clk_src[i]); + if (clk_rate != 0 && do_div(clk_rate, rates[j]) == 0) { + micfil->constraint_rates_list[k++] = rates[j]; + micfil->constraint_rates.count++; + break; + } + } + } + + if (micfil->constraint_rates.count > 0) + snd_pcm_hw_constraint_list(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_RATE, + &micfil->constraint_rates); + return 0; } @@ -1165,6 +1197,12 @@ static int fsl_micfil_probe(struct platform_device *pdev) fsl_asoc_get_pll_clocks(&pdev->dev, &micfil->pll8k_clk, &micfil->pll11k_clk); 
+ micfil->clk_src[MICFIL_AUDIO_PLL1] = micfil->pll8k_clk; + micfil->clk_src[MICFIL_AUDIO_PLL2] = micfil->pll11k_clk; + micfil->clk_src[MICFIL_CLK_EXT3] = devm_clk_get(&pdev->dev, "clkext3"); + if (IS_ERR(micfil->clk_src[MICFIL_CLK_EXT3])) + micfil->clk_src[MICFIL_CLK_EXT3] = NULL; + /* init regmap */ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(regs)) From 9dcf696124533ae54670f631cdece3bc80148e7f Mon Sep 17 00:00:00 2001 From: Reinhard Speyerer Date: Fri, 18 Oct 2024 22:52:55 +0200 Subject: [PATCH 37/48] net: usb: qmi_wwan: add Fibocom FG132 0x0112 composition [ Upstream commit 64761c980cbf71fb7a532a8c7299907ea972a88c ] Add Fibocom FG132 0x0112 composition: T: Bus=03 Lev=02 Prnt=06 Port=01 Cnt=02 Dev#= 10 Spd=12 MxCh= 0 D: Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=2cb7 ProdID=0112 Rev= 5.15 S: Manufacturer=Fibocom Wireless Inc. S: Product=Fibocom Module S: SerialNumber=xxxxxxxx C:* #Ifs= 4 Cfg#= 1 Atr=a0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=50 Driver=qmi_wwan E: Ad=82(I) Atr=03(Int.) MxPS= 8 Ivl=32ms E: Ad=81(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=83(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=86(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms Signed-off-by: Reinhard Speyerer Link: https://patch.msgid.link/ZxLKp5YZDy-OM0-e@arcor.de Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 92c1500fa7c44..2cf4324a12fd1 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1432,6 +1432,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ + {QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ From d5092b0a1aaf35d77ebd8d33384d7930bec5cb5d Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Thu, 24 Oct 2024 09:35:58 +0800 Subject: [PATCH 38/48] bpf: Check validity of link->type in bpf_link_show_fdinfo() [ Upstream commit 8421d4c8762bd022cb491f2f0f7019ef51b4f0a7 ] If a newly-added link type doesn't invoke BPF_LINK_TYPE(), accessing bpf_link_type_strs[link->type] may result in an out-of-bounds access. To spot such missed invocations early in the future, check the validity of link->type in bpf_link_show_fdinfo() and emit a warning when one is missed.
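For context, bpf_link_type_strs[] is generated from the BPF_LINK_TYPE() invocations, roughly as follows (simplified sketch of the table construction in kernel/bpf/syscall.c):

    static const char *bpf_link_type_strs[] = {
            [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
    #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
    #include <linux/bpf_types.h>
    #undef BPF_LINK_TYPE
    };

A link type added to enum bpf_link_type without a matching BPF_LINK_TYPE() entry leaves a NULL or out-of-range slot, which the bounds-and-NULL check below now reports instead of dereferencing.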
Signed-off-by: Hou Tao Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20241024013558.1135167-3-houtao@huaweicloud.com Signed-off-by: Sasha Levin --- kernel/bpf/syscall.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8a1cadc1ff9dd..252aed82d45ea 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2963,13 +2963,17 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_link *link = filp->private_data; const struct bpf_prog *prog = link->prog; + enum bpf_link_type type = link->type; char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; - seq_printf(m, - "link_type:\t%s\n" - "link_id:\t%u\n", - bpf_link_type_strs[link->type], - link->id); + if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { + seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); + } else { + WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type); + seq_printf(m, "link_type:\t<%u>\n", type); + } + seq_printf(m, "link_id:\t%u\n", link->id); + if (prog) { bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); seq_printf(m, From 950ac86cff338ab56e2eaf611f4936ee34893b63 Mon Sep 17 00:00:00 2001 From: Hagar Hemdan Date: Tue, 4 Jun 2024 13:05:27 +0000 Subject: [PATCH 39/48] io_uring: fix possible deadlock in io_register_iowq_max_workers() commit 73254a297c2dd094abec7c9efee32455ae875bdf upstream. The io_register_iowq_max_workers() function calls io_put_sq_data(), which acquires the sqd->lock without releasing the uring_lock. Similar to the commit 009ad9f0c6ee ("io_uring: drop ctx->uring_lock before acquiring sqd->lock"), this can lead to a potential deadlock situation. To resolve this issue, the uring_lock is released before calling io_put_sq_data(), and then it is re-acquired after the function call. This change ensures that the locks are acquired in the correct order, preventing the possibility of a deadlock. Suggested-by: Maximilian Heyne Signed-off-by: Hagar Hemdan Link: https://lore.kernel.org/r/20240604130527.3597-1-hagarhem@amazon.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/io_uring.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 484c9bcbee77c..70dd6a5b9647c 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4358,8 +4358,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, } if (sqd) { + mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); io_put_sq_data(sqd); + mutex_lock(&ctx->uring_lock); } if (copy_to_user(arg, new_count, sizeof(new_count))) @@ -4384,8 +4386,11 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, return 0; err: if (sqd) { + mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); io_put_sq_data(sqd); + mutex_lock(&ctx->uring_lock); + } return ret; } From 71548fada7ee0eb50cc6ccda82dff010c745f92c Mon Sep 17 00:00:00 2001 From: Qun-Wei Lin Date: Fri, 25 Oct 2024 16:58:11 +0800 Subject: [PATCH 40/48] mm: krealloc: Fix MTE false alarm in __do_krealloc commit 704573851b51808b45dae2d62059d1d8189138a2 upstream. This patch addresses an issue introduced by commit 1a83a716ec233 ("mm: krealloc: consider spare memory for __GFP_ZERO") which causes MTE (Memory Tagging Extension) to falsely report a slab-out-of-bounds error. The problem occurs when zeroing out spare memory in __do_krealloc. The original code only considered software-based KASAN and did not account for MTE. 
It does not reset the KASAN tag before calling memset, leading to a mismatch between the pointer tag and the memory tag, resulting in a false positive. Example of the error: ================================================================== swapper/0: BUG: KASAN: slab-out-of-bounds in __memset+0x84/0x188 swapper/0: Write at addr f4ffff8005f0fdf0 by task swapper/0/1 swapper/0: Pointer tag: [f4], memory tag: [fe] swapper/0: swapper/0: CPU: 4 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.12. swapper/0: Hardware name: MT6991(ENG) (DT) swapper/0: Call trace: swapper/0: dump_backtrace+0xfc/0x17c swapper/0: show_stack+0x18/0x28 swapper/0: dump_stack_lvl+0x40/0xa0 swapper/0: print_report+0x1b8/0x71c swapper/0: kasan_report+0xec/0x14c swapper/0: __do_kernel_fault+0x60/0x29c swapper/0: do_bad_area+0x30/0xdc swapper/0: do_tag_check_fault+0x20/0x34 swapper/0: do_mem_abort+0x58/0x104 swapper/0: el1_abort+0x3c/0x5c swapper/0: el1h_64_sync_handler+0x80/0xcc swapper/0: el1h_64_sync+0x68/0x6c swapper/0: __memset+0x84/0x188 swapper/0: btf_populate_kfunc_set+0x280/0x3d8 swapper/0: __register_btf_kfunc_id_set+0x43c/0x468 swapper/0: register_btf_kfunc_id_set+0x48/0x60 swapper/0: register_nf_nat_bpf+0x1c/0x40 swapper/0: nf_nat_init+0xc0/0x128 swapper/0: do_one_initcall+0x184/0x464 swapper/0: do_initcall_level+0xdc/0x1b0 swapper/0: do_initcalls+0x70/0xc0 swapper/0: do_basic_setup+0x1c/0x28 swapper/0: kernel_init_freeable+0x144/0x1b8 swapper/0: kernel_init+0x20/0x1a8 swapper/0: ret_from_fork+0x10/0x20 ================================================================== Fixes: 1a83a716ec233 ("mm: krealloc: consider spare memory for __GFP_ZERO") Signed-off-by: Qun-Wei Lin Acked-by: David Rientjes Signed-off-by: Vlastimil Babka Signed-off-by: Greg Kroah-Hartman --- mm/slab_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slab_common.c b/mm/slab_common.c index ef971fcdaa070..2e2b43fae2c3f 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1391,7 +1391,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags) /* Zero out spare memory. */ if (want_init_on_alloc(flags)) { kasan_disable_current(); - memset((void *)p + new_size, 0, ks - new_size); + memset(kasan_reset_tag(p) + new_size, 0, ks - new_size); kasan_enable_current(); } From bc8990235fb5b553c0c20b6185fde74ec9f5923c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 3 Oct 2023 02:25:33 -0700 Subject: [PATCH 41/48] mm: add page_rmappable_folio() wrapper commit 23e4883248f0472d806c8b3422ba6257e67bf1a5 upstream. folio_prep_large_rmappable() is being used repeatedly along with a conversion from page to folio, a check non-NULL, a check order > 1: wrap it all up into struct folio *page_rmappable_folio(struct page *). 
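The repeated open-coded pattern being consolidated appears throughout the callers converted below:

    folio = (struct folio *)page;
    if (folio && order > 1)
            folio_prep_large_rmappable(folio);

page_rmappable_folio() wraps exactly this sequence.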
Link: https://lkml.kernel.org/r/8d92c6cf-eebe-748-e29c-c8ab224c741@google.com Signed-off-by: Hugh Dickins Cc: Andi Kleen Cc: Christoph Lameter Cc: David Hildenbrand Cc: Greg Kroah-Hartman Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Michal Hocko Cc: Mike Kravetz Cc: Nhat Pham Cc: Sidhartha Kumar Cc: Suren Baghdasaryan Cc: Tejun heo Cc: Vishal Moola (Oracle) Cc: Yang Shi Cc: Yosry Ahmed Signed-off-by: Andrew Morton Signed-off-by: Hugh Dickins Signed-off-by: Greg Kroah-Hartman --- mm/internal.h | 9 +++++++++ mm/mempolicy.c | 17 +++-------------- mm/page_alloc.c | 8 ++------ 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index ef8d787a510c5..8f8ab64f8cef8 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -415,6 +415,15 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) void folio_undo_large_rmappable(struct folio *folio); +static inline struct folio *page_rmappable_folio(struct page *page) +{ + struct folio *folio = (struct folio *)page; + + if (folio && folio_order(folio) > 1) + folio_prep_large_rmappable(folio); + return folio; +} + static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4cae854c0f28d..109826a2af387 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2200,10 +2200,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, mpol_cond_put(pol); gfp |= __GFP_COMP; page = alloc_page_interleave(gfp, order, nid); - folio = (struct folio *)page; - if (folio && order > 1) - folio_prep_large_rmappable(folio); - goto out; + return page_rmappable_folio(page); } if (pol->mode == MPOL_PREFERRED_MANY) { @@ -2213,10 +2210,7 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, gfp |= __GFP_COMP; page = alloc_pages_preferred_many(gfp, order, node, pol); mpol_cond_put(pol); - folio = (struct folio *)page; - if (folio && order > 1) - folio_prep_large_rmappable(folio); - goto out; + return page_rmappable_folio(page); } if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { @@ -2310,12 +2304,7 @@ EXPORT_SYMBOL(alloc_pages); struct folio *folio_alloc(gfp_t gfp, unsigned order) { - struct page *page = alloc_pages(gfp | __GFP_COMP, order); - struct folio *folio = (struct folio *)page; - - if (folio && order > 1) - folio_prep_large_rmappable(folio); - return folio; + return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order)); } EXPORT_SYMBOL(folio_alloc); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1bbbf2f8b7e4c..959383d8d74d2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4464,12 +4464,8 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) { struct page *page = __alloc_pages(gfp | __GFP_COMP, order, - preferred_nid, nodemask); - struct folio *folio = (struct folio *)page; - - if (folio && order > 1) - folio_prep_large_rmappable(folio); - return folio; + preferred_nid, nodemask); + return page_rmappable_folio(page); } EXPORT_SYMBOL(__folio_alloc); From 2ad2067e9ffc8b06aa3c9ab5f51507aadd8aa01a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 1 Dec 2023 16:10:45 +0000 Subject: [PATCH 42/48] mm/readahead: do not allow order-1 folio commit ec056cef76a525706601b32048f174f9bea72c7c upstream. The THP machinery does not support order-1 folios because it requires meta data spanning the first 3 `struct page`s. 
So order-2 is the smallest large folio that we can safely create. There was a theoretical bug whereby if ra->size was 2 or 3 pages (due to the device-specific bdi->ra_pages being set that way), we could end up with order = 1. Fix this by unconditionally checking if the preferred order is 1 and if so, set it to 0. Previously this was done in a few specific places, but with this refactoring it is done just once, unconditionally, at the end of the calculation. This is a theoretical bug found during review of the code; I have no evidence to suggest this manifests in the real world (I expect all device-specific ra_pages values are much bigger than 3). Link: https://lkml.kernel.org/r/20231201161045.3962614-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Hugh Dickins Signed-off-by: Greg Kroah-Hartman --- mm/readahead.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index 7c0449f8bec7f..89139a8721941 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -514,16 +514,14 @@ void page_cache_ra_order(struct readahead_control *ractl, unsigned int order = new_order; /* Align with smaller pages if needed */ - if (index & ((1UL << order) - 1)) { + if (index & ((1UL << order) - 1)) order = __ffs(index); - if (order == 1) - order = 0; - } /* Don't allocate pages past EOF */ - while (index + (1UL << order) - 1 > limit) { - if (--order == 1) - order = 0; - } + while (index + (1UL << order) - 1 > limit) + order--; + /* THP machinery does not support order-1 */ + if (order == 1) + order = 0; err = ra_alloc_folio(ractl, index, mark, order, gfp); if (err) break; From e8769509d622b41af8080d35c766f59d4aae326c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 26 Feb 2024 15:55:28 -0500 Subject: [PATCH 43/48] mm: support order-1 folios in the page cache commit 8897277acfef7f70fdecc054073bea2542fc7a1b upstream. Folios of order 1 have no space to store the deferred list. This is not a problem for the page cache as file-backed folios are never placed on the deferred list. All we need to do is prevent the core MM from touching the deferred list for order 1 folios and remove the code which prevented us from allocating order 1 folios. Link: https://lore.kernel.org/linux-mm/90344ea7-4eec-47ee-5996-0c22f42d6a6a@google.com/ Link: https://lkml.kernel.org/r/20240226205534.1603748-3-zi.yan@sent.com Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Zi Yan Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Michal Koutny Cc: Roman Gushchin Cc: Ryan Roberts Cc: Yang Shi Cc: Yu Zhao Cc: Zach O'Keefe Signed-off-by: Andrew Morton Signed-off-by: Hugh Dickins Signed-off-by: Greg Kroah-Hartman --- mm/filemap.c | 2 -- mm/huge_memory.c | 19 +++++++++++++++---- mm/internal.h | 3 +-- mm/readahead.c | 3 --- 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 5e9359e4ff9ed..2c308413387ff 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1957,8 +1957,6 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, gfp_t alloc_gfp = gfp; err = -ENOMEM; - if (order == 1) - order = 0; if (order > 0) alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; folio = filemap_alloc_folio(alloc_gfp, order); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7b4cb5c68b61b..e01b0a416e76b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -569,8 +569,10 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio) void folio_prep_large_rmappable(struct folio *folio) { - VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); - INIT_LIST_HEAD(&folio->_deferred_list); + if (!folio || !folio_test_large(folio)) + return; + if (folio_order(folio) > 1) + INIT_LIST_HEAD(&folio->_deferred_list); folio_set_large_rmappable(folio); } @@ -2720,7 +2722,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) /* Prevent deferred_split_scan() touching ->_refcount */ spin_lock(&ds_queue->split_queue_lock); if (folio_ref_freeze(folio, 1 + extra_pins)) { - if (!list_empty(&folio->_deferred_list)) { + if (folio_order(folio) > 1 && + !list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; list_del(&folio->_deferred_list); } @@ -2771,6 +2774,9 @@ void folio_undo_large_rmappable(struct folio *folio) struct deferred_split *ds_queue; unsigned long flags; + if (folio_order(folio) <= 1) + return; + /* * At this point, there is no one trying to add the folio to * deferred_list. If folio is not in deferred_list, it's safe @@ -2796,7 +2802,12 @@ void deferred_split_folio(struct folio *folio) #endif unsigned long flags; - VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); + /* + * Order 1 folios have no space for a deferred list, but we also + * won't waste much memory by not adding them to the deferred list. + */ + if (folio_order(folio) <= 1) + return; /* * The try_to_unmap() in page reclaim path might reach here too, diff --git a/mm/internal.h b/mm/internal.h index 8f8ab64f8cef8..7dc4f6cd2e25d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -419,8 +419,7 @@ static inline struct folio *page_rmappable_folio(struct page *page) { struct folio *folio = (struct folio *)page; - if (folio && folio_order(folio) > 1) - folio_prep_large_rmappable(folio); + folio_prep_large_rmappable(folio); return folio; } diff --git a/mm/readahead.c b/mm/readahead.c index 89139a8721941..e9b11d928b0c4 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -519,9 +519,6 @@ void page_cache_ra_order(struct readahead_control *ractl, /* Don't allocate pages past EOF */ while (index + (1UL << order) - 1 > limit) order--; - /* THP machinery does not support order-1 */ - if (order == 1) - order = 0; err = ra_alloc_folio(ractl, index, mark, order, gfp); if (err) break; From 0275e4021b0c8f0e81d8f32756bd76f9c188eadd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 21 Mar 2024 14:24:39 +0000 Subject: [PATCH 44/48] mm: always initialise folio->_deferred_list commit b7b098cf00a2b65d5654a86dc8edf82f125289c1 upstream. 
Patch series "Various significant MM patches". These patches all interact in annoying ways which make it tricky to send them out in any way other than a big batch, even though there's not really an overarching theme to connect them. The big effects of this patch series are: - folio_test_hugetlb() becomes reliable, even when called without a page reference - We free up PG_slab, and we could always use more page flags - We no longer need to check PageSlab before calling page_mapcount() This patch (of 9): For compound pages which are at least order-2 (and hence have a deferred_list), initialise it and then we can check at free that the page is not part of a deferred list. We recently found this useful to rule out a source of corruption. [peterx@redhat.com: always initialise folio->_deferred_list] Link: https://lkml.kernel.org/r/20240417211836.2742593-2-peterx@redhat.com Link: https://lkml.kernel.org/r/20240321142448.1645400-1-willy@infradead.org Link: https://lkml.kernel.org/r/20240321142448.1645400-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Peter Xu Reviewed-by: David Hildenbrand Acked-by: Vlastimil Babka Cc: Miaohe Lin Cc: Muchun Song Cc: Oscar Salvador Signed-off-by: Andrew Morton [ Include three small changes from the upstream commit, for backport safety: replace list_del() by list_del_init() in split_huge_page_to_list(), like c010d47f107f ("mm: thp: split huge page to any lower order pages"); replace list_del() by list_del_init() in folio_undo_large_rmappable(), like 9bcef5973e31 ("mm: memcg: fix split queue list crash when large folio migration"); keep __free_pages() instead of folio_put() in __update_and_free_hugetlb_folio(). ] Signed-off-by: Hugh Dickins Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 6 ++---- mm/hugetlb.c | 1 + mm/internal.h | 2 ++ mm/memcontrol.c | 3 +++ mm/page_alloc.c | 9 +++++---- 5 files changed, 13 insertions(+), 8 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e01b0a416e76b..679a1c72f8d21 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -571,8 +571,6 @@ void folio_prep_large_rmappable(struct folio *folio) { if (!folio || !folio_test_large(folio)) return; - if (folio_order(folio) > 1) - INIT_LIST_HEAD(&folio->_deferred_list); folio_set_large_rmappable(folio); } @@ -2725,7 +2723,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_order(folio) > 1 && !list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; - list_del(&folio->_deferred_list); + list_del_init(&folio->_deferred_list); } spin_unlock(&ds_queue->split_queue_lock); if (mapping) { @@ -2789,7 +2787,7 @@ void folio_undo_large_rmappable(struct folio *folio) spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; - list_del(&folio->_deferred_list); + list_del_init(&folio->_deferred_list); } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0acb04c3e9529..92b955cc5a41d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1795,6 +1795,7 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, destroy_compound_gigantic_folio(folio, huge_page_order(h)); free_gigantic_folio(folio, huge_page_order(h)); } else { + INIT_LIST_HEAD(&folio->_deferred_list); __free_pages(&folio->page, huge_page_order(h)); } } diff --git a/mm/internal.h b/mm/internal.h index 7dc4f6cd2e25d..2cd821247cfca 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -431,6 +431,8 @@ static inline void 
prep_compound_head(struct page *page, unsigned int order) atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); + if (order > 1) + INIT_LIST_HEAD(&folio->_deferred_list); } static inline void prep_compound_tail(struct page *head, int tail_idx) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 110afda740a18..a7a6a1a23c7de 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7153,6 +7153,9 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) struct obj_cgroup *objcg; VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); + VM_BUG_ON_FOLIO(folio_order(folio) > 1 && + !folio_test_hugetlb(folio) && + !list_empty(&folio->_deferred_list), folio); /* * Nobody should be changing or seriously looking at diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 959383d8d74d2..6ffd6255608ef 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1002,10 +1002,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) } break; case 2: - /* - * the second tail page: ->mapping is - * deferred_list.next -- ignore value. - */ + /* the second tail page: deferred_list overlaps ->mapping */ + if (unlikely(!list_empty(&folio->_deferred_list))) { + bad_page(page, "on deferred list"); + goto out; + } break; default: if (page->mapping != TAIL_MAPPING) { From eb6b6d3e1f1e5bb97ef9d15bb21236d514bc0006 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 21 May 2024 21:03:15 +0800 Subject: [PATCH 45/48] mm: refactor folio_undo_large_rmappable() commit 593a10dabe08dcf93259fce2badd8dc2528859a8 upstream. Folios of order <= 1 are not in deferred list, the check of order is added into folio_undo_large_rmappable() from commit 8897277acfef ("mm: support order-1 folios in the page cache"), but there is a repeated check for small folio (order 0) during each call of the folio_undo_large_rmappable(), so only keep folio_order() check inside the function. In addition, move all the checks into header file to save a function call for non-large-rmappable or empty deferred_list folio. Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: David Hildenbrand Reviewed-by: Vishal Moola (Oracle) Cc: Johannes Weiner Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton [ Upstream commit itself does not apply cleanly, because there are fewer calls to folio_undo_large_rmappable() in this tree. ] Signed-off-by: Hugh Dickins Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 13 +------------ mm/internal.h | 17 ++++++++++++++++- mm/page_alloc.c | 4 +--- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 679a1c72f8d21..8d5a6141b951d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2767,22 +2767,11 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) return ret; } -void folio_undo_large_rmappable(struct folio *folio) +void __folio_undo_large_rmappable(struct folio *folio) { struct deferred_split *ds_queue; unsigned long flags; - if (folio_order(folio) <= 1) - return; - - /* - * At this point, there is no one trying to add the folio to - * deferred_list. If folio is not in deferred_list, it's safe - * to check without acquiring the split_queue_lock. 
- */ - if (data_race(list_empty(&folio->_deferred_list))) - return; - ds_queue = get_deferred_split_queue(folio); spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (!list_empty(&folio->_deferred_list)) { diff --git a/mm/internal.h b/mm/internal.h index 2cd821247cfca..78db278c126de 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -413,7 +413,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) #endif } -void folio_undo_large_rmappable(struct folio *folio); +void __folio_undo_large_rmappable(struct folio *folio); +static inline void folio_undo_large_rmappable(struct folio *folio) +{ + if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio)) + return; + + /* + * At this point, there is no one trying to add the folio to + * deferred_list. If folio is not in deferred_list, it's safe + * to check without acquiring the split_queue_lock. + */ + if (data_race(list_empty(&folio->_deferred_list))) + return; + + __folio_undo_large_rmappable(folio); +} static inline struct folio *page_rmappable_folio(struct page *page) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6ffd6255608ef..fd9d9afbe1da7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -600,9 +600,7 @@ void destroy_large_folio(struct folio *folio) return; } - if (folio_test_large_rmappable(folio)) - folio_undo_large_rmappable(folio); - + folio_undo_large_rmappable(folio); mem_cgroup_uncharge(folio); free_the_page(&folio->page, folio_order(folio)); } From fc4951c3e3358dd82ea508e893695b916c813f17 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Sun, 27 Oct 2024 13:02:13 -0700 Subject: [PATCH 46/48] mm/thp: fix deferred split unqueue naming and locking commit f8f931bba0f92052cf842b7e30917b1afcc77d5a upstream. Recent changes are putting more pressure on THP deferred split queues: under load revealing long-standing races, causing list_del corruptions, "Bad page state"s and worse (I keep BUGs in both of those, so usually don't get to see how badly they end up without). The relevant recent changes being 6.8's mTHP, 6.10's mTHP swapout, and 6.12's mTHP swapin, improved swap allocation, and underused THP splitting. Before fixing locking: rename misleading folio_undo_large_rmappable(), which does not undo large_rmappable, to folio_unqueue_deferred_split(), which is what it does. But that and its out-of-line __callee are mm internals of very limited usability: add comment and WARN_ON_ONCEs to check usage; and return a bool to say if a deferred split was unqueued, which can then be used in WARN_ON_ONCEs around safety checks (sparing callers the arcane conditionals in __folio_unqueue_deferred_split()). Just omit the folio_unqueue_deferred_split() from free_unref_folios(), all of whose callers now call it beforehand (and if any forget then bad_page() will tell) - except for its caller put_pages_list(), which itself no longer has any callers (and will be deleted separately). Swapout: mem_cgroup_swapout() has been resetting folio->memcg_data 0 without checking and unqueueing a THP folio from deferred split list; which is unfortunate, since the split_queue_lock depends on the memcg (when memcg is enabled); so swapout has been unqueueing such THPs later, when freeing the folio, using the pgdat's lock instead: potentially corrupting the memcg's list. __remove_mapping() has frozen refcount to 0 here, so no problem with calling folio_unqueue_deferred_split() before resetting memcg_data. 
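Sketched as simplified pseudocode (names as used in this patch, not a verbatim hunk), the ordering rule the swapout fix enforces is:

    /* mem_cgroup_swapout(), simplified: refcount already frozen to 0 */
    folio_unqueue_deferred_split(folio);  /* takes the old memcg's split_queue_lock */
    folio->memcg_data = 0;                /* only then change the memcg binding */

Unqueueing first means the deferred list is always manipulated under the lock of the memcg it was queued on.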
From fc4951c3e3358dd82ea508e893695b916c813f17 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sun, 27 Oct 2024 13:02:13 -0700
Subject: [PATCH 46/48] mm/thp: fix deferred split unqueue naming and locking

commit f8f931bba0f92052cf842b7e30917b1afcc77d5a upstream.

Recent changes are putting more pressure on THP deferred split queues:
under load revealing long-standing races, causing list_del corruptions,
"Bad page state"s and worse (I keep BUGs in both of those, so usually
don't get to see how badly they end up without). The relevant recent
changes being 6.8's mTHP, 6.10's mTHP swapout, and 6.12's mTHP swapin,
improved swap allocation, and underused THP splitting.

Before fixing locking: rename misleading folio_undo_large_rmappable(),
which does not undo large_rmappable, to folio_unqueue_deferred_split(),
which is what it does. But that and its out-of-line __callee are mm
internals of very limited usability: add comment and WARN_ON_ONCEs to
check usage; and return a bool to say if a deferred split was unqueued,
which can then be used in WARN_ON_ONCEs around safety checks (sparing
callers the arcane conditionals in __folio_unqueue_deferred_split()).

Just omit the folio_unqueue_deferred_split() from free_unref_folios(),
all of whose callers now call it beforehand (and if any forget then
bad_page() will tell) - except for its caller put_pages_list(), which
itself no longer has any callers (and will be deleted separately).

Swapout: mem_cgroup_swapout() has been resetting folio->memcg_data to 0
without checking and unqueueing a THP folio from the deferred split
list; which is unfortunate, since the split_queue_lock depends on the
memcg (when memcg is enabled); so swapout has been unqueueing such THPs
later, when freeing the folio, using the pgdat's lock instead:
potentially corrupting the memcg's list. __remove_mapping() has frozen
refcount to 0 here, so no problem with calling
folio_unqueue_deferred_split() before resetting memcg_data.

That goes back to 5.4 commit 87eaceb3faa5 ("mm: thp: make deferred
split shrinker memcg aware"): which included a check on swapcache
before adding to the deferred queue, but no check on the deferred queue
before adding a THP to swapcache. That worked fine with the usual
sequence of events in reclaim (though there were a couple of rare ways
in which a THP on the deferred queue could have been swapped out), but
6.12 commit dafff3f4c850 ("mm: split underused THPs") avoids splitting
underused THPs in reclaim, which makes swapcache THPs on the deferred
queue commonplace.

Keep the check on swapcache before adding to the deferred queue? Yes:
it is no longer essential, but preserves the existing behaviour, and is
likely to be a worthwhile optimization (vmstat showed much more traffic
on the queue under swapping load if the check was removed); update its
comment.

Memcg-v1 move (deprecated): mem_cgroup_move_account() has been changing
folio->memcg_data without checking and unqueueing a THP folio from the
deferred list, sometimes corrupting the "from" memcg's list, like
swapout. Refcount is non-zero here, so folio_unqueue_deferred_split()
can only be used in a WARN_ON_ONCE to validate the fix, which must be
done earlier: mem_cgroup_move_charge_pte_range() should first try to
split the THP (splitting of course unqueues), or skip it if that fails.
Not ideal, but moving charge has been requested, and khugepaged should
repair the THP later: nobody wants new custom unqueueing code just for
this deprecated case.

The 87eaceb3faa5 commit did have the code to move from one deferred
list to another (but was not conscious of its unsafety while refcount
non-0); but that was removed by 5.6 commit fac0516b5534 ("mm: thp:
don't need care deferred split queue in memcg charge move path"), which
argued that the existence of a PMD mapping guarantees that the THP
cannot be on a deferred list. As above, false in rare cases, and now
commonly false.

Backport to 6.11 should be straightforward. Earlier backports must take
care that other _deferred_list fixes and dependencies are included.
There is not a strong case for backports, but they can fix corner
cases.

Link: https://lkml.kernel.org/r/8dc111ae-f6db-2da7-b25c-7a20b1effe3b@google.com
Fixes: 87eaceb3faa5 ("mm: thp: make deferred split shrinker memcg aware")
Fixes: dafff3f4c850 ("mm: split underused THPs")
Signed-off-by: Hugh Dickins
Acked-by: David Hildenbrand
Reviewed-by: Yang Shi
Cc: Baolin Wang
Cc: Barry Song
Cc: Chris Li
Cc: Johannes Weiner
Cc: Kefeng Wang
Cc: Kirill A. Shutemov
Cc: Matthew Wilcox (Oracle)
Cc: Nhat Pham
Cc: Ryan Roberts
Cc: Shakeel Butt
Cc: Usama Arif
Cc: Wei Yang
Cc: Zi Yan
Cc:
Signed-off-by: Andrew Morton
[ Upstream commit itself does not apply cleanly, because there
  are fewer calls to folio_undo_large_rmappable() in this tree
  (in particular, folio migration does not migrate memcg charge),
  and mm/memcontrol-v1.c has not been split out of mm/memcontrol.c.
]
Signed-off-by: Hugh Dickins
Signed-off-by: Greg Kroah-Hartman
---
 mm/huge_memory.c | 35 ++++++++++++++++++++++++++---------
 mm/internal.h    | 10 +++++-----
 mm/memcontrol.c  | 32 +++++++++++++++++++++++++++++---
 mm/page_alloc.c  |  2 +-
 4 files changed, 61 insertions(+), 18 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8d5a6141b951d..635f0f0f6860e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2767,18 +2767,38 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	return ret;
 }
 
-void __folio_undo_large_rmappable(struct folio *folio)
+/*
+ * __folio_unqueue_deferred_split() is not to be called directly:
+ * the folio_unqueue_deferred_split() inline wrapper in mm/internal.h
+ * limits its calls to those folios which may have a _deferred_list for
+ * queueing THP splits, and that list is (racily observed to be) non-empty.
+ *
+ * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
+ * zero: because even when split_queue_lock is held, a non-empty _deferred_list
+ * might be in use on deferred_split_scan()'s unlocked on-stack list.
+ *
+ * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
+ * therefore important to unqueue deferred split before changing folio memcg.
+ */
+bool __folio_unqueue_deferred_split(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
 	unsigned long flags;
+	bool unqueued = false;
+
+	WARN_ON_ONCE(folio_ref_count(folio));
+	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio));
 
 	ds_queue = get_deferred_split_queue(folio);
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
 		list_del_init(&folio->_deferred_list);
+		unqueued = true;
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+
+	return unqueued;	/* useful for debug warnings */
 }
 
 void deferred_split_folio(struct folio *folio)
@@ -2797,14 +2817,11 @@ void deferred_split_folio(struct folio *folio)
 		return;
 
 	/*
-	 * The try_to_unmap() in page reclaim path might reach here too,
-	 * this may cause a race condition to corrupt deferred split queue.
-	 * And, if page reclaim is already handling the same folio, it is
-	 * unnecessary to handle it again in shrinker.
-	 *
-	 * Check the swapcache flag to determine if the folio is being
-	 * handled by page reclaim since THP swap would add the folio into
-	 * swap cache before calling try_to_unmap().
+	 * Exclude swapcache: originally to avoid a corrupt deferred split
+	 * queue. Nowadays that is fully prevented by mem_cgroup_swapout();
+	 * but if page reclaim is already handling the same folio, it is
+	 * unnecessary to handle it again in the shrinker, so excluding
+	 * swapcache here may still be a useful optimization.
	 */
 	if (folio_test_swapcache(folio))
 		return;
diff --git a/mm/internal.h b/mm/internal.h
index 78db278c126de..b30907537801c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -413,11 +413,11 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void __folio_undo_large_rmappable(struct folio *folio);
-static inline void folio_undo_large_rmappable(struct folio *folio)
+bool __folio_unqueue_deferred_split(struct folio *folio);
+static inline bool folio_unqueue_deferred_split(struct folio *folio)
 {
 	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
-		return;
+		return false;
 
 	/*
 	 * At this point, there is no one trying to add the folio to
@@ -425,9 +425,9 @@ static inline void folio_undo_large_rmappable(struct folio *folio)
 	 * to check without acquiring the split_queue_lock.
 	 */
 	if (data_race(list_empty(&folio->_deferred_list)))
-		return;
+		return false;
 
-	__folio_undo_large_rmappable(folio);
+	return __folio_unqueue_deferred_split(folio);
 }
 
 static inline struct folio *page_rmappable_folio(struct page *page)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a7a6a1a23c7de..d2ceadd11b100 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5873,6 +5873,8 @@ static int mem_cgroup_move_account(struct page *page,
 	css_get(&to->css);
 	css_put(&from->css);
 
+	/* Warning should never happen, so don't worry about refcount non-0 */
+	WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
 	folio->memcg_data = (unsigned long)to;
 
 	__folio_memcg_unlock(from);
@@ -6237,7 +6239,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	enum mc_target_type target_type;
 	union mc_target target;
 	struct page *page;
+	struct folio *folio;
+	bool tried_split_before = false;
 
+retry_pmd:
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		if (mc.precharge < HPAGE_PMD_NR) {
@@ -6247,6 +6252,28 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
 		if (target_type == MC_TARGET_PAGE) {
 			page = target.page;
+			folio = page_folio(page);
+			/*
+			 * Deferred split queue locking depends on memcg,
+			 * and unqueue is unsafe unless folio refcount is 0:
+			 * split or skip if on the queue? first try to split.
+			 */
+			if (!list_empty(&folio->_deferred_list)) {
+				spin_unlock(ptl);
+				if (!tried_split_before)
+					split_folio(folio);
+				folio_unlock(folio);
+				folio_put(folio);
+				if (tried_split_before)
+					return 0;
+				tried_split_before = true;
+				goto retry_pmd;
+			}
+			/*
+			 * So long as that pmd lock is held, the folio cannot
+			 * be racily added to the _deferred_list, because
+			 * page_remove_rmap() will find it still pmdmapped.
+			 */
 			if (isolate_lru_page(page)) {
 				if (!mem_cgroup_move_account(page, true,
 							     mc.from, mc.to)) {
@@ -7153,9 +7180,6 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 	struct obj_cgroup *objcg;
 
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-	VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
-			!folio_test_hugetlb(folio) &&
-			!list_empty(&folio->_deferred_list), folio);
 
 	/*
 	 * Nobody should be changing or seriously looking at
@@ -7202,6 +7226,7 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 
 	ug->nr_memory += nr_pages;
 	ug->pgpgout++;
+	WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
 	folio->memcg_data = 0;
 }
 
@@ -7495,6 +7520,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	VM_BUG_ON_FOLIO(oldid, folio);
 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
+	folio_unqueue_deferred_split(folio);
 	folio->memcg_data = 0;
 
 	if (!mem_cgroup_is_root(memcg))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd9d9afbe1da7..7272a922b8383 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -600,7 +600,7 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
-	folio_undo_large_rmappable(folio);
+	folio_unqueue_deferred_split(folio);
 	mem_cgroup_uncharge(folio);
 	free_the_page(&folio->page, folio_order(folio));
 }
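The locking rule this patch enforces -- unqueue before changing the owner that
the queue lock is looked up through -- can be restated outside the kernel. A
hedged toy model (hypothetical names; only the ordering rule carries over):

#include <pthread.h>
#include <stdio.h>

struct owner { pthread_mutex_t queue_lock; };

struct item {
	struct owner *owner;	/* plays the role of folio->memcg_data */
	int on_queue;		/* plays the role of a non-empty _deferred_list */
};

/* The lock protecting queue membership lives in the item's owner,
 * as split_queue_lock lives in the folio's mem_cgroup. */
static pthread_mutex_t *item_queue_lock(struct item *it)
{
	return &it->owner->queue_lock;
}

static void unqueue(struct item *it)
{
	pthread_mutex_lock(item_queue_lock(it));
	it->on_queue = 0;
	pthread_mutex_unlock(item_queue_lock(it));
}

/*
 * Correct order: unqueue first, then retarget. Flipping it->owner while
 * on_queue is still set would make a later unqueue() take the new owner's
 * lock while the item still sits on the old owner's queue -- the list
 * corruption the commit message describes.
 */
static void move_item(struct item *it, struct owner *to)
{
	unqueue(it);		/* like folio_unqueue_deferred_split() */
	it->owner = to;		/* like resetting folio->memcg_data */
}

int main(void)
{
	struct owner a = { PTHREAD_MUTEX_INITIALIZER };
	struct owner b = { PTHREAD_MUTEX_INITIALIZER };
	struct item it = { .owner = &a, .on_queue = 1 };

	move_item(&it, &b);
	printf("moved safely; on_queue=%d\n", it.on_queue);
	return 0;
}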
From 9da3636a4880e59495d52f2d13d59b275c3b7df3 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 21 Oct 2024 11:57:38 -0700
Subject: [PATCH 47/48] 9p: fix slab cache name creation for real

commit a360f311f57a36e96d88fa8086b749159714dcd2 upstream.

This was attempted by using the dev_name in the slab cache name, but as
Omar Sandoval pointed out, that can be an arbitrary string, eg something
like "/dev/root". Which in turn trips verify_dirent_name(), which fails
if a filename contains a slash.

So just make it use a sequence counter, and make it an atomic_t to
avoid any possible races or locking issues.

Reported-and-tested-by: Omar Sandoval
Link: https://lore.kernel.org/all/ZxafcO8KWMlXaeWE@telecaster.dhcp.thefacebook.com/
Fixes: 79efebae4afc ("9p: Avoid creating multiple slab caches with the same name")
Acked-by: Vlastimil Babka
Cc: Dominique Martinet
Cc: Thorsten Leemhuis
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 net/9p/client.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/net/9p/client.c b/net/9p/client.c
index e7ea6c5c7463d..d841d82e908fe 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -976,6 +976,7 @@ static int p9_client_version(struct p9_client *c)
 struct p9_client *p9_client_create(const char *dev_name, char *options)
 {
 	int err;
+	static atomic_t seqno = ATOMIC_INIT(0);
 	struct p9_client *clnt;
 	char *client_id;
 	char *cache_name;
@@ -1035,7 +1036,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	if (err)
 		goto close_trans;
 
-	cache_name = kasprintf(GFP_KERNEL, "9p-fcall-cache-%s", dev_name);
+	cache_name = kasprintf(GFP_KERNEL,
+			       "9p-fcall-cache-%u", atomic_inc_return(&seqno));
 	if (!cache_name) {
 		err = -ENOMEM;
 		goto close_trans;
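The fix's naming scheme generalizes: when a registry needs unique, well-formed
names, derive them from a trusted prefix plus a global atomic counter rather
than from caller-supplied strings. A userspace sketch of the same idea (the
helper is hypothetical; error handling mirrors the patch's -ENOMEM path):

#define _GNU_SOURCE	/* for asprintf() */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint cache_seqno;	/* like the patch's static atomic_t seqno */

/*
 * Unlike embedding dev_name, a counter cannot smuggle '/' or other
 * characters that downstream consumers of the name would reject.
 */
static char *make_cache_name(void)
{
	char *name;
	unsigned int n = atomic_fetch_add(&cache_seqno, 1) + 1;

	if (asprintf(&name, "9p-fcall-cache-%u", n) < 0)
		return NULL;	/* caller treats this as out of memory */
	return name;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		char *name = make_cache_name();

		if (!name)
			return 1;
		puts(name);	/* 9p-fcall-cache-1, -2, -3 */
		free(name);
	}
	return 0;
}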
From c1036e4f14d03aba549cdd9b186148d331013056 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Sun, 17 Nov 2024 15:08:59 +0100
Subject: [PATCH 48/48] Linux 6.6.62

Link: https://lore.kernel.org/r/20241115063722.962047137@linuxfoundation.org
Tested-by: Takeshi Ogasawara
Tested-by: Peter Schneider
Tested-by: Harshit Mogalapalli
Tested-by: Jon Hunter
Tested-by: SeongJae Park
Tested-by: Florian Fainelli
Tested-by: Mark Brown
Tested-by: Ron Economos
Tested-by: Linux Kernel Functional Testing
Tested-by: Hardik Garg
Tested-by: Shuah Khan
Signed-off-by: Greg Kroah-Hartman
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index e0bb5aaa7fed6..5f3e285d98120 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 61
+SUBLEVEL = 62
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven