Merge tag 'mm-hotfixes-stable-2025-02-01-03-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"21 hotfixes. 8 are cc:stable and the remainder address post-6.13
issues. 13 are for MM and 8 are for non-MM.

All are singletons; please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-02-01-03-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (21 commits)
MAINTAINERS: include linux-mm for xarray maintenance
revert "xarray: port tests to kunit"
MAINTAINERS: add lib/test_xarray.c
mailmap, MAINTAINERS, docs: update Carlos's email address
mm/hugetlb: fix hugepage allocation for interleaved memory nodes
mm: gup: fix infinite loop within __get_longterm_locked
mm, swap: fix reclaim offset calculation error during allocation
.mailmap: update email address for Christopher Obbard
kfence: skip __GFP_THISNODE allocations on NUMA systems
nilfs2: fix possible int overflows in nilfs_fiemap()
mm: compaction: use the proper flag to determine watermarks
kernel: be more careful about dup_mmap() failures and uprobe registering
mm/fake-numa: handle cases with no SRAT info
mm: kmemleak: fix upper boundary check for physical address objects
mailmap: add an entry for Hamza Mahfooz
MAINTAINERS: mailmap: update Yosry Ahmed's email address
scripts/gdb: fix aarch64 userspace detection in get_current_task
mm/vmscan: accumulate nr_demoted for accurate demotion statistics
ocfs2: fix incorrect CPU endianness conversion causing mount failure
mm/zsmalloc: add __maybe_unused attribute for is_first_zpdesc()
...

Diffstat (total): +379 -441

.mailmap  (+6 -1)
···
 Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
 Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
 Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
-Carlos Bilbao <carlos.bilbao.osdev@gmail.com> <carlos.bilbao@amd.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao@amd.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <carlos.bilbao.osdev@gmail.com>
+Carlos Bilbao <carlos.bilbao@kernel.org> <bilbao@vt.edu>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
···
 Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
 Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
+Christopher Obbard <christopher.obbard@linaro.org> <chris.obbard@collabora.com>
 Christoph Hellwig <hch@lst.de>
 Chuck Lever <chuck.lever@oracle.com> <cel@kernel.org>
 Chuck Lever <chuck.lever@oracle.com> <cel@netapp.com>
···
 Guru Das Srinagesh <quic_gurus@quicinc.com> <gurus@codeaurora.org>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Hamza Mahfooz <hamzamahfooz@linux.microsoft.com> <hamza.mahfooz@amd.com>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
 Hans Verkuil <hverkuil@xs4all.nl> <hansverk@cisco.com>
 Hans Verkuil <hverkuil@xs4all.nl> <hverkuil-cisco@xs4all.nl>
···
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
 Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
+Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
 Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>

Documentation/translations/sp_SP/index.rst  (+1 -1)
···

 \kerneldocCJKoff

-:maintainer: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+:maintainer: Carlos Bilbao <carlos.bilbao@kernel.org>

 .. _sp_disclaimer:

MAINTAINERS  (+7 -5)
···

 AMD HSMP DRIVER
 M: Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
-R: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+R: Carlos Bilbao <carlos.bilbao@kernel.org>
 L: platform-driver-x86@vger.kernel.org
 S: Maintained
 F: Documentation/arch/x86/amd_hsmp.rst
···

 CONFIDENTIAL COMPUTING THREAT MODEL FOR X86 VIRTUALIZATION (SNP/TDX)
 M: Elena Reshetova <elena.reshetova@intel.com>
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
 S: Maintained
 F: Documentation/security/snp-tdx-threat-model.rst

···
 F: drivers/video/fbdev/imsttfb.c

 INDEX OF FURTHER KERNEL DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
 S: Maintained
 F: Documentation/process/kernel-docs.rst

···
 F: drivers/media/dvb-frontends/sp2*

 SPANISH DOCUMENTATION
-M: Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+M: Carlos Bilbao <carlos.bilbao@kernel.org>
 R: Avadhut Naik <avadhut.naik@amd.com>
 S: Maintained
 F: Documentation/translations/sp_SP/
···
 XARRAY
 M: Matthew Wilcox <willy@infradead.org>
 L: linux-fsdevel@vger.kernel.org
+L: linux-mm@kvack.org
 S: Supported
 F: Documentation/core-api/xarray.rst
 F: include/linux/idr.h
 F: include/linux/xarray.h
 F: lib/idr.c
+F: lib/test_xarray.c
 F: lib/xarray.c
 F: tools/testing/radix-tree
···

 ZSWAP COMPRESSED SWAP CACHING
 M: Johannes Weiner <hannes@cmpxchg.org>
-M: Yosry Ahmed <yosryahmed@google.com>
+M: Yosry Ahmed <yosry.ahmed@linux.dev>
 M: Nhat Pham <nphamcs@gmail.com>
 R: Chengming Zhou <chengming.zhou@linux.dev>
 L: linux-mm@kvack.org

arch/m68k/configs/amiga_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/apollo_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/atari_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/bvme6000_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/hp300_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/mac_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/multi_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/mvme147_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/mvme16x_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/q40_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/sun3_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/m68k/configs/sun3x_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

arch/powerpc/configs/ppc64_defconfig  (+1)
···
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

drivers/acpi/numa/srat.c  (+10 -1)
···
 	int i, j, index = -1, count = 0;
 	nodemask_t nodes_to_enable;

-	if (numa_off || srat_disabled())
+	if (numa_off)
 		return -1;
+
+	/* no or incomplete node/PXM mapping set, nothing to do */
+	if (srat_disabled())
+		return 0;

 	/* find fake nodes PXM mapping */
 	for (i = 0; i < MAX_NUMNODES; i++) {
···
 			}
 		}
 	}
+	}
+	if (index == -1) {
+		pr_debug("No node/PXM mapping has been set\n");
+		/* nothing more to be done */
+		return 0;
 	}
 	if (WARN(index != max_nid, "%d max nid when expected %d\n",
 			index, max_nid))

fs/nilfs2/inode.c  (+3 -3)
···
 		if (size) {
 			if (phys && blkphy << blkbits == phys + size) {
 				/* The current extent goes on */
-				size += n << blkbits;
+				size += (u64)n << blkbits;
 			} else {
 				/* Terminate the current extent */
 				ret = fiemap_fill_next_extent(
···
 				flags = FIEMAP_EXTENT_MERGED;
 				logical = blkoff << blkbits;
 				phys = blkphy << blkbits;
-				size = n << blkbits;
+				size = (u64)n << blkbits;
 			}
 		} else {
 			/* Start a new extent */
 			flags = FIEMAP_EXTENT_MERGED;
 			logical = blkoff << blkbits;
 			phys = blkphy << blkbits;
-			size = n << blkbits;
+			size = (u64)n << blkbits;
 		}
 		blkoff += n;
 	}

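Why the (u64) cast above matters: without it the shift is performed in 32-bit arithmetic and can wrap before the result is widened for the 64-bit extent size. A minimal userspace sketch of the effect, with made-up types and values (illustrative only, not nilfs2 code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n = 1U << 22;		/* number of blocks in one extent */
	unsigned int blkbits = 12;	/* 4 KiB block size */

	uint64_t wrong = n << blkbits;			/* 32-bit shift: wraps to 0 */
	uint64_t right = (uint64_t)n << blkbits;	/* 64-bit shift: 16 GiB */

	printf("without cast: %llu, with cast: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
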
fs/ocfs2/super.c  (+1 -1)
···
 		mlog(ML_ERROR, "found superblock with incorrect block "
 		     "size bits: found %u, should be 9, 10, 11, or 12\n",
 		     blksz_bits);
-	} else if ((1 << le32_to_cpu(blksz_bits)) != blksz) {
+	} else if ((1 << blksz_bits) != blksz) {
 		mlog(ML_ERROR, "found superblock with incorrect block "
 		     "size: found %u, should be %u\n", 1 << blksz_bits, blksz);
 	} else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=

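The dropped le32_to_cpu() was applied to a value that, judging by the surrounding checks, is already in CPU byte order. On little-endian hosts the extra conversion is a no-op, but on big-endian hosts it byte-swaps a correct value and the mount-time block-size check then fails. A rough userspace illustration, assuming the GCC/Clang __builtin_bswap32 builtin (this is not ocfs2 code):

#include <stdint.h>
#include <stdio.h>

/* What le32_to_cpu() boils down to on a big-endian host. */
static uint32_t be_host_le32_to_cpu(uint32_t raw)
{
	return __builtin_bswap32(raw);
}

int main(void)
{
	uint32_t raw = 0x0c000000;	/* little-endian 12 as read raw by a BE CPU */

	uint32_t once  = be_host_le32_to_cpu(raw);	/* 12: correct */
	uint32_t twice = be_host_le32_to_cpu(once);	/* 201326592: bogus */

	printf("converted once: %u, converted twice: %u\n", once, twice);
	return 0;
}
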
include/linux/swap.h  (+1)
···
 };

 #define SWAP_CLUSTER_MAX 32UL
+#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

 /* Bit flag in swap_map */

kernel/events/uprobes.c  (+4)
···
 #include <linux/rcupdate_trace.h>
 #include <linux/workqueue.h>
 #include <linux/srcu.h>
+#include <linux/oom.h>          /* check_stable_address_space */

 #include <linux/uprobes.h>

···
 	 * returns NULL in find_active_uprobe_rcu().
 	 */
 	mmap_write_lock(mm);
+	if (check_stable_address_space(mm))
+		goto unlock;
+
 	vma = find_vma(mm, info->vaddr);
 	if (!vma || !valid_vma(vma, is_register) ||
 	    file_inode(vma->vm_file) != uprobe->inode)

kernel/fork.c  (+14 -3)
···
 		mt_set_in_rcu(vmi.mas.tree);
 		ksm_fork(mm, oldmm);
 		khugepaged_fork(mm, oldmm);
-	} else if (mpnt) {
+	} else {
+
 		/*
 		 * The entire maple tree has already been duplicated. If the
 		 * mmap duplication fails, mark the failure point with
···
 		 * stop releasing VMAs that have not been duplicated after this
 		 * point.
 		 */
-		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
-		mas_store(&vmi.mas, XA_ZERO_ENTRY);
+		if (mpnt) {
+			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
+			mas_store(&vmi.mas, XA_ZERO_ENTRY);
+			/* Avoid OOM iterating a broken tree */
+			set_bit(MMF_OOM_SKIP, &mm->flags);
+		}
+		/*
+		 * The mm_struct is going to exit, but the locks will be dropped
+		 * first.  Set the mm_struct as unstable is advisable as it is
+		 * not fully initialised.
+		 */
+		set_bit(MMF_UNSTABLE, &mm->flags);
 	}
 out:
 	mmap_write_unlock(mm);

lib/Kconfig.debug  (+2 -16)
···
 config TEST_UUID
 	tristate "Test functions located in the uuid module at runtime"

-config XARRAY_KUNIT
-	tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS
-	depends on KUNIT
-	default KUNIT_ALL_TESTS
-	help
-	  Enable this option to test the Xarray code at boot.
-
-	  KUnit tests run during boot and output the results to the debug log
-	  in TAP format (http://testanything.org/). Only useful for kernel devs
-	  running the KUnit test harness, and not intended for inclusion into a
-	  production build.
-
-	  For more information on KUnit and unit tests in general please refer
-	  to the KUnit documentation in Documentation/dev-tools/kunit/.
-
-	  If unsure, say N.
+config TEST_XARRAY
+	tristate "Test the XArray code at runtime"

 config TEST_MAPLE_TREE
 	tristate "Test the Maple Tree code at runtime or module load"

lib/Makefile  (+1 -1)
···
 endif

 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
···
 obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
 obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
 obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
-obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o
 obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
 obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
 obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o

lib/test_xarray.c  (+271 -386)
··· 6 6 * Author: Matthew Wilcox <willy@infradead.org> 7 7 */ 8 8 9 - #include <kunit/test.h> 10 - 11 - #include <linux/module.h> 12 9 #include <linux/xarray.h> 10 + #include <linux/module.h> 11 + 12 + static unsigned int tests_run; 13 + static unsigned int tests_passed; 13 14 14 15 static const unsigned int order_limit = 15 16 IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1; ··· 20 19 void xa_dump(const struct xarray *xa) { } 21 20 # endif 22 21 #undef XA_BUG_ON 23 - #define XA_BUG_ON(xa, x) do { \ 24 - if (x) { \ 25 - KUNIT_FAIL(test, #x); \ 26 - xa_dump(xa); \ 27 - dump_stack(); \ 28 - } \ 22 + #define XA_BUG_ON(xa, x) do { \ 23 + tests_run++; \ 24 + if (x) { \ 25 + printk("BUG at %s:%d\n", __func__, __LINE__); \ 26 + xa_dump(xa); \ 27 + dump_stack(); \ 28 + } else { \ 29 + tests_passed++; \ 30 + } \ 29 31 } while (0) 30 32 #endif 31 33 ··· 42 38 return xa_store(xa, index, xa_mk_index(index), gfp); 43 39 } 44 40 45 - static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) 41 + static void xa_insert_index(struct xarray *xa, unsigned long index) 46 42 { 47 43 XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), 48 44 GFP_KERNEL) != 0); 49 45 } 50 46 51 - static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) 47 + static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 52 48 { 53 49 u32 id; 54 50 ··· 57 53 XA_BUG_ON(xa, id != index); 58 54 } 59 55 60 - static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) 56 + static void xa_erase_index(struct xarray *xa, unsigned long index) 61 57 { 62 58 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); 63 59 XA_BUG_ON(xa, xa_load(xa, index) != NULL); ··· 83 79 return curr; 84 80 } 85 81 86 - static inline struct xarray *xa_param(struct kunit *test) 82 + static noinline void check_xa_err(struct xarray *xa) 87 83 { 88 - return *(struct xarray **)test->param_value; 89 - } 90 - 91 - static noinline void check_xa_err(struct kunit *test) 92 - { 93 - struct xarray *xa = xa_param(test); 94 - 95 84 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); 96 85 XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); 97 86 #ifndef __KERNEL__ ··· 99 102 // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); 100 103 } 101 104 102 - static noinline void check_xas_retry(struct kunit *test) 105 + static noinline void check_xas_retry(struct xarray *xa) 103 106 { 104 - struct xarray *xa = xa_param(test); 105 - 106 107 XA_STATE(xas, xa, 0); 107 108 void *entry; 108 109 ··· 109 114 110 115 rcu_read_lock(); 111 116 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); 112 - xa_erase_index(test, xa, 1); 117 + xa_erase_index(xa, 1); 113 118 XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); 114 119 XA_BUG_ON(xa, xas_retry(&xas, NULL)); 115 120 XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); ··· 140 145 } 141 146 xas_unlock(&xas); 142 147 143 - xa_erase_index(test, xa, 0); 144 - xa_erase_index(test, xa, 1); 148 + xa_erase_index(xa, 0); 149 + xa_erase_index(xa, 1); 145 150 } 146 151 147 - static noinline void check_xa_load(struct kunit *test) 152 + static noinline void check_xa_load(struct xarray *xa) 148 153 { 149 - struct xarray *xa = xa_param(test); 150 - 151 154 unsigned long i, j; 152 155 153 156 for (i = 0; i < 1024; i++) { ··· 167 174 else 168 175 XA_BUG_ON(xa, entry); 169 176 } 170 - xa_erase_index(test, xa, i); 177 + xa_erase_index(xa, i); 171 178 } 172 179 XA_BUG_ON(xa, !xa_empty(xa)); 173 180 } 174 181 175 - 
static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) 182 + static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) 176 183 { 177 - struct xarray *xa = xa_param(test); 178 - 179 184 unsigned int order; 180 185 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; 181 186 ··· 193 202 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); 194 203 195 204 /* Storing NULL clears marks, and they can't be set again */ 196 - xa_erase_index(test, xa, index); 205 + xa_erase_index(xa, index); 197 206 XA_BUG_ON(xa, !xa_empty(xa)); 198 207 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 199 208 xa_set_mark(xa, index, XA_MARK_0); ··· 244 253 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 245 254 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); 246 255 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); 247 - xa_erase_index(test, xa, index); 248 - xa_erase_index(test, xa, next); 256 + xa_erase_index(xa, index); 257 + xa_erase_index(xa, next); 249 258 XA_BUG_ON(xa, !xa_empty(xa)); 250 259 } 251 260 XA_BUG_ON(xa, !xa_empty(xa)); 252 261 } 253 262 254 - static noinline void check_xa_mark_2(struct kunit *test) 263 + static noinline void check_xa_mark_2(struct xarray *xa) 255 264 { 256 - struct xarray *xa = xa_param(test); 257 - 258 265 XA_STATE(xas, xa, 0); 259 266 unsigned long index; 260 267 unsigned int count = 0; ··· 289 300 xa_destroy(xa); 290 301 } 291 302 292 - static noinline void check_xa_mark_3(struct kunit *test) 303 + static noinline void check_xa_mark_3(struct xarray *xa) 293 304 { 294 305 #ifdef CONFIG_XARRAY_MULTI 295 - struct xarray *xa = xa_param(test); 296 - 297 306 XA_STATE(xas, xa, 0x41); 298 307 void *entry; 299 308 int count = 0; ··· 310 323 #endif 311 324 } 312 325 313 - static noinline void check_xa_mark(struct kunit *test) 326 + static noinline void check_xa_mark(struct xarray *xa) 314 327 { 315 328 unsigned long index; 316 329 317 330 for (index = 0; index < 16384; index += 4) 318 - check_xa_mark_1(test, index); 331 + check_xa_mark_1(xa, index); 319 332 320 - check_xa_mark_2(test); 321 - check_xa_mark_3(test); 333 + check_xa_mark_2(xa); 334 + check_xa_mark_3(xa); 322 335 } 323 336 324 - static noinline void check_xa_shrink(struct kunit *test) 337 + static noinline void check_xa_shrink(struct xarray *xa) 325 338 { 326 - struct xarray *xa = xa_param(test); 327 - 328 339 XA_STATE(xas, xa, 1); 329 340 struct xa_node *node; 330 341 unsigned int order; ··· 347 362 XA_BUG_ON(xa, xas_load(&xas) != NULL); 348 363 xas_unlock(&xas); 349 364 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 350 - xa_erase_index(test, xa, 0); 365 + xa_erase_index(xa, 0); 351 366 XA_BUG_ON(xa, !xa_empty(xa)); 352 367 353 368 for (order = 0; order < max_order; order++) { ··· 364 379 XA_BUG_ON(xa, xa_head(xa) == node); 365 380 rcu_read_unlock(); 366 381 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 367 - xa_erase_index(test, xa, ULONG_MAX); 382 + xa_erase_index(xa, ULONG_MAX); 368 383 XA_BUG_ON(xa, xa->xa_head != node); 369 - xa_erase_index(test, xa, 0); 384 + xa_erase_index(xa, 0); 370 385 } 371 386 } 372 387 373 - static noinline void check_insert(struct kunit *test) 388 + static noinline void check_insert(struct xarray *xa) 374 389 { 375 - struct xarray *xa = xa_param(test); 376 - 377 390 unsigned long i; 378 391 379 392 for (i = 0; i < 1024; i++) { 380 - xa_insert_index(test, xa, i); 393 + xa_insert_index(xa, i); 381 394 XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); 382 395 XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); 383 - xa_erase_index(test, xa, i); 396 + 
xa_erase_index(xa, i); 384 397 } 385 398 386 399 for (i = 10; i < BITS_PER_LONG; i++) { 387 - xa_insert_index(test, xa, 1UL << i); 400 + xa_insert_index(xa, 1UL << i); 388 401 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); 389 402 XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); 390 - xa_erase_index(test, xa, 1UL << i); 403 + xa_erase_index(xa, 1UL << i); 391 404 392 - xa_insert_index(test, xa, (1UL << i) - 1); 405 + xa_insert_index(xa, (1UL << i) - 1); 393 406 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); 394 407 XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); 395 - xa_erase_index(test, xa, (1UL << i) - 1); 408 + xa_erase_index(xa, (1UL << i) - 1); 396 409 } 397 410 398 - xa_insert_index(test, xa, ~0UL); 411 + xa_insert_index(xa, ~0UL); 399 412 XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); 400 413 XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); 401 - xa_erase_index(test, xa, ~0UL); 414 + xa_erase_index(xa, ~0UL); 402 415 403 416 XA_BUG_ON(xa, !xa_empty(xa)); 404 417 } 405 418 406 - static noinline void check_cmpxchg(struct kunit *test) 419 + static noinline void check_cmpxchg(struct xarray *xa) 407 420 { 408 - struct xarray *xa = xa_param(test); 409 - 410 421 void *FIVE = xa_mk_value(5); 411 422 void *SIX = xa_mk_value(6); 412 423 void *LOTS = xa_mk_value(12345678); ··· 418 437 XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); 419 438 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); 420 439 XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); 421 - xa_erase_index(test, xa, 12345678); 422 - xa_erase_index(test, xa, 5); 440 + xa_erase_index(xa, 12345678); 441 + xa_erase_index(xa, 5); 423 442 XA_BUG_ON(xa, !xa_empty(xa)); 424 443 } 425 444 426 - static noinline void check_cmpxchg_order(struct kunit *test) 445 + static noinline void check_cmpxchg_order(struct xarray *xa) 427 446 { 428 447 #ifdef CONFIG_XARRAY_MULTI 429 - struct xarray *xa = xa_param(test); 430 - 431 448 void *FIVE = xa_mk_value(5); 432 449 unsigned int i, order = 3; 433 450 ··· 476 497 #endif 477 498 } 478 499 479 - static noinline void check_reserve(struct kunit *test) 500 + static noinline void check_reserve(struct xarray *xa) 480 501 { 481 - struct xarray *xa = xa_param(test); 482 - 483 502 void *entry; 484 503 unsigned long index; 485 504 int count; ··· 494 517 XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); 495 518 XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); 496 519 xa_release(xa, 12345678); 497 - xa_erase_index(test, xa, 12345678); 520 + xa_erase_index(xa, 12345678); 498 521 XA_BUG_ON(xa, !xa_empty(xa)); 499 522 500 523 /* cmpxchg sees a reserved entry as ZERO */ ··· 502 525 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, 503 526 xa_mk_value(12345678), GFP_NOWAIT) != NULL); 504 527 xa_release(xa, 12345678); 505 - xa_erase_index(test, xa, 12345678); 528 + xa_erase_index(xa, 12345678); 506 529 XA_BUG_ON(xa, !xa_empty(xa)); 507 530 508 531 /* xa_insert treats it as busy */ ··· 542 565 xa_destroy(xa); 543 566 } 544 567 545 - static noinline void check_xas_erase(struct kunit *test) 568 + static noinline void check_xas_erase(struct xarray *xa) 546 569 { 547 - struct xarray *xa = xa_param(test); 548 - 549 570 XA_STATE(xas, xa, 0); 550 571 void *entry; 551 572 unsigned long i, j; ··· 581 606 } 582 607 583 608 #ifdef CONFIG_XARRAY_MULTI 584 - static noinline void check_multi_store_1(struct kunit *test, unsigned long index, 609 + static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, 585 610 unsigned int order) 586 611 { 587 - 
struct xarray *xa = xa_param(test); 588 - 589 612 XA_STATE(xas, xa, index); 590 613 unsigned long min = index & ~((1UL << order) - 1); 591 614 unsigned long max = min + (1UL << order); ··· 602 629 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 603 630 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 604 631 605 - xa_erase_index(test, xa, min); 632 + xa_erase_index(xa, min); 606 633 XA_BUG_ON(xa, !xa_empty(xa)); 607 634 } 608 635 609 - static noinline void check_multi_store_2(struct kunit *test, unsigned long index, 636 + static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, 610 637 unsigned int order) 611 638 { 612 - struct xarray *xa = xa_param(test); 613 - 614 639 XA_STATE(xas, xa, index); 615 640 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 616 641 ··· 620 649 XA_BUG_ON(xa, !xa_empty(xa)); 621 650 } 622 651 623 - static noinline void check_multi_store_3(struct kunit *test, unsigned long index, 652 + static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, 624 653 unsigned int order) 625 654 { 626 - struct xarray *xa = xa_param(test); 627 - 628 655 XA_STATE(xas, xa, 0); 629 656 void *entry; 630 657 int n = 0; ··· 647 678 } 648 679 #endif 649 680 650 - static noinline void check_multi_store(struct kunit *test) 681 + static noinline void check_multi_store(struct xarray *xa) 651 682 { 652 683 #ifdef CONFIG_XARRAY_MULTI 653 - struct xarray *xa = xa_param(test); 654 - 655 684 unsigned long i, j, k; 656 685 unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; 657 686 ··· 714 747 } 715 748 716 749 for (i = 0; i < 20; i++) { 717 - check_multi_store_1(test, 200, i); 718 - check_multi_store_1(test, 0, i); 719 - check_multi_store_1(test, (1UL << i) + 1, i); 750 + check_multi_store_1(xa, 200, i); 751 + check_multi_store_1(xa, 0, i); 752 + check_multi_store_1(xa, (1UL << i) + 1, i); 720 753 } 721 - check_multi_store_2(test, 4095, 9); 754 + check_multi_store_2(xa, 4095, 9); 722 755 723 756 for (i = 1; i < 20; i++) { 724 - check_multi_store_3(test, 0, i); 725 - check_multi_store_3(test, 1UL << i, i); 757 + check_multi_store_3(xa, 0, i); 758 + check_multi_store_3(xa, 1UL << i, i); 726 759 } 727 760 #endif 728 761 } 729 762 730 763 #ifdef CONFIG_XARRAY_MULTI 731 764 /* mimics page cache __filemap_add_folio() */ 732 - static noinline void check_xa_multi_store_adv_add(struct kunit *test, 765 + static noinline void check_xa_multi_store_adv_add(struct xarray *xa, 733 766 unsigned long index, 734 767 unsigned int order, 735 768 void *p) 736 769 { 737 - struct xarray *xa = xa_param(test); 738 - 739 770 XA_STATE(xas, xa, index); 740 771 unsigned int nrpages = 1UL << order; 741 772 ··· 761 796 } 762 797 763 798 /* mimics page_cache_delete() */ 764 - static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, 799 + static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, 765 800 unsigned long index, 766 801 unsigned int order) 767 802 { 768 - struct xarray *xa = xa_param(test); 769 - 770 803 XA_STATE(xas, xa, index); 771 804 772 805 xas_set_order(&xas, index, order); ··· 772 809 xas_init_marks(&xas); 773 810 } 774 811 775 - static noinline void check_xa_multi_store_adv_delete(struct kunit *test, 812 + static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, 776 813 unsigned long index, 777 814 unsigned int order) 778 815 { 779 - struct xarray *xa = xa_param(test); 780 - 781 816 xa_lock_irq(xa); 782 - check_xa_multi_store_adv_del_entry(test, index, order); 817 + check_xa_multi_store_adv_del_entry(xa, 
index, order); 783 818 xa_unlock_irq(xa); 784 819 } 785 820 ··· 814 853 static unsigned long some_val_2 = 0xdeaddead; 815 854 816 855 /* mimics the page cache usage */ 817 - static noinline void check_xa_multi_store_adv(struct kunit *test, 856 + static noinline void check_xa_multi_store_adv(struct xarray *xa, 818 857 unsigned long pos, 819 858 unsigned int order) 820 859 { 821 - struct xarray *xa = xa_param(test); 822 - 823 860 unsigned int nrpages = 1UL << order; 824 861 unsigned long index, base, next_index, next_next_index; 825 862 unsigned int i; ··· 827 868 next_index = round_down(base + nrpages, nrpages); 828 869 next_next_index = round_down(next_index + nrpages, nrpages); 829 870 830 - check_xa_multi_store_adv_add(test, base, order, &some_val); 871 + check_xa_multi_store_adv_add(xa, base, order, &some_val); 831 872 832 873 for (i = 0; i < nrpages; i++) 833 874 XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val); ··· 835 876 XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); 836 877 837 878 /* Use order 0 for the next item */ 838 - check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); 879 + check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); 839 880 XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); 840 881 841 882 /* Remove the next item */ 842 - check_xa_multi_store_adv_delete(test, next_index, 0); 883 + check_xa_multi_store_adv_delete(xa, next_index, 0); 843 884 844 885 /* Now use order for a new pointer */ 845 - check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); 886 + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); 846 887 847 888 for (i = 0; i < nrpages; i++) 848 889 XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); 849 890 850 - check_xa_multi_store_adv_delete(test, next_index, order); 851 - check_xa_multi_store_adv_delete(test, base, order); 891 + check_xa_multi_store_adv_delete(xa, next_index, order); 892 + check_xa_multi_store_adv_delete(xa, base, order); 852 893 XA_BUG_ON(xa, !xa_empty(xa)); 853 894 854 895 /* starting fresh again */ ··· 856 897 /* let's test some holes now */ 857 898 858 899 /* hole at base and next_next */ 859 - check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); 900 + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); 860 901 861 902 for (i = 0; i < nrpages; i++) 862 903 XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); ··· 867 908 for (i = 0; i < nrpages; i++) 868 909 XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); 869 910 870 - check_xa_multi_store_adv_delete(test, next_index, order); 911 + check_xa_multi_store_adv_delete(xa, next_index, order); 871 912 XA_BUG_ON(xa, !xa_empty(xa)); 872 913 873 914 /* hole at base and next */ 874 915 875 - check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); 916 + check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); 876 917 877 918 for (i = 0; i < nrpages; i++) 878 919 XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); ··· 883 924 for (i = 0; i < nrpages; i++) 884 925 XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); 885 926 886 - check_xa_multi_store_adv_delete(test, next_next_index, order); 927 + check_xa_multi_store_adv_delete(xa, next_next_index, order); 887 928 XA_BUG_ON(xa, !xa_empty(xa)); 888 929 } 889 930 #endif 890 931 891 - static noinline void check_multi_store_advanced(struct kunit *test) 932 + static noinline void check_multi_store_advanced(struct xarray *xa) 892 933 { 893 934 #ifdef CONFIG_XARRAY_MULTI 894 
935 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; ··· 900 941 */ 901 942 for (pos = 7; pos < end; pos = (pos * pos) + 564) { 902 943 for (i = 0; i < max_order; i++) { 903 - check_xa_multi_store_adv(test, pos, i); 904 - check_xa_multi_store_adv(test, pos + 157, i); 944 + check_xa_multi_store_adv(xa, pos, i); 945 + check_xa_multi_store_adv(xa, pos + 157, i); 905 946 } 906 947 } 907 948 #endif 908 949 } 909 950 910 - static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base) 951 + static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) 911 952 { 912 953 int i; 913 954 u32 id; 914 955 915 956 XA_BUG_ON(xa, !xa_empty(xa)); 916 957 /* An empty array should assign %base to the first alloc */ 917 - xa_alloc_index(test, xa, base, GFP_KERNEL); 958 + xa_alloc_index(xa, base, GFP_KERNEL); 918 959 919 960 /* Erasing it should make the array empty again */ 920 - xa_erase_index(test, xa, base); 961 + xa_erase_index(xa, base); 921 962 XA_BUG_ON(xa, !xa_empty(xa)); 922 963 923 964 /* And it should assign %base again */ 924 - xa_alloc_index(test, xa, base, GFP_KERNEL); 965 + xa_alloc_index(xa, base, GFP_KERNEL); 925 966 926 967 /* Allocating and then erasing a lot should not lose base */ 927 968 for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) 928 - xa_alloc_index(test, xa, i, GFP_KERNEL); 969 + xa_alloc_index(xa, i, GFP_KERNEL); 929 970 for (i = base; i < 2 * XA_CHUNK_SIZE; i++) 930 - xa_erase_index(test, xa, i); 931 - xa_alloc_index(test, xa, base, GFP_KERNEL); 971 + xa_erase_index(xa, i); 972 + xa_alloc_index(xa, base, GFP_KERNEL); 932 973 933 974 /* Destroying the array should do the same as erasing */ 934 975 xa_destroy(xa); 935 976 936 977 /* And it should assign %base again */ 937 - xa_alloc_index(test, xa, base, GFP_KERNEL); 978 + xa_alloc_index(xa, base, GFP_KERNEL); 938 979 939 980 /* The next assigned ID should be base+1 */ 940 - xa_alloc_index(test, xa, base + 1, GFP_KERNEL); 941 - xa_erase_index(test, xa, base + 1); 981 + xa_alloc_index(xa, base + 1, GFP_KERNEL); 982 + xa_erase_index(xa, base + 1); 942 983 943 984 /* Storing a value should mark it used */ 944 985 xa_store_index(xa, base + 1, GFP_KERNEL); 945 - xa_alloc_index(test, xa, base + 2, GFP_KERNEL); 986 + xa_alloc_index(xa, base + 2, GFP_KERNEL); 946 987 947 988 /* If we then erase base, it should be free */ 948 - xa_erase_index(test, xa, base); 949 - xa_alloc_index(test, xa, base, GFP_KERNEL); 989 + xa_erase_index(xa, base); 990 + xa_alloc_index(xa, base, GFP_KERNEL); 950 991 951 - xa_erase_index(test, xa, base + 1); 952 - xa_erase_index(test, xa, base + 2); 992 + xa_erase_index(xa, base + 1); 993 + xa_erase_index(xa, base + 2); 953 994 954 995 for (i = 1; i < 5000; i++) { 955 - xa_alloc_index(test, xa, base + i, GFP_KERNEL); 996 + xa_alloc_index(xa, base + i, GFP_KERNEL); 956 997 } 957 998 958 999 xa_destroy(xa); ··· 975 1016 976 1017 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 977 1018 GFP_KERNEL) != -EBUSY); 978 - XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL); 1019 + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); 979 1020 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 980 1021 GFP_KERNEL) != -EBUSY); 981 - xa_erase_index(test, xa, 3); 1022 + xa_erase_index(xa, 3); 982 1023 XA_BUG_ON(xa, !xa_empty(xa)); 983 1024 } 984 1025 985 - static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base) 1026 + static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int 
base) 986 1027 { 987 1028 unsigned int i, id; 988 1029 unsigned long index; ··· 1018 1059 XA_BUG_ON(xa, id != 5); 1019 1060 1020 1061 xa_for_each(xa, index, entry) { 1021 - xa_erase_index(test, xa, index); 1062 + xa_erase_index(xa, index); 1022 1063 } 1023 1064 1024 1065 for (i = base; i < base + 9; i++) { ··· 1033 1074 xa_destroy(xa); 1034 1075 } 1035 1076 1036 - static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base) 1077 + static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) 1037 1078 { 1038 1079 struct xa_limit limit = XA_LIMIT(1, 0x3fff); 1039 1080 u32 next = 0; ··· 1049 1090 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, 1050 1091 &next, GFP_KERNEL) != 0); 1051 1092 XA_BUG_ON(xa, id != 0x3ffd); 1052 - xa_erase_index(test, xa, 0x3ffd); 1053 - xa_erase_index(test, xa, 1); 1093 + xa_erase_index(xa, 0x3ffd); 1094 + xa_erase_index(xa, 1); 1054 1095 XA_BUG_ON(xa, !xa_empty(xa)); 1055 1096 1056 1097 for (i = 0x3ffe; i < 0x4003; i++) { ··· 1065 1106 1066 1107 /* Check wrap-around is handled correctly */ 1067 1108 if (base != 0) 1068 - xa_erase_index(test, xa, base); 1069 - xa_erase_index(test, xa, base + 1); 1109 + xa_erase_index(xa, base); 1110 + xa_erase_index(xa, base + 1); 1070 1111 next = UINT_MAX; 1071 1112 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), 1072 1113 xa_limit_32b, &next, GFP_KERNEL) != 0); ··· 1079 1120 XA_BUG_ON(xa, id != base + 1); 1080 1121 1081 1122 xa_for_each(xa, index, entry) 1082 - xa_erase_index(test, xa, index); 1123 + xa_erase_index(xa, index); 1083 1124 1084 1125 XA_BUG_ON(xa, !xa_empty(xa)); 1085 1126 } ··· 1087 1128 static DEFINE_XARRAY_ALLOC(xa0); 1088 1129 static DEFINE_XARRAY_ALLOC1(xa1); 1089 1130 1090 - static noinline void check_xa_alloc(struct kunit *test) 1131 + static noinline void check_xa_alloc(void) 1091 1132 { 1092 - check_xa_alloc_1(test, &xa0, 0); 1093 - check_xa_alloc_1(test, &xa1, 1); 1094 - check_xa_alloc_2(test, &xa0, 0); 1095 - check_xa_alloc_2(test, &xa1, 1); 1096 - check_xa_alloc_3(test, &xa0, 0); 1097 - check_xa_alloc_3(test, &xa1, 1); 1133 + check_xa_alloc_1(&xa0, 0); 1134 + check_xa_alloc_1(&xa1, 1); 1135 + check_xa_alloc_2(&xa0, 0); 1136 + check_xa_alloc_2(&xa1, 1); 1137 + check_xa_alloc_3(&xa0, 0); 1138 + check_xa_alloc_3(&xa1, 1); 1098 1139 } 1099 1140 1100 - static noinline void __check_store_iter(struct kunit *test, unsigned long start, 1141 + static noinline void __check_store_iter(struct xarray *xa, unsigned long start, 1101 1142 unsigned int order, unsigned int present) 1102 1143 { 1103 - struct xarray *xa = xa_param(test); 1104 - 1105 1144 XA_STATE_ORDER(xas, xa, start, order); 1106 1145 void *entry; 1107 1146 unsigned int count = 0; ··· 1123 1166 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); 1124 1167 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 1125 1168 xa_mk_index(start)); 1126 - xa_erase_index(test, xa, start); 1169 + xa_erase_index(xa, start); 1127 1170 } 1128 1171 1129 - static noinline void check_store_iter(struct kunit *test) 1172 + static noinline void check_store_iter(struct xarray *xa) 1130 1173 { 1131 - struct xarray *xa = xa_param(test); 1132 - 1133 1174 unsigned int i, j; 1134 1175 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; 1135 1176 1136 1177 for (i = 0; i < max_order; i++) { 1137 1178 unsigned int min = 1 << i; 1138 1179 unsigned int max = (2 << i) - 1; 1139 - __check_store_iter(test, 0, i, 0); 1180 + __check_store_iter(xa, 0, i, 0); 1140 1181 XA_BUG_ON(xa, !xa_empty(xa)); 1141 - __check_store_iter(test, min, i, 0); 1182 + __check_store_iter(xa, min, i, 0); 1142 1183 XA_BUG_ON(xa, !xa_empty(xa)); 1143 1184 1144 1185 xa_store_index(xa, min, GFP_KERNEL); 1145 - __check_store_iter(test, min, i, 1); 1186 + __check_store_iter(xa, min, i, 1); 1146 1187 XA_BUG_ON(xa, !xa_empty(xa)); 1147 1188 xa_store_index(xa, max, GFP_KERNEL); 1148 - __check_store_iter(test, min, i, 1); 1189 + __check_store_iter(xa, min, i, 1); 1149 1190 XA_BUG_ON(xa, !xa_empty(xa)); 1150 1191 1151 1192 for (j = 0; j < min; j++) 1152 1193 xa_store_index(xa, j, GFP_KERNEL); 1153 - __check_store_iter(test, 0, i, min); 1194 + __check_store_iter(xa, 0, i, min); 1154 1195 XA_BUG_ON(xa, !xa_empty(xa)); 1155 1196 for (j = 0; j < min; j++) 1156 1197 xa_store_index(xa, min + j, GFP_KERNEL); 1157 - __check_store_iter(test, min, i, min); 1198 + __check_store_iter(xa, min, i, min); 1158 1199 XA_BUG_ON(xa, !xa_empty(xa)); 1159 1200 } 1160 1201 #ifdef CONFIG_XARRAY_MULTI 1161 1202 xa_store_index(xa, 63, GFP_KERNEL); 1162 1203 xa_store_index(xa, 65, GFP_KERNEL); 1163 - __check_store_iter(test, 64, 2, 1); 1164 - xa_erase_index(test, xa, 63); 1204 + __check_store_iter(xa, 64, 2, 1); 1205 + xa_erase_index(xa, 63); 1165 1206 #endif 1166 1207 XA_BUG_ON(xa, !xa_empty(xa)); 1167 1208 } 1168 1209 1169 - static noinline void check_multi_find_1(struct kunit *test, unsigned int order) 1210 + static noinline void check_multi_find_1(struct xarray *xa, unsigned order) 1170 1211 { 1171 1212 #ifdef CONFIG_XARRAY_MULTI 1172 - struct xarray *xa = xa_param(test); 1173 - 1174 1213 unsigned long multi = 3 << order; 1175 1214 unsigned long next = 4 << order; 1176 1215 unsigned long index; ··· 1189 1236 XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); 1190 1237 XA_BUG_ON(xa, index != next); 1191 1238 1192 - xa_erase_index(test, xa, multi); 1193 - xa_erase_index(test, xa, next); 1194 - xa_erase_index(test, xa, next + 1); 1239 + xa_erase_index(xa, multi); 1240 + xa_erase_index(xa, next); 1241 + xa_erase_index(xa, next + 1); 1195 1242 XA_BUG_ON(xa, !xa_empty(xa)); 1196 1243 #endif 1197 1244 } 1198 1245 1199 - static noinline void check_multi_find_2(struct kunit *test) 1246 + static noinline void check_multi_find_2(struct xarray *xa) 1200 1247 { 1201 - struct xarray *xa = xa_param(test); 1202 - 1203 1248 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; 1204 1249 unsigned int i, j; 1205 1250 void *entry; ··· 1211 1260 GFP_KERNEL); 1212 1261 rcu_read_lock(); 1213 1262 xas_for_each(&xas, entry, ULONG_MAX) { 1214 - xa_erase_index(test, xa, index); 1263 + xa_erase_index(xa, index); 1215 1264 } 1216 1265 rcu_read_unlock(); 1217 - xa_erase_index(test, xa, index - 1); 1266 + xa_erase_index(xa, index - 1); 1218 1267 XA_BUG_ON(xa, !xa_empty(xa)); 1219 1268 } 1220 1269 } 1221 1270 } 1222 1271 1223 - static noinline void check_multi_find_3(struct kunit *test) 1272 + static noinline void check_multi_find_3(struct xarray *xa) 1224 1273 { 1225 - struct xarray *xa = xa_param(test); 1226 - 1227 1274 unsigned int order; 1228 1275 1229 1276 for (order = 5; order < order_limit; order++) { ··· 1230 1281 XA_BUG_ON(xa, !xa_empty(xa)); 1231 1282 xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); 1232 1283 XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); 1233 - xa_erase_index(test, xa, 0); 1284 + xa_erase_index(xa, 0); 1234 1285 } 1235 1286 } 1236 1287 1237 - static noinline void check_find_1(struct kunit *test) 1288 + static noinline void check_find_1(struct xarray *xa) 1238 1289 { 1239 - struct xarray *xa = xa_param(test); 1240 - 1241 1290 unsigned long i, j, k; 1242 1291 1243 1292 XA_BUG_ON(xa, !xa_empty(xa)); ··· 1272 1325 else 1273 1326 XA_BUG_ON(xa, entry != NULL); 1274 1327 } 1275 - xa_erase_index(test, xa, j); 1328 + xa_erase_index(xa, j); 1276 1329 XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); 1277 1330 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 1278 1331 } 1279 - xa_erase_index(test, xa, i); 1332 + xa_erase_index(xa, i); 1280 1333 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 1281 1334 } 1282 1335 XA_BUG_ON(xa, !xa_empty(xa)); 1283 1336 } 1284 1337 1285 - static noinline void check_find_2(struct kunit *test) 1338 + static noinline void check_find_2(struct xarray *xa) 1286 1339 { 1287 - struct xarray *xa = xa_param(test); 1288 - 1289 1340 void *entry; 1290 1341 unsigned long i, j, index; 1291 1342 ··· 1303 1358 xa_destroy(xa); 1304 1359 } 1305 1360 1306 - static noinline void check_find_3(struct kunit *test) 1361 + static noinline void check_find_3(struct xarray *xa) 1307 1362 { 1308 - struct xarray *xa = xa_param(test); 1309 - 1310 1363 XA_STATE(xas, xa, 0); 1311 1364 unsigned long i, j, k; 1312 1365 void *entry; ··· 1328 1385 xa_destroy(xa); 1329 1386 } 1330 1387 1331 - static noinline void check_find_4(struct kunit *test) 1388 + static noinline void check_find_4(struct xarray *xa) 1332 1389 { 1333 - struct xarray *xa = xa_param(test); 1334 - 1335 1390 unsigned long index = 0; 1336 1391 void *entry; 1337 1392 ··· 1341 1400 entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); 1342 1401 XA_BUG_ON(xa, entry); 1343 1402 1344 - xa_erase_index(test, xa, ULONG_MAX); 1403 + xa_erase_index(xa, ULONG_MAX); 1345 1404 } 1346 1405 1347 - static noinline void check_find(struct kunit *test) 1406 + static noinline void check_find(struct xarray *xa) 1348 1407 { 1349 1408 unsigned i; 1350 1409 1351 - check_find_1(test); 1352 - check_find_2(test); 1353 - check_find_3(test); 1354 - check_find_4(test); 1410 + check_find_1(xa); 1411 + check_find_2(xa); 1412 + check_find_3(xa); 1413 + check_find_4(xa); 1355 1414 1356 1415 for (i = 2; i < 10; i++) 1357 - check_multi_find_1(test, i); 1358 - check_multi_find_2(test); 1359 - check_multi_find_3(test); 1416 + check_multi_find_1(xa, i); 1417 + check_multi_find_2(xa); 1418 + check_multi_find_3(xa); 1360 1419 } 1361 1420 1362 1421 /* See find_swap_entry() in mm/shmem.c */ ··· 1382 1441 
return entry ? xas.xa_index : -1; 1383 1442 } 1384 1443 1385 - static noinline void check_find_entry(struct kunit *test) 1444 + static noinline void check_find_entry(struct xarray *xa) 1386 1445 { 1387 - struct xarray *xa = xa_param(test); 1388 - 1389 1446 #ifdef CONFIG_XARRAY_MULTI 1390 1447 unsigned int order; 1391 1448 unsigned long offset, index; ··· 1410 1471 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 1411 1472 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1412 1473 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); 1413 - xa_erase_index(test, xa, ULONG_MAX); 1474 + xa_erase_index(xa, ULONG_MAX); 1414 1475 XA_BUG_ON(xa, !xa_empty(xa)); 1415 1476 } 1416 1477 1417 - static noinline void check_pause(struct kunit *test) 1478 + static noinline void check_pause(struct xarray *xa) 1418 1479 { 1419 - struct xarray *xa = xa_param(test); 1420 - 1421 1480 XA_STATE(xas, xa, 0); 1422 1481 void *entry; 1423 1482 unsigned int order; ··· 1485 1548 1486 1549 } 1487 1550 1488 - static noinline void check_move_tiny(struct kunit *test) 1551 + static noinline void check_move_tiny(struct xarray *xa) 1489 1552 { 1490 - struct xarray *xa = xa_param(test); 1491 - 1492 1553 XA_STATE(xas, xa, 0); 1493 1554 1494 1555 XA_BUG_ON(xa, !xa_empty(xa)); ··· 1503 1568 XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); 1504 1569 XA_BUG_ON(xa, xas_prev(&xas) != NULL); 1505 1570 rcu_read_unlock(); 1506 - xa_erase_index(test, xa, 0); 1571 + xa_erase_index(xa, 0); 1507 1572 XA_BUG_ON(xa, !xa_empty(xa)); 1508 1573 } 1509 1574 1510 - static noinline void check_move_max(struct kunit *test) 1575 + static noinline void check_move_max(struct xarray *xa) 1511 1576 { 1512 - struct xarray *xa = xa_param(test); 1513 - 1514 1577 XA_STATE(xas, xa, 0); 1515 1578 1516 1579 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); ··· 1524 1591 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); 1525 1592 rcu_read_unlock(); 1526 1593 1527 - xa_erase_index(test, xa, ULONG_MAX); 1594 + xa_erase_index(xa, ULONG_MAX); 1528 1595 XA_BUG_ON(xa, !xa_empty(xa)); 1529 1596 } 1530 1597 1531 - static noinline void check_move_small(struct kunit *test, unsigned long idx) 1598 + static noinline void check_move_small(struct xarray *xa, unsigned long idx) 1532 1599 { 1533 - struct xarray *xa = xa_param(test); 1534 - 1535 1600 XA_STATE(xas, xa, 0); 1536 1601 unsigned long i; 1537 1602 ··· 1571 1640 XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 1572 1641 rcu_read_unlock(); 1573 1642 1574 - xa_erase_index(test, xa, 0); 1575 - xa_erase_index(test, xa, idx); 1643 + xa_erase_index(xa, 0); 1644 + xa_erase_index(xa, idx); 1576 1645 XA_BUG_ON(xa, !xa_empty(xa)); 1577 1646 } 1578 1647 1579 - static noinline void check_move(struct kunit *test) 1648 + static noinline void check_move(struct xarray *xa) 1580 1649 { 1581 - struct xarray *xa = xa_param(test); 1582 - 1583 1650 XA_STATE(xas, xa, (1 << 16) - 1); 1584 1651 unsigned long i; 1585 1652 ··· 1604 1675 rcu_read_unlock(); 1605 1676 1606 1677 for (i = (1 << 8); i < (1 << 15); i++) 1607 - xa_erase_index(test, xa, i); 1678 + xa_erase_index(xa, i); 1608 1679 1609 1680 i = xas.xa_index; 1610 1681 ··· 1635 1706 1636 1707 xa_destroy(xa); 1637 1708 1638 - check_move_tiny(test); 1639 - check_move_max(test); 1709 + check_move_tiny(xa); 1710 + check_move_max(xa); 1640 1711 1641 1712 for (i = 0; i < 16; i++) 1642 - check_move_small(test, 1UL << i); 1713 + check_move_small(xa, 1UL << i); 1643 1714 1644 1715 for (i = 2; i < 16; i++) 1645 - check_move_small(test, (1UL << i) - 1); 1716 + check_move_small(xa, (1UL << i) - 1); 1646 1717 } 1647 
1718 1648 - static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa, 1719 + static noinline void xa_store_many_order(struct xarray *xa, 1649 1720 unsigned long index, unsigned order) 1650 1721 { 1651 1722 XA_STATE_ORDER(xas, xa, index, order); ··· 1668 1739 XA_BUG_ON(xa, xas_error(&xas)); 1669 1740 } 1670 1741 1671 - static noinline void check_create_range_1(struct kunit *test, 1742 + static noinline void check_create_range_1(struct xarray *xa, 1672 1743 unsigned long index, unsigned order) 1673 1744 { 1674 - struct xarray *xa = xa_param(test); 1675 - 1676 1745 unsigned long i; 1677 1746 1678 - xa_store_many_order(test, xa, index, order); 1747 + xa_store_many_order(xa, index, order); 1679 1748 for (i = index; i < index + (1UL << order); i++) 1680 - xa_erase_index(test, xa, i); 1749 + xa_erase_index(xa, i); 1681 1750 XA_BUG_ON(xa, !xa_empty(xa)); 1682 1751 } 1683 1752 1684 - static noinline void check_create_range_2(struct kunit *test, unsigned int order) 1753 + static noinline void check_create_range_2(struct xarray *xa, unsigned order) 1685 1754 { 1686 - struct xarray *xa = xa_param(test); 1687 - 1688 1755 unsigned long i; 1689 1756 unsigned long nr = 1UL << order; 1690 1757 1691 1758 for (i = 0; i < nr * nr; i += nr) 1692 - xa_store_many_order(test, xa, i, order); 1759 + xa_store_many_order(xa, i, order); 1693 1760 for (i = 0; i < nr * nr; i++) 1694 - xa_erase_index(test, xa, i); 1761 + xa_erase_index(xa, i); 1695 1762 XA_BUG_ON(xa, !xa_empty(xa)); 1696 1763 } 1697 1764 1698 - static noinline void check_create_range_3(struct kunit *test) 1765 + static noinline void check_create_range_3(void) 1699 1766 { 1700 1767 XA_STATE(xas, NULL, 0); 1701 1768 xas_set_err(&xas, -EEXIST); ··· 1699 1774 XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); 1700 1775 } 1701 1776 1702 - static noinline void check_create_range_4(struct kunit *test, 1777 + static noinline void check_create_range_4(struct xarray *xa, 1703 1778 unsigned long index, unsigned order) 1704 1779 { 1705 - struct xarray *xa = xa_param(test); 1706 - 1707 1780 XA_STATE_ORDER(xas, xa, index, order); 1708 1781 unsigned long base = xas.xa_index; 1709 1782 unsigned long i = 0; ··· 1727 1804 XA_BUG_ON(xa, xas_error(&xas)); 1728 1805 1729 1806 for (i = base; i < base + (1UL << order); i++) 1730 - xa_erase_index(test, xa, i); 1807 + xa_erase_index(xa, i); 1731 1808 XA_BUG_ON(xa, !xa_empty(xa)); 1732 1809 } 1733 1810 1734 - static noinline void check_create_range_5(struct kunit *test, 1811 + static noinline void check_create_range_5(struct xarray *xa, 1735 1812 unsigned long index, unsigned int order) 1736 1813 { 1737 - struct xarray *xa = xa_param(test); 1738 - 1739 1814 XA_STATE_ORDER(xas, xa, index, order); 1740 1815 unsigned int i; 1741 1816 ··· 1750 1829 xa_destroy(xa); 1751 1830 } 1752 1831 1753 - static noinline void check_create_range(struct kunit *test) 1832 + static noinline void check_create_range(struct xarray *xa) 1754 1833 { 1755 1834 unsigned int order; 1756 1835 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
12 : 1;
1757 1836
1758 1837 for (order = 0; order < max_order; order++) {
1759 - check_create_range_1(test, 0, order);
1760 - check_create_range_1(test, 1U << order, order);
1761 - check_create_range_1(test, 2U << order, order);
1762 - check_create_range_1(test, 3U << order, order);
1763 - check_create_range_1(test, 1U << 24, order);
1838 + check_create_range_1(xa, 0, order);
1839 + check_create_range_1(xa, 1U << order, order);
1840 + check_create_range_1(xa, 2U << order, order);
1841 + check_create_range_1(xa, 3U << order, order);
1842 + check_create_range_1(xa, 1U << 24, order);
1764 1843 if (order < 10)
1765 - check_create_range_2(test, order);
1844 + check_create_range_2(xa, order);
1766 1845
1767 - check_create_range_4(test, 0, order);
1768 - check_create_range_4(test, 1U << order, order);
1769 - check_create_range_4(test, 2U << order, order);
1770 - check_create_range_4(test, 3U << order, order);
1771 - check_create_range_4(test, 1U << 24, order);
1846 + check_create_range_4(xa, 0, order);
1847 + check_create_range_4(xa, 1U << order, order);
1848 + check_create_range_4(xa, 2U << order, order);
1849 + check_create_range_4(xa, 3U << order, order);
1850 + check_create_range_4(xa, 1U << 24, order);
1772 1851
1773 - check_create_range_4(test, 1, order);
1774 - check_create_range_4(test, (1U << order) + 1, order);
1775 - check_create_range_4(test, (2U << order) + 1, order);
1776 - check_create_range_4(test, (2U << order) - 1, order);
1777 - check_create_range_4(test, (3U << order) + 1, order);
1778 - check_create_range_4(test, (3U << order) - 1, order);
1779 - check_create_range_4(test, (1U << 24) + 1, order);
1852 + check_create_range_4(xa, 1, order);
1853 + check_create_range_4(xa, (1U << order) + 1, order);
1854 + check_create_range_4(xa, (2U << order) + 1, order);
1855 + check_create_range_4(xa, (2U << order) - 1, order);
1856 + check_create_range_4(xa, (3U << order) + 1, order);
1857 + check_create_range_4(xa, (3U << order) - 1, order);
1858 + check_create_range_4(xa, (1U << 24) + 1, order);
1780 1859
1781 - check_create_range_5(test, 0, order);
1782 - check_create_range_5(test, (1U << order), order);
1860 + check_create_range_5(xa, 0, order);
1861 + check_create_range_5(xa, (1U << order), order);
1783 1862 }
1784 1863
1785 - check_create_range_3(test);
1864 + check_create_range_3();
1786 1865 }
1787 1866
1788 - static noinline void __check_store_range(struct kunit *test, unsigned long first,
1867 + static noinline void __check_store_range(struct xarray *xa, unsigned long first,
1789 1868 unsigned long last)
1790 1869 {
1791 - struct xarray *xa = xa_param(test);
1792 -
1793 1870 #ifdef CONFIG_XARRAY_MULTI
1794 1871 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
1795 1872
··· 1802 1883 XA_BUG_ON(xa, !xa_empty(xa));
1803 1884 }
1804 1885
1805 - static noinline void check_store_range(struct kunit *test)
1886 + static noinline void check_store_range(struct xarray *xa)
1806 1887 {
1807 1888 unsigned long i, j;
1808 1889
1809 1890 for (i = 0; i < 128; i++) {
1810 1891 for (j = i; j < 128; j++) {
1811 - __check_store_range(test, i, j);
1812 - __check_store_range(test, 128 + i, 128 + j);
1813 - __check_store_range(test, 4095 + i, 4095 + j);
1814 - __check_store_range(test, 4096 + i, 4096 + j);
1815 - __check_store_range(test, 123456 + i, 123456 + j);
1816 - __check_store_range(test, (1 << 24) + i, (1 << 24) + j);
1892 + __check_store_range(xa, i, j);
1893 + __check_store_range(xa, 128 + i, 128 + j);
1894 + __check_store_range(xa, 4095 + i, 4095 + j);
1895 + __check_store_range(xa, 4096 + i, 4096 + j);
1896 + __check_store_range(xa, 123456 + i, 123456 + j);
1897 + __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
1817 1898 }
1818 1899 }
1819 1900 }
1820 1901
1821 1902 #ifdef CONFIG_XARRAY_MULTI
1822 - static void check_split_1(struct kunit *test, unsigned long index,
1903 + static void check_split_1(struct xarray *xa, unsigned long index,
1823 1904 unsigned int order, unsigned int new_order)
1824 1905 {
1825 - struct xarray *xa = xa_param(test);
1826 -
1827 1906 XA_STATE_ORDER(xas, xa, index, new_order);
1828 1907 unsigned int i, found;
1829 1908 void *entry;
··· 1857 1940 xa_destroy(xa);
1858 1941 }
1859 1942
1860 - static noinline void check_split(struct kunit *test)
1943 + static noinline void check_split(struct xarray *xa)
1861 1944 {
1862 - struct xarray *xa = xa_param(test);
1863 -
1864 1945 unsigned int order, new_order;
1865 1946
1866 1947 XA_BUG_ON(xa, !xa_empty(xa));
1867 1948
1868 1949 for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
1869 1950 for (new_order = 0; new_order < order; new_order++) {
1870 - check_split_1(test, 0, order, new_order);
1871 - check_split_1(test, 1UL << order, order, new_order);
1872 - check_split_1(test, 3UL << order, order, new_order);
1951 + check_split_1(xa, 0, order, new_order);
1952 + check_split_1(xa, 1UL << order, order, new_order);
1953 + check_split_1(xa, 3UL << order, order, new_order);
1873 1954 }
1874 1955 }
1875 1956 }
1876 1957 #else
1877 - static void check_split(struct kunit *test) { }
1958 + static void check_split(struct xarray *xa) { }
1878 1959 #endif
1879 1960
1880 - static void check_align_1(struct kunit *test, char *name)
1961 + static void check_align_1(struct xarray *xa, char *name)
1881 1962 {
1882 - struct xarray *xa = xa_param(test);
1883 -
1884 1963 int i;
1885 1964 unsigned int id;
1886 1965 unsigned long index;
··· 1896 1983 * We should always be able to store without allocating memory after
1897 1984 * reserving a slot.
1898 1985 */
1899 - static void check_align_2(struct kunit *test, char *name)
1986 + static void check_align_2(struct xarray *xa, char *name)
1900 1987 {
1901 - struct xarray *xa = xa_param(test);
1902 -
1903 1988 int i;
1904 1989
1905 1990 XA_BUG_ON(xa, !xa_empty(xa));
··· 1916 2005 XA_BUG_ON(xa, !xa_empty(xa));
1917 2006 }
1918 2007
1919 - static noinline void check_align(struct kunit *test)
2008 + static noinline void check_align(struct xarray *xa)
1920 2009 {
1921 2010 char name[] = "Motorola 68000";
1922 2011
1923 - check_align_1(test, name);
1924 - check_align_1(test, name + 1);
1925 - check_align_1(test, name + 2);
1926 - check_align_1(test, name + 3);
1927 - check_align_2(test, name);
2012 + check_align_1(xa, name);
2013 + check_align_1(xa, name + 1);
2014 + check_align_1(xa, name + 2);
2015 + check_align_1(xa, name + 3);
2016 + check_align_2(xa, name);
1928 2017 }
1929 2018
1930 2019 static LIST_HEAD(shadow_nodes);
··· 1940 2029 }
1941 2030 }
1942 2031
1943 - static noinline void shadow_remove(struct kunit *test, struct xarray *xa)
2032 + static noinline void shadow_remove(struct xarray *xa)
1944 2033 {
1945 2034 struct xa_node *node;
1946 2035
··· 1954 2043 xa_unlock(xa);
1955 2044 }
1956 2045
1957 - struct workingset_testcase {
1958 - struct xarray *xa;
1959 - unsigned long index;
1960 - };
1961 -
1962 - static noinline void check_workingset(struct kunit *test)
2046 + static noinline void check_workingset(struct xarray *xa, unsigned long index)
1963 2047 {
1964 - struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value;
1965 - struct xarray *xa = tc.xa;
1966 - unsigned long index = tc.index;
1967 -
1968 2048 XA_STATE(xas, xa, index);
1969 2049 xas_set_update(&xas, test_update_node);
1970 2050
··· 1978 2076 xas_unlock(&xas);
1979 2077 XA_BUG_ON(xa, list_empty(&shadow_nodes));
1980 2078
1981 - shadow_remove(test, xa);
2079 + shadow_remove(xa);
1982 2080 XA_BUG_ON(xa, !list_empty(&shadow_nodes));
1983 2081 XA_BUG_ON(xa, !xa_empty(xa));
1984 2082 }
··· 1987 2085 * Check that the pointer / value / sibling entries are accounted the
1988 2086 * way we expect them to be.
1989 2087 */
1990 - static noinline void check_account(struct kunit *test)
2088 + static noinline void check_account(struct xarray *xa)
1991 2089 {
1992 2090 #ifdef CONFIG_XARRAY_MULTI
1993 - struct xarray *xa = xa_param(test);
1994 -
1995 2091 unsigned int order;
1996 2092
1997 2093 for (order = 1; order < 12; order++) {
··· 2016 2116 #endif
2017 2117 }
2018 2118
2019 - static noinline void check_get_order(struct kunit *test)
2119 + static noinline void check_get_order(struct xarray *xa)
2020 2120 {
2021 - struct xarray *xa = xa_param(test);
2022 -
2023 2121 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
2024 2122 unsigned int order;
2025 2123 unsigned long i, j;
··· 2036 2138 }
2037 2139 }
2038 2140
2039 - static noinline void check_xas_get_order(struct kunit *test)
2141 + static noinline void check_xas_get_order(struct xarray *xa)
2040 2142 {
2041 - struct xarray *xa = xa_param(test);
2042 -
2043 2143 XA_STATE(xas, xa, 0);
2044 2144
2045 2145 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
··· 2069 2173 }
2070 2174 }
2071 2175
2072 - static noinline void check_xas_conflict_get_order(struct kunit *test)
2176 + static noinline void check_xas_conflict_get_order(struct xarray *xa)
2073 2177 {
2074 - struct xarray *xa = xa_param(test);
2075 -
2076 2178 XA_STATE(xas, xa, 0);
2077 2179
2078 2180 void *entry;
··· 2127 2233 }
2128 2234
2129 2235
2130 - static noinline void check_destroy(struct kunit *test)
2236 + static noinline void check_destroy(struct xarray *xa)
2131 2237 {
2132 - struct xarray *xa = xa_param(test);
2133 -
2134 2238 unsigned long index;
2135 2239
2136 2240 XA_BUG_ON(xa, !xa_empty(xa));
··· 2161 2269 }
2162 2270
2163 2271 static DEFINE_XARRAY(array);
2164 - static struct xarray *arrays[] = { &array };
2165 - KUNIT_ARRAY_PARAM(array, arrays, NULL);
2166 2272
2167 - static struct xarray *xa0s[] = { &xa0 };
2168 - KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);
2273 + static int xarray_checks(void)
2274 + {
2275 + check_xa_err(&array);
2276 + check_xas_retry(&array);
2277 + check_xa_load(&array);
2278 + check_xa_mark(&array);
2279 + check_xa_shrink(&array);
2280 + check_xas_erase(&array);
2281 + check_insert(&array);
2282 + check_cmpxchg(&array);
2283 + check_cmpxchg_order(&array);
2284 + check_reserve(&array);
2285 + check_reserve(&xa0);
2286 + check_multi_store(&array);
2287 + check_multi_store_advanced(&array);
2288 + check_get_order(&array);
2289 + check_xas_get_order(&array);
2290 + check_xas_conflict_get_order(&array);
2291 + check_xa_alloc();
2292 + check_find(&array);
2293 + check_find_entry(&array);
2294 + check_pause(&array);
2295 + check_account(&array);
2296 + check_destroy(&array);
2297 + check_move(&array);
2298 + check_create_range(&array);
2299 + check_store_range(&array);
2300 + check_store_iter(&array);
2301 + check_align(&xa0);
2302 + check_split(&array);
2169 2303
2170 - static struct workingset_testcase workingset_testcases[] = {
2171 - { &array, 0 },
2172 - { &array, 64 },
2173 - { &array, 4096 },
2174 - };
2175 - KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);
2304 + check_workingset(&array, 0);
2305 + check_workingset(&array, 64);
2306 + check_workingset(&array, 4096);
2176 2307
2177 - static struct kunit_case xarray_cases[] = {
2178 - KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
2179 - KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
2180 - KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
2181 - KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
2182 - KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
2183 - KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
2184 - KUNIT_CASE_PARAM(check_insert, array_gen_params),
2185 - KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
2186 - KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
2187 - KUNIT_CASE_PARAM(check_reserve, array_gen_params),
2188 - KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
2189 - KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
2190 - KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params),
2191 - KUNIT_CASE_PARAM(check_get_order, array_gen_params),
2192 - KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params),
2193 - KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params),
2194 - KUNIT_CASE(check_xa_alloc),
2195 - KUNIT_CASE_PARAM(check_find, array_gen_params),
2196 - KUNIT_CASE_PARAM(check_find_entry, array_gen_params),
2197 - KUNIT_CASE_PARAM(check_pause, array_gen_params),
2198 - KUNIT_CASE_PARAM(check_account, array_gen_params),
2199 - KUNIT_CASE_PARAM(check_destroy, array_gen_params),
2200 - KUNIT_CASE_PARAM(check_move, array_gen_params),
2201 - KUNIT_CASE_PARAM(check_create_range, array_gen_params),
2202 - KUNIT_CASE_PARAM(check_store_range, array_gen_params),
2203 - KUNIT_CASE_PARAM(check_store_iter, array_gen_params),
2204 - KUNIT_CASE_PARAM(check_align, xa0_gen_params),
2205 - KUNIT_CASE_PARAM(check_split, array_gen_params),
2206 - KUNIT_CASE_PARAM(check_workingset, workingset_gen_params),
2207 - {},
2208 - };
2308 + printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
2309 + return (tests_run == tests_passed) ? 0 : -EINVAL;
2310 + }
2209 2311
2210 - static struct kunit_suite xarray_suite = {
2211 - .name = "xarray",
2212 - .test_cases = xarray_cases,
2213 - };
2312 + static void xarray_exit(void)
2313 + {
2314 + }
2214 2315
2215 - kunit_test_suite(xarray_suite);
2216 -
2316 + module_init(xarray_checks);
2317 + module_exit(xarray_exit);
2217 2318 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
2218 2319 MODULE_DESCRIPTION("XArray API test module");
2219 2320 MODULE_LICENSE("GPL");
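The revert returns lib/test_xarray.c to the plain module self-test style: each check_*() receives the xarray under test directly, XA_BUG_ON() updates the global tests_run/tests_passed counters, and module_init() runs the whole list and prints the totals. A minimal userspace sketch of that pattern follows; the CHECK() macro and check_example() are illustrative stand-ins for XA_BUG_ON() and the kernel's check functions, not the kernel code itself.

#include <stdio.h>

/* Illustrative model of the restored self-test style: global counters,
 * a BUG_ON-like macro that records failures, and one driver function. */
static unsigned int tests_run, tests_passed;

#define CHECK(cond) do {                                \
        tests_run++;                                    \
        if (cond)                                       \
                tests_passed++;                         \
        else                                            \
                fprintf(stderr, "FAIL: %s\n", #cond);   \
} while (0)

static void check_example(int *array)
{
        CHECK(array[0] == 0);   /* each check_*() gets the array directly */
}

int main(void)
{
        int array[1] = { 0 };

        check_example(array);
        printf("demo: %u of %u tests passed\n", tests_passed, tests_run);
        return tests_run == tests_passed ? 0 : 1;
}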
+25 -4
mm/compaction.c
··· 2491 2491 */
2492 2492 static enum compact_result
2493 2493 compaction_suit_allocation_order(struct zone *zone, unsigned int order,
2494 - int highest_zoneidx, unsigned int alloc_flags)
2494 + int highest_zoneidx, unsigned int alloc_flags,
2495 + bool async)
2495 2496 {
2496 2497 unsigned long watermark;
2497 2498
··· 2500 2499 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2501 2500 alloc_flags))
2502 2501 return COMPACT_SUCCESS;
2502 +
2503 + /*
2504 + * For unmovable allocations (without ALLOC_CMA), check if there is enough
2505 + * free memory in the non-CMA pageblocks. Otherwise compaction could form
2506 + * the high-order page in CMA pageblocks, which would not help the
2507 + * allocation to succeed. However, limit the check to costly order async
2508 + * compaction (such as opportunistic THP attempts) because there is the
2509 + * possibility that compaction would migrate pages from non-CMA to CMA
2510 + * pageblock.
2511 + */
2512 + if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
2513 + !(alloc_flags & ALLOC_CMA)) {
2514 + watermark = low_wmark_pages(zone) + compact_gap(order);
2515 + if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
2516 + 0, zone_page_state(zone, NR_FREE_PAGES)))
2517 + return COMPACT_SKIPPED;
2518 + }
2503 2519
2504 2520 if (!compaction_suitable(zone, order, highest_zoneidx))
2505 2521 return COMPACT_SKIPPED;
··· 2553 2535 if (!is_via_compact_memory(cc->order)) {
2554 2536 ret = compaction_suit_allocation_order(cc->zone, cc->order,
2555 2537 cc->highest_zoneidx,
2556 - cc->alloc_flags);
2538 + cc->alloc_flags,
2539 + cc->mode == MIGRATE_ASYNC);
2557 2540 if (ret != COMPACT_CONTINUE)
2558 2541 return ret;
2559 2542 }
··· 3057 3038
3058 3039 ret = compaction_suit_allocation_order(zone,
3059 3040 pgdat->kcompactd_max_order,
3060 - highest_zoneidx, ALLOC_WMARK_MIN);
3041 + highest_zoneidx, ALLOC_WMARK_MIN,
3042 + false);
3061 3043 if (ret == COMPACT_CONTINUE)
3062 3044 return true;
3063 3045 }
··· 3099 3079 continue;
3100 3080
3101 3081 ret = compaction_suit_allocation_order(zone,
3102 - cc.order, zoneid, ALLOC_WMARK_MIN);
3082 + cc.order, zoneid, ALLOC_WMARK_MIN,
3083 + false);
3103 3084 if (ret != COMPACT_CONTINUE)
3104 3085 continue;
3105 3086
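The new bail-out is easiest to read with concrete numbers. Assuming compact_gap() is roughly twice the allocation size (2 << order, as in the kernel's definition), a costly-order async attempt without ALLOC_CMA must see low_wmark plus that gap in non-CMA free pages before compaction runs. A small standalone model of the decision, with made-up zone numbers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the added skip test; all values are hypothetical
 * and free_non_cma stands in for NR_FREE_PAGES minus free CMA pages. */
#define PAGE_ALLOC_COSTLY_ORDER 3

static unsigned long compact_gap(unsigned int order)
{
        return 2UL << order;    /* assumed: twice the allocation size */
}

int main(void)
{
        unsigned int order = 9;                 /* e.g. an opportunistic THP attempt */
        bool async = true, alloc_cma = false;
        unsigned long low_wmark = 2048;         /* pages, hypothetical zone */
        unsigned long free_non_cma = 1500;      /* pages outside CMA pageblocks */

        unsigned long watermark = low_wmark + compact_gap(order);

        if (order > PAGE_ALLOC_COSTLY_ORDER && async && !alloc_cma &&
            free_non_cma < watermark)
                printf("COMPACT_SKIPPED: %lu non-CMA pages free, need %lu\n",
                       free_non_cma, watermark);
        else
                printf("continue compaction\n");
        return 0;
}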
+4 -10
mm/gup.c
··· 2320 2320 /*
2321 2321 * Returns the number of collected folios. Return value is always >= 0.
2322 2322 */
2323 - static unsigned long collect_longterm_unpinnable_folios(
2323 + static void collect_longterm_unpinnable_folios(
2324 2324 struct list_head *movable_folio_list,
2325 2325 struct pages_or_folios *pofs)
2326 2326 {
2327 - unsigned long i, collected = 0;
2328 2327 struct folio *prev_folio = NULL;
2329 2328 bool drain_allow = true;
2329 + unsigned long i;
2330 2330
2331 2331 for (i = 0; i < pofs->nr_entries; i++) {
2332 2332 struct folio *folio = pofs_get_folio(pofs, i);
··· 2337 2337
2338 2338 if (folio_is_longterm_pinnable(folio))
2339 2339 continue;
2340 -
2341 - collected++;
2342 2340
2343 2341 if (folio_is_device_coherent(folio))
2344 2342 continue;
··· 2359 2361 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2360 2362 folio_nr_pages(folio));
2361 2363 }
2362 -
2363 - return collected;
2364 2364 }
2365 2365
2366 2366 /*
··· 2435 2439 check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs)
2436 2440 {
2437 2441 LIST_HEAD(movable_folio_list);
2438 - unsigned long collected;
2439 2442
2440 - collected = collect_longterm_unpinnable_folios(&movable_folio_list,
2441 - pofs);
2442 - if (!collected)
2443 + collect_longterm_unpinnable_folios(&movable_folio_list, pofs);
2444 + if (list_empty(&movable_folio_list))
2443 2445 return 0;
2444 2446
2445 2447 return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs);
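The infinite-loop fix hinges on which condition gates the early return: folios that look long-term unpinnable but never make it onto the movable list (for example device-coherent folios, or folios whose isolation fails) used to be counted anyway, so the caller kept retrying a migration that had nothing to migrate. A toy model of the old versus new decision, with hypothetical counts:

#include <stdbool.h>
#include <stdio.h>

/* Whether to bail out early must depend on what was actually queued for
 * migration, not on how many folios merely looked unpinnable. */
int main(void)
{
        unsigned long collected = 3;    /* folios seen as long-term unpinnable */
        unsigned long queued = 0;       /* folios actually put on the movable list */

        bool old_bails_out = (collected == 0);  /* old: never true here, so retry forever */
        bool new_bails_out = (queued == 0);     /* new: empty list means nothing left to do */

        printf("old: bail out early? %s\n", old_bails_out ? "yes" : "no");
        printf("new: bail out early? %s\n", new_bails_out ? "yes" : "no");
        return 0;
}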
+1 -1
mm/hugetlb.c
··· 3309 3309 .thread_fn = gather_bootmem_prealloc_parallel,
3310 3310 .fn_arg = NULL,
3311 3311 .start = 0,
3312 - .size = num_node_state(N_MEMORY),
3312 + .size = nr_node_ids,
3313 3313 .align = 1,
3314 3314 .min_chunk = 1,
3315 3315 .max_threads = num_node_state(N_MEMORY),
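The hugetlb fix matters when memory nodes are interleaved with memoryless ones, so the count of N_MEMORY nodes is smaller than the highest node id in use and a job sized by the count stops before the last populated node. A small standalone illustration with a hypothetical three-node layout:

#include <stdio.h>

/* Hypothetical layout: nodes 0 and 2 have memory, node 1 does not.
 * Sizing the parallel job by the count of memory nodes (2) misses node 2;
 * sizing by nr_node_ids (highest node id + 1 == 3) covers every node. */
int main(void)
{
        int has_memory[] = { 1, 0, 1 };
        int nr_node_ids = 3;
        int num_memory_nodes = 2;

        printf("sized by num_node_state(N_MEMORY):");
        for (int nid = 0; nid < num_memory_nodes; nid++)
                if (has_memory[nid])
                        printf(" node%d", nid);
        printf("  (node2 never reached)\n");

        printf("sized by nr_node_ids:");
        for (int nid = 0; nid < nr_node_ids; nid++)
                if (has_memory[nid])
                        printf(" node%d", nid);
        printf("\n");
        return 0;
}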
+2
mm/kfence/core.c
··· 21 21 #include <linux/log2.h>
22 22 #include <linux/memblock.h>
23 23 #include <linux/moduleparam.h>
24 + #include <linux/nodemask.h>
24 25 #include <linux/notifier.h>
25 26 #include <linux/panic_notifier.h>
26 27 #include <linux/random.h>
··· 1085 1084 * properties (e.g. reside in DMAable memory).
1086 1085 */
1087 1086 if ((flags & GFP_ZONEMASK) ||
1087 + ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
1088 1088 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1089 1089 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1090 1090 return NULL;
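KFENCE serves allocations from its own pool, which generally sits on a single node, so honoring __GFP_THISNODE on a machine with more than one online node could hand back memory on the wrong node; such requests now fall through to the regular allocator and are counted as incompatible. A toy evaluation of the added clause (the flag values below are stand-ins, not the kernel's real gfp bit encoding):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_GFP_ZONEMASK  0x0f   /* hypothetical bit layout for illustration */
#define DEMO_GFP_THISNODE  0x10

static bool kfence_skip(unsigned int flags, int online_nodes)
{
        return (flags & DEMO_GFP_ZONEMASK) ||
               ((flags & DEMO_GFP_THISNODE) && online_nodes > 1);
}

int main(void)
{
        printf("__GFP_THISNODE, 1 node : skip=%d\n", kfence_skip(DEMO_GFP_THISNODE, 1));
        printf("__GFP_THISNODE, 2 nodes: skip=%d\n", kfence_skip(DEMO_GFP_THISNODE, 2));
        return 0;
}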
+1 -1
mm/kmemleak.c
··· 1689 1689 unsigned long phys = object->pointer;
1690 1690
1691 1691 if (PHYS_PFN(phys) < min_low_pfn ||
1692 - PHYS_PFN(phys + object->size) >= max_low_pfn)
1692 + PHYS_PFN(phys + object->size) > max_low_pfn)
1693 1693 __paint_it(object, KMEMLEAK_BLACK);
1694 1694
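A worked example of the kmemleak boundary change: an object whose last byte sits exactly at the max_low_pfn boundary is still entirely within lowmem, but the old ">=" comparison excluded it from scanning; only objects that actually extend past the boundary should be painted black. With hypothetical numbers:

#include <stdio.h>

/* Made-up numbers: 4 KiB pages, max_low_pfn = 0x1000. */
#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long max_low_pfn = 0x1000;
        unsigned long phys = (max_low_pfn << PAGE_SHIFT) - 0x1000;  /* last lowmem page */
        unsigned long size = 0x1000;

        unsigned long end_pfn = PHYS_PFN(phys + size);  /* ends exactly on the boundary */

        printf("old check (>=): %s\n",
               end_pfn >= max_low_pfn ? "excluded (wrong)" : "scanned");
        printf("new check (>) : %s\n",
               end_pfn > max_low_pfn ? "excluded" : "scanned (correct)");
        return 0;
}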
+1 -1
mm/swapfile.c
··· 794 794 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
795 795 continue;
796 796 if (need_reclaim) {
797 - ret = cluster_reclaim_range(si, ci, start, end);
797 + ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
798 798 /*
799 799 * Reclaim drops ci->lock and cluster could be used
800 800 * by another order. Not checking flag as off-list
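The allocation path scans the cluster in nr_pages-sized windows starting at offset, so the range handed to reclaim has to be the window that was just scanned, not the original [start, end) span. A small standalone model of the offset arithmetic (all values hypothetical):

#include <stdio.h>

/* Walk a cluster in nr_pages-sized windows and show which range each
 * variant would reclaim when a window turns out to need reclaim. */
int main(void)
{
        unsigned long start = 0, end = 512, nr_pages = 4;

        for (unsigned long offset = start; offset + nr_pages <= end;
             offset += nr_pages) {
                if (offset == 256) {    /* pretend this window needs reclaim */
                        printf("buggy : reclaim [%lu, %lu)\n", start, end);
                        printf("fixed : reclaim [%lu, %lu)\n",
                               offset, offset + nr_pages);
                        break;
                }
        }
        return 0;
}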
+9 -4
mm/vmscan.c
··· 1086 1086 struct folio_batch free_folios;
1087 1087 LIST_HEAD(ret_folios);
1088 1088 LIST_HEAD(demote_folios);
1089 - unsigned int nr_reclaimed = 0;
1089 + unsigned int nr_reclaimed = 0, nr_demoted = 0;
1090 1090 unsigned int pgactivate = 0;
1091 1091 bool do_demote_pass;
1092 1092 struct swap_iocb *plug = NULL;
··· 1550 1550 /* 'folio_list' is always empty here */
1551 1551
1552 1552 /* Migrate folios selected for demotion */
1553 - stat->nr_demoted = demote_folio_list(&demote_folios, pgdat);
1554 - nr_reclaimed += stat->nr_demoted;
1553 + nr_demoted = demote_folio_list(&demote_folios, pgdat);
1554 + nr_reclaimed += nr_demoted;
1555 + stat->nr_demoted += nr_demoted;
1555 1556 /* Folios that could not be demoted are still in @demote_folios */
1556 1557 if (!list_empty(&demote_folios)) {
1557 1558 /* Folios which weren't demoted go back on @folio_list */
··· 1693 1692 unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1694 1693 unsigned long skipped = 0;
1695 1694 unsigned long scan, total_scan, nr_pages;
1695 + unsigned long max_nr_skipped = 0;
1696 1696 LIST_HEAD(folios_skipped);
1697 1697
1698 1698 total_scan = 0;
··· 1708 1706 nr_pages = folio_nr_pages(folio);
1709 1707 total_scan += nr_pages;
1710 1708
1711 - if (folio_zonenum(folio) > sc->reclaim_idx) {
1709 + /* Using max_nr_skipped to prevent hard LOCKUP*/
1710 + if (max_nr_skipped < SWAP_CLUSTER_MAX_SKIPPED &&
1711 + (folio_zonenum(folio) > sc->reclaim_idx)) {
1712 1712 nr_skipped[folio_zonenum(folio)] += nr_pages;
1713 1713 move_to = &folios_skipped;
1714 + max_nr_skipped++;
1714 1715 goto move;
1715 1716
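The demotion path above can run more than once within a single shrink_folio_list() call, so assigning the result into stat->nr_demoted kept only the last batch; accumulating preserves the total, while nr_reclaimed still receives each batch as before. (The separate max_nr_skipped change just caps how many folios one isolation pass may skip, so the loop cannot spin long enough to look like a hard lockup.) A trivial standalone illustration of overwrite versus accumulate, with hypothetical per-batch counts:

#include <stdio.h>

/* Two demotion batches inside one reclaim pass: "=" loses the first batch,
 * "+=" keeps the total. */
int main(void)
{
        unsigned int batches[2] = { 5, 0 };
        unsigned int overwrite = 0, accumulate = 0;

        for (int i = 0; i < 2; i++) {
                overwrite = batches[i];
                accumulate += batches[i];
        }
        printf("overwrite=%u accumulate=%u\n", overwrite, accumulate);
        return 0;
}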
+1 -1
mm/zsmalloc.c
··· 452 452 .lock = INIT_LOCAL_LOCK(lock),
453 453 };
454 454
455 - static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
455 + static inline bool __maybe_unused is_first_zpdesc(struct zpdesc *zpdesc)
456 456 {
457 457 return PagePrivate(zpdesc_page(zpdesc));
458 458 }
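__maybe_unused keeps the build quiet when a small helper is compiled but not referenced in some configurations, which would otherwise trigger -Wunused-function. A minimal illustration of the attribute; CONFIG_DEMO_DEBUG and the helper below are hypothetical, and __maybe_unused is assumed to expand to the unused attribute as it does in the kernel:

#include <stdio.h>

#define __maybe_unused __attribute__((__unused__))

/* Compiled in every build, but only called when CONFIG_DEMO_DEBUG is set;
 * the attribute suppresses the unused-function warning in other builds. */
static int __maybe_unused helper_only_used_for_debug(int x)
{
        return x * 2;
}

int main(void)
{
#ifdef CONFIG_DEMO_DEBUG
        printf("%d\n", helper_only_used_for_debug(21));
#else
        printf("debug helper compiled but not called; no warning emitted\n");
#endif
        return 0;
}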
+1 -1
scripts/gdb/linux/cpus.py
··· 167 167 var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
168 168 return per_cpu(var_ptr, cpu).dereference()
169 169 elif utils.is_target_arch("aarch64"):
170 - current_task_addr = gdb.parse_and_eval("$SP_EL0")
170 + current_task_addr = gdb.parse_and_eval("(unsigned long)$SP_EL0")
171 171 if (current_task_addr >> 63) != 0:
172 172 current_task = current_task_addr.cast(task_ptr_type)
173 173 return current_task.dereference()