Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2025-03-08-16-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"33 hotfixes. 24 are cc:stable and the remainder address post-6.13
issues or aren't considered necessary for -stable kernels.

26 are for MM and 7 are for non-MM.

- "mm: memory_failure: unmap poisoned folio during migrate properly"
from Ma Wupeng fixes a couple of two year old bugs involving the
migration of hwpoisoned folios.

- "selftests/damon: three fixes for false results" from SeongJae Park
fixes three one year old bugs in the SAMON selftest code.

The remainder are singletons and doubletons. Please see the individual
changelogs for details"

* tag 'mm-hotfixes-stable-2025-03-08-16-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (33 commits)
mm/page_alloc: fix uninitialized variable
rapidio: add check for rio_add_net() in rio_scan_alloc_net()
rapidio: fix an API misuse when rio_add_net() fails
MAINTAINERS: .mailmap: update Sumit Garg's email address
Revert "mm/page_alloc.c: don't show protection in zone's ->lowmem_reserve[] for empty zone"
mm: fix finish_fault() handling for large folios
mm: don't skip arch_sync_kernel_mappings() in error paths
mm: shmem: remove unnecessary warning in shmem_writepage()
userfaultfd: fix PTE unmapping stack-allocated PTE copies
userfaultfd: do not block on locking a large folio with raised refcount
mm: zswap: use ATOMIC_LONG_INIT to initialize zswap_stored_pages
mm: shmem: fix potential data corruption during shmem swapin
mm: fix kernel BUG when userfaultfd_move encounters swapcache
selftests/damon/damon_nr_regions: sort collected regions before checking with min/max boundaries
selftests/damon/damon_nr_regions: set ops update for merge results check to 100ms
selftests/damon/damos_quota: make real expectation of quota exceeds
include/linux/log2.h: mark is_power_of_2() with __always_inline
NFS: fix nfs_release_folio() to not deadlock via kcompactd writeback
mm, swap: avoid BUG_ON in relocate_cluster()
mm: swap: use correct step in loop to wait all clusters in wait_for_allocation()
...

+349 -123
+1
.mailmap
··· 691 691 Subhash Jadavani <subhashj@codeaurora.org> 692 692 Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org> 693 693 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> 694 + Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org> 694 695 Sumit Semwal <sumit.semwal@ti.com> 695 696 Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org> 696 697 Sven Eckelmann <sven@narfation.org> <seckelmann@datto.com>
+3 -3
MAINTAINERS
··· 12875 12875 F: security/keys/trusted-keys/trusted_dcp.c 12876 12876 12877 12877 KEYS-TRUSTED-TEE 12878 - M: Sumit Garg <sumit.garg@linaro.org> 12878 + M: Sumit Garg <sumit.garg@kernel.org> 12879 12879 L: linux-integrity@vger.kernel.org 12880 12880 L: keyrings@vger.kernel.org 12881 12881 S: Supported ··· 17675 17675 F: drivers/tee/optee/ 17676 17676 17677 17677 OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER 17678 - M: Sumit Garg <sumit.garg@linaro.org> 17678 + M: Sumit Garg <sumit.garg@kernel.org> 17679 17679 L: op-tee@lists.trustedfirmware.org 17680 17680 S: Maintained 17681 17681 F: drivers/char/hw_random/optee-rng.c ··· 23288 23288 23289 23289 TEE SUBSYSTEM 23290 23290 M: Jens Wiklander <jens.wiklander@linaro.org> 23291 - R: Sumit Garg <sumit.garg@linaro.org> 23291 + R: Sumit Garg <sumit.garg@kernel.org> 23292 23292 L: op-tee@lists.trustedfirmware.org 23293 23293 S: Maintained 23294 23294 F: Documentation/ABI/testing/sysfs-class-tee
+25 -12
arch/arm/mm/fault-armv.c
··· 62 62 } 63 63 64 64 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, 65 - unsigned long pfn, struct vm_fault *vmf) 65 + unsigned long pfn, bool need_lock) 66 66 { 67 67 spinlock_t *ptl; 68 68 pgd_t *pgd; ··· 99 99 if (!pte) 100 100 return 0; 101 101 102 - /* 103 - * If we are using split PTE locks, then we need to take the page 104 - * lock here. Otherwise we are using shared mm->page_table_lock 105 - * which is already locked, thus cannot take it. 106 - */ 107 - if (ptl != vmf->ptl) { 102 + if (need_lock) { 103 + /* 104 + * Use nested version here to indicate that we are already 105 + * holding one similar spinlock. 106 + */ 108 107 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); 109 108 if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { 110 109 pte_unmap_unlock(pte, ptl); ··· 113 114 114 115 ret = do_adjust_pte(vma, address, pfn, pte); 115 116 116 - if (ptl != vmf->ptl) 117 + if (need_lock) 117 118 spin_unlock(ptl); 118 119 pte_unmap(pte); 119 120 ··· 122 123 123 124 static void 124 125 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, 125 - unsigned long addr, pte_t *ptep, unsigned long pfn, 126 - struct vm_fault *vmf) 126 + unsigned long addr, pte_t *ptep, unsigned long pfn) 127 127 { 128 + const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE); 129 + const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE; 128 130 struct mm_struct *mm = vma->vm_mm; 129 131 struct vm_area_struct *mpnt; 130 132 unsigned long offset; ··· 142 142 flush_dcache_mmap_lock(mapping); 143 143 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { 144 144 /* 145 + * If we are using split PTE locks, then we need to take the pte 146 + * lock. Otherwise we are using shared mm->page_table_lock which 147 + * is already locked, thus cannot take it. 
148 + */ 149 + bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS); 150 + unsigned long mpnt_addr; 151 + 152 + /* 145 153 * If this VMA is not in our MM, we can ignore it. 146 154 * Note that we intentionally mask out the VMA 147 155 * that we are fixing up. ··· 159 151 if (!(mpnt->vm_flags & VM_MAYSHARE)) 160 152 continue; 161 153 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; 162 - aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf); 154 + mpnt_addr = mpnt->vm_start + offset; 155 + 156 + /* Avoid deadlocks by not grabbing the same PTE lock again. */ 157 + if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr) 158 + need_lock = false; 159 + aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock); 163 160 } 164 161 flush_dcache_mmap_unlock(mapping); 165 162 if (aliases) ··· 207 194 __flush_dcache_folio(mapping, folio); 208 195 if (mapping) { 209 196 if (cache_is_vivt()) 210 - make_coherent(mapping, vma, addr, ptep, pfn, vmf); 197 + make_coherent(mapping, vma, addr, ptep, pfn); 211 198 else if (vma->vm_flags & VM_EXEC) 212 199 __flush_icache_all(); 213 200 }
+4 -2
arch/m68k/include/asm/sun3_pgalloc.h
··· 44 44 pgd_t *new_pgd; 45 45 46 46 new_pgd = __pgd_alloc(mm, 0); 47 - memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); 48 - memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); 47 + if (likely(new_pgd != NULL)) { 48 + memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); 49 + memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); 50 + } 49 51 return new_pgd; 50 52 } 51 53
+2 -1
drivers/rapidio/devices/rio_mport_cdev.c
··· 1742 1742 err = rio_add_net(net); 1743 1743 if (err) { 1744 1744 rmcd_debug(RDEV, "failed to register net, err=%d", err); 1745 - kfree(net); 1745 + put_device(&net->dev); 1746 + mport->net = NULL; 1746 1747 goto cleanup; 1747 1748 } 1748 1749 }
+4 -1
drivers/rapidio/rio-scan.c
··· 871 871 dev_set_name(&net->dev, "rnet_%d", net->id); 872 872 net->dev.parent = &mport->dev; 873 873 net->dev.release = rio_scan_release_dev; 874 - rio_add_net(net); 874 + if (rio_add_net(net)) { 875 + put_device(&net->dev); 876 + net = NULL; 877 + } 875 878 } 876 879 877 880 return net;
+2 -1
fs/nfs/file.c
··· 29 29 #include <linux/pagemap.h> 30 30 #include <linux/gfp.h> 31 31 #include <linux/swap.h> 32 + #include <linux/compaction.h> 32 33 33 34 #include <linux/uaccess.h> 34 35 #include <linux/filelock.h> ··· 458 457 /* If the private flag is set, then the folio is not freeable */ 459 458 if (folio_test_private(folio)) { 460 459 if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL || 461 - current_is_kswapd()) 460 + current_is_kswapd() || current_is_kcompactd()) 462 461 return false; 463 462 if (nfs_wb_folio(folio->mapping->host, folio) < 0) 464 463 return false;
+5
include/linux/compaction.h
··· 80 80 return 2UL << order; 81 81 } 82 82 83 + static inline int current_is_kcompactd(void) 84 + { 85 + return current->flags & PF_KCOMPACTD; 86 + } 87 + 83 88 #ifdef CONFIG_COMPACTION 84 89 85 90 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
+5
include/linux/hugetlb.h
··· 682 682 683 683 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); 684 684 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); 685 + void wait_for_freed_hugetlb_folios(void); 685 686 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 686 687 unsigned long addr, bool cow_from_owner); 687 688 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, ··· 1067 1066 unsigned long end_pfn) 1068 1067 { 1069 1068 return 0; 1069 + } 1070 + 1071 + static inline void wait_for_freed_hugetlb_folios(void) 1072 + { 1070 1073 } 1071 1074 1072 1075 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+1 -1
include/linux/log2.h
··· 41 41 * *not* considered a power of two. 42 42 * Return: true if @n is a power of 2, otherwise false. 43 43 */ 44 - static inline __attribute__((const)) 44 + static __always_inline __attribute__((const)) 45 45 bool is_power_of_2(unsigned long n) 46 46 { 47 47 return (n != 0 && ((n & (n - 1)) == 0));
+1 -1
include/linux/sched.h
··· 1701 1701 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1702 1702 #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ 1703 1703 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1704 - #define PF__HOLE__00010000 0x00010000 1704 + #define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ 1705 1705 #define PF_KSWAPD 0x00020000 /* I am kswapd */ 1706 1706 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */ 1707 1707 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
+1 -1
lib/Kconfig.debug
··· 2103 2103 reallocated, catching possible invalid pointers to the skb. 2104 2104 2105 2105 For more information, check 2106 - Documentation/dev-tools/fault-injection/fault-injection.rst 2106 + Documentation/fault-injection/fault-injection.rst 2107 2107 2108 2108 config FAULT_INJECTION_CONFIGFS 2109 2109 bool "Configfs interface for fault-injection capabilities"
+3
mm/compaction.c
··· 3181 3181 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); 3182 3182 long timeout = default_timeout; 3183 3183 3184 + current->flags |= PF_KCOMPACTD; 3184 3185 set_freezable(); 3185 3186 3186 3187 pgdat->kcompactd_max_order = 0; ··· 3237 3236 if (unlikely(pgdat->proactive_compact_trigger)) 3238 3237 pgdat->proactive_compact_trigger = false; 3239 3238 } 3239 + 3240 + current->flags &= ~PF_KCOMPACTD; 3240 3241 3241 3242 return 0; 3242 3243 }
+8
mm/hugetlb.c
··· 2943 2943 return ret; 2944 2944 } 2945 2945 2946 + void wait_for_freed_hugetlb_folios(void) 2947 + { 2948 + if (llist_empty(&hpage_freelist)) 2949 + return; 2950 + 2951 + flush_work(&free_hpage_work); 2952 + } 2953 + 2946 2954 typedef enum { 2947 2955 /* 2948 2956 * For either 0/1: we checked the per-vma resv map, and one resv
+3 -2
mm/internal.h
··· 1115 1115 * mm/memory-failure.c 1116 1116 */ 1117 1117 #ifdef CONFIG_MEMORY_FAILURE 1118 - void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu); 1118 + int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill); 1119 1119 void shake_folio(struct folio *folio); 1120 1120 extern int hwpoison_filter(struct page *p); 1121 1121 ··· 1138 1138 struct vm_area_struct *vma); 1139 1139 1140 1140 #else 1141 - static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) 1141 + static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) 1142 1142 { 1143 + return -EBUSY; 1143 1144 } 1144 1145 #endif 1145 1146
+1
mm/kmsan/hooks.c
··· 357 357 size -= to_go; 358 358 } 359 359 } 360 + EXPORT_SYMBOL_GPL(kmsan_handle_dma); 360 361 361 362 void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, 362 363 enum dma_data_direction dir)
+31 -32
mm/memory-failure.c
··· 1556 1556 return ret; 1557 1557 } 1558 1558 1559 - void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) 1559 + int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) 1560 1560 { 1561 - if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { 1562 - struct address_space *mapping; 1561 + enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; 1562 + struct address_space *mapping; 1563 1563 1564 + if (folio_test_swapcache(folio)) { 1565 + pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); 1566 + ttu &= ~TTU_HWPOISON; 1567 + } 1568 + 1569 + /* 1570 + * Propagate the dirty bit from PTEs to struct page first, because we 1571 + * need this to decide if we should kill or just drop the page. 1572 + * XXX: the dirty test could be racy: set_page_dirty() may not always 1573 + * be called inside page lock (it's recommended but not enforced). 1574 + */ 1575 + mapping = folio_mapping(folio); 1576 + if (!must_kill && !folio_test_dirty(folio) && mapping && 1577 + mapping_can_writeback(mapping)) { 1578 + if (folio_mkclean(folio)) { 1579 + folio_set_dirty(folio); 1580 + } else { 1581 + ttu &= ~TTU_HWPOISON; 1582 + pr_info("%#lx: corrupted page was clean: dropped without side effects\n", 1583 + pfn); 1584 + } 1585 + } 1586 + 1587 + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { 1564 1588 /* 1565 1589 * For hugetlb folios in shared mappings, try_to_unmap 1566 1590 * could potentially call huge_pmd_unshare. Because of ··· 1596 1572 if (!mapping) { 1597 1573 pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n", 1598 1574 folio_pfn(folio)); 1599 - return; 1575 + return -EBUSY; 1600 1576 } 1601 1577 1602 1578 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); ··· 1604 1580 } else { 1605 1581 try_to_unmap(folio, ttu); 1606 1582 } 1583 + 1584 + return folio_mapped(folio) ? 
-EBUSY : 0; 1607 1585 } 1608 1586 1609 1587 /* ··· 1615 1589 static bool hwpoison_user_mappings(struct folio *folio, struct page *p, 1616 1590 unsigned long pfn, int flags) 1617 1591 { 1618 - enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; 1619 - struct address_space *mapping; 1620 1592 LIST_HEAD(tokill); 1621 1593 bool unmap_success; 1622 1594 int forcekill; ··· 1637 1613 if (!folio_mapped(folio)) 1638 1614 return true; 1639 1615 1640 - if (folio_test_swapcache(folio)) { 1641 - pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); 1642 - ttu &= ~TTU_HWPOISON; 1643 - } 1644 - 1645 - /* 1646 - * Propagate the dirty bit from PTEs to struct page first, because we 1647 - * need this to decide if we should kill or just drop the page. 1648 - * XXX: the dirty test could be racy: set_page_dirty() may not always 1649 - * be called inside page lock (it's recommended but not enforced). 1650 - */ 1651 - mapping = folio_mapping(folio); 1652 - if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping && 1653 - mapping_can_writeback(mapping)) { 1654 - if (folio_mkclean(folio)) { 1655 - folio_set_dirty(folio); 1656 - } else { 1657 - ttu &= ~TTU_HWPOISON; 1658 - pr_info("%#lx: corrupted page was clean: dropped without side effects\n", 1659 - pfn); 1660 - } 1661 - } 1662 - 1663 1616 /* 1664 1617 * First collect all the processes that have the page 1665 1618 * mapped in dirty form. This has to be done before try_to_unmap, ··· 1644 1643 */ 1645 1644 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); 1646 1645 1647 - unmap_poisoned_folio(folio, ttu); 1648 - 1649 - unmap_success = !folio_mapped(folio); 1646 + unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL); 1650 1647 if (!unmap_success) 1651 1648 pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n", 1652 1649 pfn, folio_mapcount(folio));
+14 -7
mm/memory.c
··· 3051 3051 next = pgd_addr_end(addr, end); 3052 3052 if (pgd_none(*pgd) && !create) 3053 3053 continue; 3054 - if (WARN_ON_ONCE(pgd_leaf(*pgd))) 3055 - return -EINVAL; 3054 + if (WARN_ON_ONCE(pgd_leaf(*pgd))) { 3055 + err = -EINVAL; 3056 + break; 3057 + } 3056 3058 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 3057 3059 if (!create) 3058 3060 continue; ··· 5185 5183 bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && 5186 5184 !(vma->vm_flags & VM_SHARED); 5187 5185 int type, nr_pages; 5188 - unsigned long addr = vmf->address; 5186 + unsigned long addr; 5187 + bool needs_fallback = false; 5188 + 5189 + fallback: 5190 + addr = vmf->address; 5189 5191 5190 5192 /* Did we COW the page? */ 5191 5193 if (is_cow) ··· 5228 5222 * approach also applies to non-anonymous-shmem faults to avoid 5229 5223 * inflating the RSS of the process. 5230 5224 */ 5231 - if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { 5225 + if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) || 5226 + unlikely(needs_fallback)) { 5232 5227 nr_pages = 1; 5233 5228 } else if (nr_pages > 1) { 5234 5229 pgoff_t idx = folio_page_idx(folio, page); ··· 5265 5258 ret = VM_FAULT_NOPAGE; 5266 5259 goto unlock; 5267 5260 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { 5268 - update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); 5269 - ret = VM_FAULT_NOPAGE; 5270 - goto unlock; 5261 + needs_fallback = true; 5262 + pte_unmap_unlock(vmf->pte, vmf->ptl); 5263 + goto fallback; 5271 5264 } 5272 5265 5273 5266 folio_ref_add(folio, nr_pages - 1);
+13 -15
mm/memory_hotplug.c
··· 1822 1822 if (folio_test_large(folio)) 1823 1823 pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1; 1824 1824 1825 - /* 1826 - * HWPoison pages have elevated reference counts so the migration would 1827 - * fail on them. It also doesn't make any sense to migrate them in the 1828 - * first place. Still try to unmap such a page in case it is still mapped 1829 - * (keep the unmap as the catch all safety net). 1830 - */ 1831 - if (folio_test_hwpoison(folio) || 1832 - (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { 1833 - if (WARN_ON(folio_test_lru(folio))) 1834 - folio_isolate_lru(folio); 1835 - if (folio_mapped(folio)) 1836 - unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK); 1837 - continue; 1838 - } 1839 - 1840 1825 if (!folio_try_get(folio)) 1841 1826 continue; 1842 1827 1843 1828 if (unlikely(page_folio(page) != folio)) 1844 1829 goto put_folio; 1830 + 1831 + if (folio_test_hwpoison(folio) || 1832 + (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { 1833 + if (WARN_ON(folio_test_lru(folio))) 1834 + folio_isolate_lru(folio); 1835 + if (folio_mapped(folio)) { 1836 + folio_lock(folio); 1837 + unmap_poisoned_folio(folio, pfn, false); 1838 + folio_unlock(folio); 1839 + } 1840 + 1841 + goto put_folio; 1842 + } 1845 1843 1846 1844 if (!isolate_folio_to_list(folio, &source)) { 1847 1845 if (__ratelimit(&migrate_rs)) {
+2 -2
mm/page_alloc.c
··· 4243 4243 restart: 4244 4244 compaction_retries = 0; 4245 4245 no_progress_loops = 0; 4246 + compact_result = COMPACT_SKIPPED; 4246 4247 compact_priority = DEF_COMPACT_PRIORITY; 4247 4248 cpuset_mems_cookie = read_mems_allowed_begin(); 4248 4249 zonelist_iter_cookie = zonelist_iter_begin(); ··· 5850 5849 5851 5850 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5852 5851 struct zone *upper_zone = &pgdat->node_zones[j]; 5853 - bool empty = !zone_managed_pages(upper_zone); 5854 5852 5855 5853 managed_pages += zone_managed_pages(upper_zone); 5856 5854 5857 - if (clear || empty) 5855 + if (clear) 5858 5856 zone->lowmem_reserve[j] = 0; 5859 5857 else 5860 5858 zone->lowmem_reserve[j] = managed_pages / ratio;
+10
mm/page_isolation.c
··· 608 608 int ret; 609 609 610 610 /* 611 + * Due to the deferred freeing of hugetlb folios, the hugepage folios may 612 + * not immediately release to the buddy system. This can cause PageBuddy() 613 + * to fail in __test_page_isolated_in_pageblock(). To ensure that the 614 + * hugetlb folios are properly released back to the buddy system, we 615 + * invoke the wait_for_freed_hugetlb_folios() function to wait for the 616 + * release to complete. 617 + */ 618 + wait_for_freed_hugetlb_folios(); 619 + 620 + /* 611 621 * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free 612 622 * pages are not aligned to pageblock_nr_pages. 613 623 * Then we just check migratetype first.
+28 -5
mm/shmem.c
··· 1548 1548 if (WARN_ON_ONCE(!wbc->for_reclaim)) 1549 1549 goto redirty; 1550 1550 1551 - if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) 1551 + if ((info->flags & VM_LOCKED) || sbinfo->noswap) 1552 1552 goto redirty; 1553 1553 1554 1554 if (!total_swap_pages) ··· 2253 2253 struct folio *folio = NULL; 2254 2254 bool skip_swapcache = false; 2255 2255 swp_entry_t swap; 2256 - int error, nr_pages; 2256 + int error, nr_pages, order, split_order; 2257 2257 2258 2258 VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 2259 2259 swap = radix_to_swp_entry(*foliop); ··· 2272 2272 2273 2273 /* Look it up and read it in.. */ 2274 2274 folio = swap_cache_get_folio(swap, NULL, 0); 2275 + order = xa_get_order(&mapping->i_pages, index); 2275 2276 if (!folio) { 2276 - int order = xa_get_order(&mapping->i_pages, index); 2277 2277 bool fallback_order0 = false; 2278 - int split_order; 2279 2278 2280 2279 /* Or update major stats only when swapin succeeds?? */ 2281 2280 if (fault_type) { ··· 2338 2339 error = -ENOMEM; 2339 2340 goto failed; 2340 2341 } 2342 + } else if (order != folio_order(folio)) { 2343 + /* 2344 + * Swap readahead may swap in order 0 folios into swapcache 2345 + * asynchronously, while the shmem mapping can still stores 2346 + * large swap entries. In such cases, we should split the 2347 + * large swap entry to prevent possible data corruption. 2348 + */ 2349 + split_order = shmem_split_large_entry(inode, index, swap, gfp); 2350 + if (split_order < 0) { 2351 + error = split_order; 2352 + goto failed; 2353 + } 2354 + 2355 + /* 2356 + * If the large swap entry has already been split, it is 2357 + * necessary to recalculate the new swap entry based on 2358 + * the old order alignment. 
2359 + */ 2360 + if (split_order > 0) { 2361 + pgoff_t offset = index - round_down(index, 1 << split_order); 2362 + 2363 + swap = swp_entry(swp_type(swap), swp_offset(swap) + offset); 2364 + } 2341 2365 } 2342 2366 2343 2367 alloced: ··· 2368 2346 folio_lock(folio); 2369 2347 if ((!skip_swapcache && !folio_test_swapcache(folio)) || 2370 2348 folio->swap.val != swap.val || 2371 - !shmem_confirm_swap(mapping, index, swap)) { 2349 + !shmem_confirm_swap(mapping, index, swap) || 2350 + xa_get_order(&mapping->i_pages, index) != folio_order(folio)) { 2372 2351 error = -EEXIST; 2373 2352 goto unlock; 2374 2353 }
+10 -2
mm/swapfile.c
··· 653 653 return; 654 654 655 655 if (!ci->count) { 656 - free_cluster(si, ci); 656 + if (ci->flags != CLUSTER_FLAG_FREE) 657 + free_cluster(si, ci); 657 658 } else if (ci->count != SWAPFILE_CLUSTER) { 658 659 if (ci->flags != CLUSTER_FLAG_FRAG) 659 660 move_cluster(si, ci, &si->frag_clusters[ci->order], ··· 858 857 } 859 858 offset++; 860 859 } 860 + 861 + /* in case no swap cache is reclaimed */ 862 + if (ci->flags == CLUSTER_FLAG_NONE) 863 + relocate_cluster(si, ci); 861 864 862 865 unlock_cluster(ci); 863 866 if (to_scan <= 0) ··· 2646 2641 for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { 2647 2642 ci = lock_cluster(si, offset); 2648 2643 unlock_cluster(ci); 2649 - offset += SWAPFILE_CLUSTER; 2650 2644 } 2651 2645 } 2652 2646 ··· 3546 3542 int err, i; 3547 3543 3548 3544 si = swp_swap_info(entry); 3545 + if (WARN_ON_ONCE(!si)) { 3546 + pr_err("%s%08lx\n", Bad_file, entry.val); 3547 + return -EINVAL; 3548 + } 3549 3549 3550 3550 offset = swp_offset(entry); 3551 3551 VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
+90 -17
mm/userfaultfd.c
··· 18 18 #include <asm/tlbflush.h> 19 19 #include <asm/tlb.h> 20 20 #include "internal.h" 21 + #include "swap.h" 21 22 22 23 static __always_inline 23 24 bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end) ··· 1077 1076 return err; 1078 1077 } 1079 1078 1080 - static int move_swap_pte(struct mm_struct *mm, 1079 + static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, 1081 1080 unsigned long dst_addr, unsigned long src_addr, 1082 1081 pte_t *dst_pte, pte_t *src_pte, 1083 1082 pte_t orig_dst_pte, pte_t orig_src_pte, 1084 1083 pmd_t *dst_pmd, pmd_t dst_pmdval, 1085 - spinlock_t *dst_ptl, spinlock_t *src_ptl) 1084 + spinlock_t *dst_ptl, spinlock_t *src_ptl, 1085 + struct folio *src_folio) 1086 1086 { 1087 - if (!pte_swp_exclusive(orig_src_pte)) 1088 - return -EBUSY; 1089 - 1090 1087 double_pt_lock(dst_ptl, src_ptl); 1091 1088 1092 1089 if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte, 1093 1090 dst_pmd, dst_pmdval)) { 1094 1091 double_pt_unlock(dst_ptl, src_ptl); 1095 1092 return -EAGAIN; 1093 + } 1094 + 1095 + /* 1096 + * The src_folio resides in the swapcache, requiring an update to its 1097 + * index and mapping to align with the dst_vma, where a swap-in may 1098 + * occur and hit the swapcache after moving the PTE. 
1099 + */ 1100 + if (src_folio) { 1101 + folio_move_anon_rmap(src_folio, dst_vma); 1102 + src_folio->index = linear_page_index(dst_vma, dst_addr); 1096 1103 } 1097 1104 1098 1105 orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); ··· 1150 1141 __u64 mode) 1151 1142 { 1152 1143 swp_entry_t entry; 1144 + struct swap_info_struct *si = NULL; 1153 1145 pte_t orig_src_pte, orig_dst_pte; 1154 1146 pte_t src_folio_pte; 1155 1147 spinlock_t *src_ptl, *dst_ptl; ··· 1250 1240 */ 1251 1241 if (!src_folio) { 1252 1242 struct folio *folio; 1243 + bool locked; 1253 1244 1254 1245 /* 1255 1246 * Pin the page while holding the lock to be sure the ··· 1270 1259 goto out; 1271 1260 } 1272 1261 1262 + locked = folio_trylock(folio); 1263 + /* 1264 + * We avoid waiting for folio lock with a raised 1265 + * refcount for large folios because extra refcounts 1266 + * will result in split_folio() failing later and 1267 + * retrying. If multiple tasks are trying to move a 1268 + * large folio we can end up livelocking. 
1269 + */ 1270 + if (!locked && folio_test_large(folio)) { 1271 + spin_unlock(src_ptl); 1272 + err = -EAGAIN; 1273 + goto out; 1274 + } 1275 + 1273 1276 folio_get(folio); 1274 1277 src_folio = folio; 1275 1278 src_folio_pte = orig_src_pte; 1276 1279 spin_unlock(src_ptl); 1277 1280 1278 - if (!folio_trylock(src_folio)) { 1279 - pte_unmap(&orig_src_pte); 1280 - pte_unmap(&orig_dst_pte); 1281 + if (!locked) { 1282 + pte_unmap(src_pte); 1283 + pte_unmap(dst_pte); 1281 1284 src_pte = dst_pte = NULL; 1282 1285 /* now we can block and wait */ 1283 1286 folio_lock(src_folio); ··· 1307 1282 /* at this point we have src_folio locked */ 1308 1283 if (folio_test_large(src_folio)) { 1309 1284 /* split_folio() can block */ 1310 - pte_unmap(&orig_src_pte); 1311 - pte_unmap(&orig_dst_pte); 1285 + pte_unmap(src_pte); 1286 + pte_unmap(dst_pte); 1312 1287 src_pte = dst_pte = NULL; 1313 1288 err = split_folio(src_folio); 1314 1289 if (err) ··· 1333 1308 goto out; 1334 1309 } 1335 1310 if (!anon_vma_trylock_write(src_anon_vma)) { 1336 - pte_unmap(&orig_src_pte); 1337 - pte_unmap(&orig_dst_pte); 1311 + pte_unmap(src_pte); 1312 + pte_unmap(dst_pte); 1338 1313 src_pte = dst_pte = NULL; 1339 1314 /* now we can block and wait */ 1340 1315 anon_vma_lock_write(src_anon_vma); ··· 1347 1322 orig_dst_pte, orig_src_pte, dst_pmd, 1348 1323 dst_pmdval, dst_ptl, src_ptl, src_folio); 1349 1324 } else { 1325 + struct folio *folio = NULL; 1326 + 1350 1327 entry = pte_to_swp_entry(orig_src_pte); 1351 1328 if (non_swap_entry(entry)) { 1352 1329 if (is_migration_entry(entry)) { 1353 - pte_unmap(&orig_src_pte); 1354 - pte_unmap(&orig_dst_pte); 1330 + pte_unmap(src_pte); 1331 + pte_unmap(dst_pte); 1355 1332 src_pte = dst_pte = NULL; 1356 1333 migration_entry_wait(mm, src_pmd, src_addr); 1357 1334 err = -EAGAIN; ··· 1362 1335 goto out; 1363 1336 } 1364 1337 1365 - err = move_swap_pte(mm, dst_addr, src_addr, dst_pte, src_pte, 1366 - orig_dst_pte, orig_src_pte, dst_pmd, 1367 - dst_pmdval, dst_ptl, src_ptl); 
1338 + if (!pte_swp_exclusive(orig_src_pte)) { 1339 + err = -EBUSY; 1340 + goto out; 1341 + } 1342 + 1343 + si = get_swap_device(entry); 1344 + if (unlikely(!si)) { 1345 + err = -EAGAIN; 1346 + goto out; 1347 + } 1348 + /* 1349 + * Verify the existence of the swapcache. If present, the folio's 1350 + * index and mapping must be updated even when the PTE is a swap 1351 + * entry. The anon_vma lock is not taken during this process since 1352 + * the folio has already been unmapped, and the swap entry is 1353 + * exclusive, preventing rmap walks. 1354 + * 1355 + * For large folios, return -EBUSY immediately, as split_folio() 1356 + * also returns -EBUSY when attempting to split unmapped large 1357 + * folios in the swapcache. This issue needs to be resolved 1358 + * separately to allow proper handling. 1359 + */ 1360 + if (!src_folio) 1361 + folio = filemap_get_folio(swap_address_space(entry), 1362 + swap_cache_index(entry)); 1363 + if (!IS_ERR_OR_NULL(folio)) { 1364 + if (folio_test_large(folio)) { 1365 + err = -EBUSY; 1366 + folio_put(folio); 1367 + goto out; 1368 + } 1369 + src_folio = folio; 1370 + src_folio_pte = orig_src_pte; 1371 + if (!folio_trylock(src_folio)) { 1372 + pte_unmap(src_pte); 1373 + pte_unmap(dst_pte); 1374 + src_pte = dst_pte = NULL; 1375 + put_swap_device(si); 1376 + si = NULL; 1377 + /* now we can block and wait */ 1378 + folio_lock(src_folio); 1379 + goto retry; 1380 + } 1381 + } 1382 + err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte, 1383 + orig_dst_pte, orig_src_pte, dst_pmd, dst_pmdval, 1384 + dst_ptl, src_ptl, src_folio); 1368 1385 } 1369 1386 1370 1387 out: ··· 1425 1354 if (src_pte) 1426 1355 pte_unmap(src_pte); 1427 1356 mmu_notifier_invalidate_range_end(&range); 1357 + if (si) 1358 + put_swap_device(si); 1428 1359 1429 1360 return err; 1430 1361 }
+8 -4
mm/vma.c
··· 1509 1509 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) 1510 1510 { 1511 1511 struct vm_area_struct *vma = vmg->vma; 1512 + unsigned long start = vmg->start; 1513 + unsigned long end = vmg->end; 1512 1514 struct vm_area_struct *merged; 1513 1515 1514 1516 /* First, try to merge. */ 1515 1517 merged = vma_merge_existing_range(vmg); 1516 1518 if (merged) 1517 1519 return merged; 1520 + if (vmg_nomem(vmg)) 1521 + return ERR_PTR(-ENOMEM); 1518 1522 1519 1523 /* Split any preceding portion of the VMA. */ 1520 - if (vma->vm_start < vmg->start) { 1521 - int err = split_vma(vmg->vmi, vma, vmg->start, 1); 1524 + if (vma->vm_start < start) { 1525 + int err = split_vma(vmg->vmi, vma, start, 1); 1522 1526 1523 1527 if (err) 1524 1528 return ERR_PTR(err); 1525 1529 } 1526 1530 1527 1531 /* Split any trailing portion of the VMA. */ 1528 - if (vma->vm_end > vmg->end) { 1529 - int err = split_vma(vmg->vmi, vma, vmg->end, 0); 1532 + if (vma->vm_end > end) { 1533 + int err = split_vma(vmg->vmi, vma, end, 0); 1530 1534 1531 1535 if (err) 1532 1536 return ERR_PTR(err);
+2 -2
mm/vmalloc.c
··· 586 586 mask |= PGTBL_PGD_MODIFIED; 587 587 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); 588 588 if (err) 589 - return err; 589 + break; 590 590 } while (pgd++, addr = next, addr != end); 591 591 592 592 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 593 593 arch_sync_kernel_mappings(start, end); 594 594 595 - return 0; 595 + return err; 596 596 } 597 597 598 598 /*
+1 -1
mm/zswap.c
··· 43 43 * statistics 44 44 **********************************/ 45 45 /* The number of compressed pages currently stored in zswap */ 46 - atomic_long_t zswap_stored_pages = ATOMIC_INIT(0); 46 + atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0); 47 47 48 48 /* 49 49 * The statistics below are not protected from concurrent access for
+2
tools/testing/selftests/damon/damon_nr_regions.py
··· 65 65 66 66 test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % ( 67 67 real_nr_regions, min_nr_regions, max_nr_regions) 68 + collected_nr_regions.sort() 68 69 if (collected_nr_regions[0] < min_nr_regions or 69 70 collected_nr_regions[-1] > max_nr_regions): 70 71 print('fail %s' % test_name) ··· 110 109 attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs 111 110 attrs.min_nr_regions = 3 112 111 attrs.max_nr_regions = 7 112 + attrs.update_us = 100000 113 113 err = kdamonds.kdamonds[0].commit() 114 114 if err is not None: 115 115 proc.terminate()
+6 -3
tools/testing/selftests/damon/damos_quota.py
··· 51 51 nr_quota_exceeds = scheme.stats.qt_exceeds 52 52 53 53 wss_collected.sort() 54 + nr_expected_quota_exceeds = 0 54 55 for wss in wss_collected: 55 56 if wss > sz_quota: 56 57 print('quota is not kept: %s > %s' % (wss, sz_quota)) 57 58 print('collected samples are as below') 58 59 print('\n'.join(['%d' % wss for wss in wss_collected])) 59 60 exit(1) 61 + if wss == sz_quota: 62 + nr_expected_quota_exceeds += 1 60 63 61 - if nr_quota_exceeds < len(wss_collected): 62 - print('quota is not always exceeded: %d > %d' % 63 - (len(wss_collected), nr_quota_exceeds)) 64 + if nr_quota_exceeds < nr_expected_quota_exceeds: 65 + print('quota is exceeded less than expected: %d < %d' % 66 + (nr_quota_exceeds, nr_expected_quota_exceeds)) 64 67 exit(1) 65 68 66 69 if __name__ == '__main__':
+3
tools/testing/selftests/damon/damos_quota_goal.py
··· 63 63 if last_effective_bytes != 0 else -1.0)) 64 64 65 65 if last_effective_bytes == goal.effective_bytes: 66 + # effective quota was already minimum that cannot be more reduced 67 + if expect_increase is False and last_effective_bytes == 1: 68 + continue 66 69 print('efective bytes not changed: %d' % goal.effective_bytes) 67 70 exit(1) 68 71
+1 -1
tools/testing/selftests/mm/hugepage-mremap.c
··· 15 15 #define _GNU_SOURCE 16 16 #include <stdlib.h> 17 17 #include <stdio.h> 18 - #include <asm-generic/unistd.h> 18 + #include <unistd.h> 19 19 #include <sys/mman.h> 20 20 #include <errno.h> 21 21 #include <fcntl.h> /* Definition of O_* constants */
+7 -1
tools/testing/selftests/mm/ksm_functional_tests.c
··· 11 11 #include <string.h> 12 12 #include <stdbool.h> 13 13 #include <stdint.h> 14 - #include <asm-generic/unistd.h> 14 + #include <unistd.h> 15 15 #include <errno.h> 16 16 #include <fcntl.h> 17 17 #include <sys/mman.h> ··· 369 369 munmap(map, size); 370 370 } 371 371 372 + #ifdef __NR_userfaultfd 372 373 static void test_unmerge_uffd_wp(void) 373 374 { 374 375 struct uffdio_writeprotect uffd_writeprotect; ··· 430 429 unmap: 431 430 munmap(map, size); 432 431 } 432 + #endif 433 433 434 434 /* Verify that KSM can be enabled / queried with prctl. */ 435 435 static void test_prctl(void) ··· 686 684 exit(test_child_ksm()); 687 685 } 688 686 687 + #ifdef __NR_userfaultfd 689 688 tests++; 689 + #endif 690 690 691 691 ksft_print_header(); 692 692 ksft_set_plan(tests); ··· 700 696 test_unmerge(); 701 697 test_unmerge_zero_pages(); 702 698 test_unmerge_discarded(); 699 + #ifdef __NR_userfaultfd 703 700 test_unmerge_uffd_wp(); 701 + #endif 704 702 705 703 test_prot_none(); 706 704
+13 -1
tools/testing/selftests/mm/memfd_secret.c
··· 17 17 18 18 #include <stdlib.h> 19 19 #include <string.h> 20 - #include <asm-generic/unistd.h> 20 + #include <unistd.h> 21 21 #include <errno.h> 22 22 #include <stdio.h> 23 23 #include <fcntl.h> ··· 27 27 #define fail(fmt, ...) ksft_test_result_fail(fmt, ##__VA_ARGS__) 28 28 #define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__) 29 29 #define skip(fmt, ...) ksft_test_result_skip(fmt, ##__VA_ARGS__) 30 + 31 + #ifdef __NR_memfd_secret 30 32 31 33 #define PATTERN 0x55 32 34 ··· 334 332 335 333 ksft_finished(); 336 334 } 335 + 336 + #else /* __NR_memfd_secret */ 337 + 338 + int main(int argc, char *argv[]) 339 + { 340 + printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n"); 341 + return KSFT_SKIP; 342 + } 343 + 344 + #endif /* __NR_memfd_secret */
+7 -1
tools/testing/selftests/mm/mkdirty.c
··· 9 9 */ 10 10 #include <fcntl.h> 11 11 #include <signal.h> 12 - #include <asm-generic/unistd.h> 12 + #include <unistd.h> 13 13 #include <string.h> 14 14 #include <errno.h> 15 15 #include <stdlib.h> ··· 265 265 munmap(mmap_mem, mmap_size); 266 266 } 267 267 268 + #ifdef __NR_userfaultfd 268 269 static void test_uffdio_copy(void) 269 270 { 270 271 struct uffdio_register uffdio_register; ··· 323 322 munmap(dst, pagesize); 324 323 free(src); 325 324 } 325 + #endif /* __NR_userfaultfd */ 326 326 327 327 int main(void) 328 328 { ··· 336 334 thpsize / 1024); 337 335 tests += 3; 338 336 } 337 + #ifdef __NR_userfaultfd 339 338 tests += 1; 339 + #endif /* __NR_userfaultfd */ 340 340 341 341 ksft_print_header(); 342 342 ksft_set_plan(tests); ··· 368 364 if (thpsize) 369 365 test_pte_mapped_thp(); 370 366 /* Placing a fresh page via userfaultfd may set the PTE dirty. */ 367 + #ifdef __NR_userfaultfd 371 368 test_uffdio_copy(); 369 + #endif /* __NR_userfaultfd */ 372 370 373 371 err = ksft_get_fail_cnt(); 374 372 if (err)
-1
tools/testing/selftests/mm/mlock2.h
··· 3 3 #include <errno.h> 4 4 #include <stdio.h> 5 5 #include <stdlib.h> 6 - #include <asm-generic/unistd.h> 7 6 8 7 static int mlock2_(void *start, size_t len, int flags) 9 8 {
+1 -1
tools/testing/selftests/mm/protection_keys.c
··· 42 42 #include <sys/wait.h> 43 43 #include <sys/stat.h> 44 44 #include <fcntl.h> 45 - #include <asm-generic/unistd.h> 45 + #include <unistd.h> 46 46 #include <sys/ptrace.h> 47 47 #include <setjmp.h> 48 48
+4
tools/testing/selftests/mm/uffd-common.c
··· 673 673 674 674 int uffd_open_sys(unsigned int flags) 675 675 { 676 + #ifdef __NR_userfaultfd 676 677 return syscall(__NR_userfaultfd, flags); 678 + #else 679 + return -1; 680 + #endif 677 681 } 678 682 679 683 int uffd_open(unsigned int flags)
+14 -1
tools/testing/selftests/mm/uffd-stress.c
··· 33 33 * pthread_mutex_lock will also verify the atomicity of the memory 34 34 * transfer (UFFDIO_COPY). 35 35 */ 36 - #include <asm-generic/unistd.h> 36 + 37 37 #include "uffd-common.h" 38 38 39 39 uint64_t features; 40 + #ifdef __NR_userfaultfd 40 41 41 42 #define BOUNCE_RANDOM (1<<0) 42 43 #define BOUNCE_RACINGFAULTS (1<<1) ··· 472 471 nr_pages, nr_pages_per_cpu); 473 472 return userfaultfd_stress(); 474 473 } 474 + 475 + #else /* __NR_userfaultfd */ 476 + 477 + #warning "missing __NR_userfaultfd definition" 478 + 479 + int main(void) 480 + { 481 + printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); 482 + return KSFT_SKIP; 483 + } 484 + 485 + #endif /* __NR_userfaultfd */
+13 -1
tools/testing/selftests/mm/uffd-unit-tests.c
··· 5 5 * Copyright (C) 2015-2023 Red Hat, Inc. 6 6 */ 7 7 8 - #include <asm-generic/unistd.h> 9 8 #include "uffd-common.h" 10 9 11 10 #include "../../../../mm/gup_test.h" 11 + 12 + #ifdef __NR_userfaultfd 12 13 13 14 /* The unit test doesn't need a large or random size, make it 32MB for now */ 14 15 #define UFFD_TEST_MEM_SIZE (32UL << 20) ··· 1559 1558 return ksft_get_fail_cnt() ? KSFT_FAIL : KSFT_PASS; 1560 1559 } 1561 1560 1561 + #else /* __NR_userfaultfd */ 1562 + 1563 + #warning "missing __NR_userfaultfd definition" 1564 + 1565 + int main(void) 1566 + { 1567 + printf("Skipping %s (missing __NR_userfaultfd)\n", __file__); 1568 + return KSFT_SKIP; 1569 + } 1570 + 1571 + #endif /* __NR_userfaultfd */