···
 HUGETLB SUBSYSTEM
 M: Muchun Song <muchun.song@linux.dev>
 M: Oscar Salvador <osalvador@suse.de>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
···
 M: Christian Borntraeger <borntraeger@linux.ibm.com>
 M: Janosch Frank <frankja@linux.ibm.com>
 M: Claudio Imbrenda <imbrenda@linux.ibm.com>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
 L: kvm@vger.kernel.org
 S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
···
 F: Documentation/core-api/kho/*
 F: include/linux/kexec_handover.h
 F: kernel/kexec_handover.c
+F: lib/test_kho.c
 F: tools/testing/selftests/kho/

 KEYS-ENCRYPTED
···
 F: drivers/devfreq/tegra30-devfreq.c

 MEMORY HOT(UN)PLUG
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 M: Oscar Salvador <osalvador@suse.de>
 L: linux-mm@kvack.org
 S: Maintained
···

 MEMORY MANAGEMENT - CORE
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Vlastimil Babka <vbabka@suse.cz>
···

 MEMORY MANAGEMENT - GUP (GET USER PAGES)
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Jason Gunthorpe <jgg@nvidia.com>
 R: John Hubbard <jhubbard@nvidia.com>
 R: Peter Xu <peterx@redhat.com>
···

 MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Xu Xin <xu.xin16@zte.com.cn>
 R: Chengming Zhou <chengming.zhou@linux.dev>
 L: linux-mm@kvack.org
···

 MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Zi Yan <ziy@nvidia.com>
 R: Matthew Brost <matthew.brost@intel.com>
 R: Joshua Hahn <joshua.hahnjy@gmail.com>
···

 MEMORY MANAGEMENT - MISC
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Vlastimil Babka <vbabka@suse.cz>
···
 MEMORY MANAGEMENT - RECLAIM
 M: Andrew Morton <akpm@linux-foundation.org>
 M: Johannes Weiner <hannes@cmpxchg.org>
-R: David Hildenbrand <david@redhat.com>
+R: David Hildenbrand <david@kernel.org>
 R: Michal Hocko <mhocko@kernel.org>
 R: Qi Zheng <zhengqi.arch@bytedance.com>
 R: Shakeel Butt <shakeel.butt@linux.dev>
···

 MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Rik van Riel <riel@surriel.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
···

 MEMORY MANAGEMENT - SWAP
 M: Andrew Morton <akpm@linux-foundation.org>
+M: Chris Li <chrisl@kernel.org>
+M: Kairui Song <kasong@tencent.com>
 R: Kemeng Shi <shikemeng@huaweicloud.com>
-R: Kairui Song <kasong@tencent.com>
 R: Nhat Pham <nphamcs@gmail.com>
 R: Baoquan He <bhe@redhat.com>
 R: Barry Song <baohua@kernel.org>
-R: Chris Li <chrisl@kernel.org>
 L: linux-mm@kvack.org
 S: Maintained
 F: Documentation/mm/swap-table.rst
···

 MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
 M: Andrew Morton <akpm@linux-foundation.org>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Zi Yan <ziy@nvidia.com>
 R: Baolin Wang <baolin.wang@linux.alibaba.com>
···
 M: Andrew Morton <akpm@linux-foundation.org>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 R: Vlastimil Babka <vbabka@suse.cz>
 R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
···

 VIRTIO BALLOON
 M: "Michael S. Tsirkin" <mst@redhat.com>
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 L: virtualization@lists.linux.dev
 S: Maintained
 F: drivers/virtio/virtio_balloon.c
···
 F: include/uapi/linux/virtio_iommu.h

 VIRTIO MEM DRIVER
-M: David Hildenbrand <david@redhat.com>
+M: David Hildenbrand <david@kernel.org>
 L: virtualization@lists.linux.dev
 S: Maintained
 W: https://virtio-mem.gitlab.io/
···

 void tag_clear_highpage(struct page *page)
 {
+	/*
+	 * Check if MTE is supported and fall back to clear_highpage().
+	 * get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
+	 * post_alloc_hook() will invoke tag_clear_highpage().
+	 */
+	if (!system_supports_mte()) {
+		clear_highpage(page);
+		return;
+	}
+
 	/* Newly allocated page, shouldn't have been tagged yet */
 	WARN_ON_ONCE(!try_page_mte_tagging(page));
 	mte_zero_clear_page_tags(page_address(page));
+1
arch/powerpc/Kconfig
···
 	select ARCH_HAS_DMA_OPS			if PPC64
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE		if ARCH_SUPPORTS_HUGETLBFS
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_KERNEL_FPU_SUPPORT	if PPC64 && PPC_FPU
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
-1
arch/powerpc/platforms/Kconfig.cputype
···
 config PPC_RADIX_MMU
 	bool "Radix MMU Support"
 	depends on PPC_BOOK3S_64
-	select ARCH_HAS_GIGANTIC_PAGE
 	default y
 	help
 	  Enable support for the Power ISA 3.0 Radix style MMU. Currently this
+6-1
fs/nilfs2/segment.c
···

 	if (sci->sc_task) {
 		wake_up(&sci->sc_wait_daemon);
-		kthread_stop(sci->sc_task);
+		if (kthread_stop(sci->sc_task)) {
+			spin_lock(&sci->sc_state_lock);
+			sci->sc_task = NULL;
+			timer_shutdown_sync(&sci->sc_timer);
+			spin_unlock(&sci->sc_state_lock);
+		}
 	}

 	spin_lock(&sci->sc_state_lock);
+9-3
fs/proc/generic.c
···
 	}
 }

+static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
+{
+	rb_erase(&pde->subdir_node, &parent->subdir);
+	RB_CLEAR_NODE(&pde->subdir_node);
+}
+
 /*
  * Remove a /proc entry and free it if it's not currently in use.
  */
···
 		WARN(1, "removing permanent /proc entry '%s'", de->name);
 		de = NULL;
 	} else {
-		rb_erase(&de->subdir_node, &parent->subdir);
+		pde_erase(de, parent);
 		if (S_ISDIR(de->mode))
 			parent->nlink--;
 	}
···
 		       root->parent->name, root->name);
 		return -EINVAL;
 	}
-	rb_erase(&root->subdir_node, &parent->subdir);
+	pde_erase(root, parent);

 	de = root;
 	while (1) {
···
 				       next->parent->name, next->name);
 				return -EINVAL;
 			}
-			rb_erase(&next->subdir_node, &de->subdir);
+			pde_erase(next, de);
 			de = next;
 			continue;
 		}
+3
include/linux/gfp.h
···
 #include <linux/mmzone.h>
 #include <linux/topology.h>
 #include <linux/alloc_tag.h>
+#include <linux/cleanup.h>
 #include <linux/sched.h>

 struct vm_area_struct;
···
 #endif
 /* This should be paired with folio_put() rather than free_contig_range(). */
 #define folio_alloc_gigantic(...)	alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
+DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))

 #endif /* __LINUX_GFP_H */
+23-32
include/linux/huge_mm.h
···
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
 /*
- * try_folio_split - try to split a @folio at @page using non uniform split.
+ * try_folio_split_to_order - try to split a @folio at @page to @new_order using
+ * 		non uniform split.
  * @folio: folio to be split
- * @page: split to order-0 at the given page
- * @list: store the after-split folios
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
  *
- * Try to split a @folio at @page using non uniform split to order-0, if
- * non uniform split is not supported, fall back to uniform split.
+ * Try to split a @folio at @page using non uniform split to @new_order, if
+ * non uniform split is not supported, fall back to uniform split. After-split
+ * folios are put back to LRU list. Use min_order_for_split() to get the lower
+ * bound of @new_order.
  *
  * Return: 0: split is successful, otherwise split failed.
  */
-static inline int try_folio_split(struct folio *folio, struct page *page,
-		struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+		struct page *page, unsigned int new_order)
 {
-	int ret = min_order_for_split(folio);
-
-	if (ret < 0)
-		return ret;
-
-	if (!non_uniform_split_supported(folio, 0, false))
-		return split_huge_page_to_list_to_order(&folio->page, list,
-				ret);
-	return folio_split(folio, ret, page, list);
+	if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+		return split_huge_page_to_list_to_order(&folio->page, NULL,
+				new_order);
+	return folio_split(folio, new_order, page, NULL);
 }
 static inline int split_huge_page(struct page *page)
 {
-	struct folio *folio = page_folio(page);
-	int ret = min_order_for_split(folio);
-
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * split_huge_page() locks the page before splitting and
-	 * expects the same page that has been split to be locked when
-	 * returned. split_folio(page_folio(page)) cannot be used here
-	 * because it converts the page to folio and passes the head
-	 * page to be split.
-	 */
-	return split_huge_page_to_list_to_order(page, NULL, ret);
+	return split_huge_page_to_list_to_order(page, NULL, 0);
 }
 void deferred_split_folio(struct folio *folio, bool partially_mapped);
···
 	return -EINVAL;
 }

+static inline int min_order_for_split(struct folio *folio)
+{
+	VM_WARN_ON_ONCE_FOLIO(1, folio);
+	return -EINVAL;
+}
+
 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
 	return -EINVAL;
 }

-static inline int try_folio_split(struct folio *folio, struct page *page,
-		struct list_head *list)
+static inline int try_folio_split_to_order(struct folio *folio,
+		struct page *page, unsigned int new_order)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
 	return -EINVAL;
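With this change the caller picks the target order itself. A minimal usage sketch of the new helper (hypothetical caller, not part of the patch; it mirrors the try_folio_split_or_unmap() helper added to mm/truncate.c later in this series), where the lower bound comes from min_order_for_split():

static int example_split_to_min_order(struct folio *folio, struct page *split_at)
{
	int min_order = min_order_for_split(folio);

	if (min_order < 0)
		return min_order;

	/* Falls back to a uniform split when non-uniform split is unsupported. */
	return try_folio_split_to_order(folio, split_at, min_order);
}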
+10-3
include/linux/mm.h
···
 	return folio_large_nr_pages(folio);
 }

-#if !defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE)
+#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
 /*
  * We don't expect any folios that exceed buddy sizes (and consequently
  * memory sections).
···
  * pages are guaranteed to be contiguous.
  */
 #define MAX_FOLIO_ORDER		PFN_SECTION_SHIFT
-#else
+#elif defined(CONFIG_HUGETLB_PAGE)
 /*
  * There is no real limit on the folio size. We limit them to the maximum we
- * currently expect (e.g., hugetlb, dax).
+ * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
+ * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
+ */
+#define MAX_FOLIO_ORDER		get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
+#else
+/*
+ * Without hugetlb, gigantic folios that are bigger than a single PUD are
+ * currently impossible.
  */
 #define MAX_FOLIO_ORDER		PUD_ORDER
 #endif
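A quick sanity check of the new bounds (reviewer arithmetic, assuming 4 KiB base pages; not part of the patch):

/*
 * get_order(SZ_16G) = log2(16 GiB / 4 KiB) = 22 and get_order(SZ_1G) = 18,
 * so with hugetlb MAX_FOLIO_ORDER becomes 22 on 64-bit and 18 on 32-bit;
 * the remaining ZONE_DEVICE-only case stays capped at PUD_ORDER, i.e. one
 * 1 GiB PUD leaf with 4 KiB pages on x86-64.
 */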
+9
kernel/Kconfig.kexec
···
 	  to keep data or state alive across the kexec. For this to work,
 	  both source and target kernels need to have this option enabled.

+config KEXEC_HANDOVER_DEBUG
+	bool "Enable Kexec Handover debug checks"
+	depends on KEXEC_HANDOVER
+	help
+	  This option enables extra sanity checks for the Kexec Handover
+	  subsystem. Since, KHO performance is crucial in live update
+	  scenarios and the extra code might be adding overhead it is
+	  only optionally enabled.
+
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	default ARCH_DEFAULT_CRASH_DUMP
···

 #define pr_fmt(fmt) "KHO: " fmt

+#include <linux/cleanup.h>
 #include <linux/cma.h>
 #include <linux/count_zeros.h>
 #include <linux/debugfs.h>
···
 #include <asm/early_ioremap.h>

+#include "kexec_handover_internal.h"
 /*
  * KHO is tightly coupled with mm init and needs access to some of mm
  * internal APIs.
···
  * Keep track of memory that is to be preserved across KHO.
  *
  * The serializing side uses two levels of xarrays to manage chunks of per-order
- * 512 byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order of a
- * 1TB system would fit inside a single 512 byte bitmap. For order 0 allocations
- * each bitmap will cover 16M of address space. Thus, for 16G of memory at most
- * 512K of bitmap memory will be needed for order 0.
+ * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order
+ * of a 8TB system would fit inside a single 4096 byte bitmap. For order 0
+ * allocations each bitmap will cover 128M of address space. Thus, for 16G of
+ * memory at most 512K of bitmap memory will be needed for order 0.
  *
  * This approach is fully incremental, as the serialization progresses folios
  * can continue be aggregated to the tracker. The final step, immediately prior
···
  * successor kernel to parse.
  */

-#define PRESERVE_BITS (512 * 8)
+#define PRESERVE_BITS (PAGE_SIZE * 8)

 struct kho_mem_phys_bits {
 	DECLARE_BITMAP(preserve, PRESERVE_BITS);
 };
+
+static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);

 struct kho_mem_phys {
 	/*
···
 	.finalized = false,
 };

-static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
+static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
 {
-	void *elm, *res;
+	void *res = xa_load(xa, index);

-	elm = xa_load(xa, index);
-	if (elm)
-		return elm;
+	if (res)
+		return res;

-	elm = kzalloc(sz, GFP_KERNEL);
+	void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
+
 	if (!elm)
 		return ERR_PTR(-ENOMEM);

+	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
+		return ERR_PTR(-EINVAL);
+
 	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
 	if (xa_is_err(res))
-		res = ERR_PTR(xa_err(res));
-
-	if (res) {
-		kfree(elm);
+		return ERR_PTR(xa_err(res));
+	else if (res)
 		return res;
-	}

-	return elm;
+	return no_free_ptr(elm);
 }

 static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
···
 		const unsigned long pfn_high = pfn >> order;

 		physxa = xa_load(&track->orders, order);
-		if (!physxa)
-			continue;
+		if (WARN_ON_ONCE(!physxa))
+			return;

 		bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
-		if (!bits)
-			continue;
+		if (WARN_ON_ONCE(!bits))
+			return;

 		clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);

···
 		}
 	}

-	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
-				sizeof(*bits));
+	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
 	if (IS_ERR(bits))
 		return PTR_ERR(bits);

···
 static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
 					  unsigned long order)
 {
-	struct khoser_mem_chunk *chunk;
+	struct khoser_mem_chunk *chunk __free(free_page) = NULL;

-	chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	chunk = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!chunk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
+
+	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
+		return ERR_PTR(-EINVAL);
+
 	chunk->hdr.order = order;
 	if (cur_chunk)
 		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
-	return chunk;
+	return no_free_ptr(chunk);
 }

 static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
···
 	struct khoser_mem_chunk *chunk = NULL;
 	struct kho_mem_phys *physxa;
 	unsigned long order;
+	int err = -ENOMEM;

 	xa_for_each(&ser->track.orders, order, physxa) {
 		struct kho_mem_phys_bits *bits;
 		unsigned long phys;

 		chunk = new_chunk(chunk, order);
-		if (!chunk)
+		if (IS_ERR(chunk)) {
+			err = PTR_ERR(chunk);
 			goto err_free;
+		}

 		if (!first_chunk)
 			first_chunk = chunk;
···

 			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
 				chunk = new_chunk(chunk, order);
-				if (!chunk)
+				if (IS_ERR(chunk)) {
+					err = PTR_ERR(chunk);
 					goto err_free;
+				}
 			}

 			elm = &chunk->bitmaps[chunk->hdr.num_elms];
···

 err_free:
 	kho_mem_ser_free(first_chunk);
-	return -ENOMEM;
+	return err;
 }

 static void __init deserialize_bitmap(unsigned int order,
···
  * area for early allocations that happen before page allocator is
  * initialized.
  */
-static struct kho_scratch *kho_scratch;
-static unsigned int kho_scratch_cnt;
+struct kho_scratch *kho_scratch;
+unsigned int kho_scratch_cnt;

 /*
  * The scratch areas are scaled by default as percent of memory allocated from
···
 	const unsigned int order = folio_order(folio);
 	struct kho_mem_track *track = &kho_out.ser.track;

+	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
+		return -EINVAL;
+
 	return __kho_preserve_order(track, pfn, order);
 }
 EXPORT_SYMBOL_GPL(kho_preserve_folio);
···
 	unsigned long pfn = start_pfn;
 	unsigned long failed_pfn = 0;
 	int err = 0;
+
+	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
+					nr_pages << PAGE_SHIFT))) {
+		return -EINVAL;
+	}

 	while (pfn < end_pfn) {
 		const unsigned int order =
···
 	return NULL;
 }

-static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk)
+static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
+					 unsigned short order)
 {
 	struct kho_mem_track *track = &kho_out.ser.track;
 	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));

 	__kho_unpreserve(track, pfn, pfn + 1);

-	for (int i = 0; chunk->phys[i]; i++) {
+	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
 		pfn = PHYS_PFN(chunk->phys[i]);
-		__kho_unpreserve(track, pfn, pfn + 1);
+		__kho_unpreserve(track, pfn, pfn + (1 << order));
 	}
 }

···
 	while (chunk) {
 		struct kho_vmalloc_chunk *tmp = chunk;

-		kho_vmalloc_unpreserve_chunk(chunk);
+		kho_vmalloc_unpreserve_chunk(chunk, kho_vmalloc->order);

 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
 		free_page((unsigned long)tmp);
···
 	while (chunk) {
 		struct page *page;

-		for (int i = 0; chunk->phys[i]; i++) {
+		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
 			phys_addr_t phys = chunk->phys[i];

 			if (idx + contig_pages > total_pages)
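The coverage figures in the reworked comment can be re-derived as follows (reviewer arithmetic, assuming PAGE_SIZE == 4096; not part of the patch):

/*
 * PRESERVE_BITS = PAGE_SIZE * 8 = 32768 bits per bitmap.
 * order 0: one bit per 4 KiB page -> 32768 * 4 KiB = 128 MiB per bitmap,
 *          so 16 GiB of memory needs 16 GiB / 128 MiB = 128 bitmaps = 512 KiB.
 * 1 GiB order: one bit per 1 GiB folio -> an 8 TiB system uses 8192 bits,
 *          which fits in a single PAGE_SIZE (32768-bit) bitmap.
 */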
···
 #define CREATE_TRACE_POINTS
 #include <trace/events/maple_tree.h>

+#define TP_FCT tracepoint_string(__func__)
+
 /*
  * Kernel pointer hashing renders much of the maple tree dump useless as tagged
  * pointers get hashed to arbitrary values.
···
 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);

-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);

 	/*
 	 * Rebalancing occurs if a node is insufficient. Data is rebalanced
···
 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);

-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);

 	mast.l = &l_mas;
 	mast.r = &r_mas;
···
 		return false;
 	}

-	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
+	trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry);
 	return true;
 }

···
 	 * of data may happen.
 	 */
 	mas = wr_mas->mas;
-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);

 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
 		return mas_new_root(mas, wr_mas->entry);
···
 	} else {
 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
 	}
-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
 	mas_update_gap(mas);
 	mas->end = new_end;
 	return;
···
 		mas->offset++; /* Keep mas accurate. */
 	}

-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
 	/*
 	 * Only update gap when the new entry is empty or there is an empty
 	 * entry in the original two ranges.
···
 	mas_update_gap(mas);

 	mas->end = new_end;
-	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry);
 	return;
 }

···
 {
 	struct maple_big_node b_node;

-	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry);
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
 	mas_commit_b_node(wr_mas, &b_node);
···
 {
 	MA_WR_STATE(wr_mas, mas, entry);

-	trace_ma_write(__func__, mas, 0, entry);
+	trace_ma_write(TP_FCT, mas, 0, entry);
 #ifdef CONFIG_DEBUG_MAPLE_TREE
 	if (MAS_WARN_ON(mas, mas->index > mas->last))
 		pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
···
 	}

 store:
-	trace_ma_write(__func__, mas, 0, entry);
+	trace_ma_write(TP_FCT, mas, 0, entry);
 	mas_wr_store_entry(&wr_mas);
 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
 	mas_destroy(mas);
···
 	MA_STATE(mas, mt, index, index);
 	void *entry;

-	trace_ma_read(__func__, &mas);
+	trace_ma_read(TP_FCT, &mas);
 	rcu_read_lock();
 retry:
 	entry = mas_start(&mas);
···
 	MA_STATE(mas, mt, index, last);
 	int ret = 0;

-	trace_ma_write(__func__, &mas, 0, entry);
+	trace_ma_write(TP_FCT, &mas, 0, entry);
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;

···
 	void *entry = NULL;

 	MA_STATE(mas, mt, index, index);
-	trace_ma_op(__func__, &mas);
+	trace_ma_op(TP_FCT, &mas);

 	mtree_lock(mt);
 	entry = mas_erase(&mas);
···
 	unsigned long copy = *index;
 #endif

-	trace_ma_read(__func__, &mas);
+	trace_ma_read(TP_FCT, &mas);

 	if ((*index) > max)
 		return NULL;
+3
lib/test_kho.c
···
 	phys_addr_t fdt_phys;
 	int err;

+	if (!kho_is_enabled())
+		return 0;
+
 	err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys);
 	if (!err)
 		return kho_test_restore(fdt_phys);
+7
mm/Kconfig
···
 config PGTABLE_HAS_HUGE_LEAVES
 	def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE

+#
+# We can end up creating gigantic folio.
+#
+config HAVE_GIGANTIC_FOLIOS
+	def_bool (HUGETLB_PAGE && ARCH_HAS_GIGANTIC_PAGE) || \
+		 (ZONE_DEVICE && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+
 # TODO: Allow to be enabled without THP
 config ARCH_SUPPORTS_HUGE_PFNMAP
 	def_bool n
···
 	return ctx;
 }

+static unsigned long damon_sysfs_next_update_jiffies;
+
 static int damon_sysfs_repeat_call_fn(void *data)
 {
 	struct damon_sysfs_kdamond *sysfs_kdamond = data;
-	static unsigned long next_update_jiffies;

 	if (!sysfs_kdamond->refresh_ms)
 		return 0;
-	if (time_before(jiffies, next_update_jiffies))
+	if (time_before(jiffies, damon_sysfs_next_update_jiffies))
 		return 0;
-	next_update_jiffies = jiffies +
+	damon_sysfs_next_update_jiffies = jiffies +
 		msecs_to_jiffies(sysfs_kdamond->refresh_ms);

 	if (!mutex_trylock(&damon_sysfs_lock))
···
 		return err;
 	}
 	kdamond->damon_ctx = ctx;
+
+	damon_sysfs_next_update_jiffies =
+		jiffies + msecs_to_jiffies(kdamond->refresh_ms);

 	repeat_call_control->fn = damon_sysfs_repeat_call_fn;
 	repeat_call_control->data = kdamond;
+23-10
mm/filemap.c
···
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
-			unsigned long *rss, unsigned short *mmap_miss)
+			unsigned long *rss, unsigned short *mmap_miss,
+			pgoff_t file_end)
 {
+	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
···
 	unsigned long addr0;

 	/*
-	 * Map the large folio fully where possible.
+	 * Map the large folio fully where possible:
 	 *
-	 * The folio must not cross VMA or page table boundary.
+	 * - The folio is fully within size of the file or belong
+	 *   to shmem/tmpfs;
+	 * - The folio doesn't cross VMA boundary;
+	 * - The folio doesn't cross page table boundary;
 	 */
 	addr0 = addr - start * PAGE_SIZE;
-	if (folio_within_vma(folio, vmf->vma) &&
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    folio_within_vma(folio, vmf->vma) &&
 	    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
 		vmf->pte -= start;
 		page -= start;
···
 	if (!folio)
 		goto out;

-	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
+	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+	end_pgoff = min(end_pgoff, file_end);
+
+	/*
+	 * Do not allow to map with PMD across i_size to preserve
+	 * SIGBUS semantics.
+	 *
+	 * Make an exception for shmem/tmpfs that for long time
+	 * intentionally mapped with PMDs across i_size.
+	 */
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
···
 		folio_put(folio);
 		goto out;
 	}
-
-	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
-	if (end_pgoff > file_end)
-		end_pgoff = file_end;

 	folio_type = mm_counter_file(folio);
 	do {
···
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss);
+					nr_pages, &rss, &mmap_miss, file_end);

 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
+37-26
mm/huge_memory.c
···
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 		return true;

-	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO | __GFP_ZEROTAGS) &
+				 ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
 	if (!zero_folio) {
 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
···
 	       caller_pins;
 }

+static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
+{
+	for (; nr_pages; page++, nr_pages--)
+		if (PageHWPoison(page))
+			return true;
+	return false;
+}
+
 /*
  * It splits @folio into @new_order folios and copies the @folio metadata to
  * all the resulting folios.
···
 static void __split_folio_to_order(struct folio *folio, int old_order,
 		int new_order)
 {
+	/* Scan poisoned pages when split a poisoned folio to large folios */
+	const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order;
 	long new_nr_pages = 1 << new_order;
 	long nr_pages = 1 << old_order;
 	long i;

+	folio_clear_has_hwpoisoned(folio);
+
+	/* Check first new_nr_pages since the loop below skips them */
+	if (handle_hwpoison &&
+	    page_range_has_hwpoisoned(folio_page(folio, 0), new_nr_pages))
+		folio_set_has_hwpoisoned(folio);
 	/*
 	 * Skip the first new_nr_pages, since the new folio from them have all
 	 * the flags from the original folio.
 	 */
 	for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
 		struct page *new_head = &folio->page + i;
-
 		/*
 		 * Careful: new_folio is not a "real" folio before we cleared PageTail.
 		 * Don't pass it around before clear_compound_head().
···
 #endif
 			 (1L << PG_dirty) |
 			 LRU_GEN_MASK | LRU_REFS_MASK));
+
+		if (handle_hwpoison &&
+		    page_range_has_hwpoisoned(new_head, new_nr_pages))
+			folio_set_has_hwpoisoned(new_folio);

 		new_folio->mapping = folio->mapping;
 		new_folio->index = folio->index + i;
···
 	if (folio_test_anon(folio))
 		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);

-	folio_clear_has_hwpoisoned(folio);
-
 	/*
 	 * split to new_order one order at a time. For uniform split,
 	 * folio is split to new_order directly.
···
 		/* order-1 is not supported for anonymous THP. */
 		VM_WARN_ONCE(warns && new_order == 1,
 			    "Cannot split to order-1 folio");
-		return new_order != 1;
+		if (new_order == 1)
+			return false;
 	} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 	    !mapping_large_folio_support(folio->mapping)) {
 		/*
···
 	if (folio_test_anon(folio)) {
 		VM_WARN_ONCE(warns && new_order == 1,
 			    "Cannot split to order-1 folio");
-		return new_order != 1;
+		if (new_order == 1)
+			return false;
 	} else if (new_order) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
···
 	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
 		return -EINVAL;

+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this will also currently refuse shmem folios that are in the
+	 * swapcache.
+	 */
+	if (!is_anon && !folio->mapping)
+		return -EBUSY;
+
 	if (new_order >= folio_order(folio))
 		return -EINVAL;

···
 		gfp_t gfp;

 		mapping = folio->mapping;
-
-		/* Truncated ? */
-		/*
-		 * TODO: add support for large shmem folio in swap cache.
-		 * When shmem is in swap cache, mapping is NULL and
-		 * folio_test_swapcache() is true.
-		 */
-		if (!mapping) {
-			ret = -EBUSY;
-			goto out;
-		}
-
 		min_order = mapping_min_folio_order(folio->mapping);
 		if (new_order < min_order) {
-			VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
-				     min_order);
 			ret = -EINVAL;
 			goto out;
 		}
···

 int split_folio_to_list(struct folio *folio, struct list_head *list)
 {
-	int ret = min_order_for_split(folio);
-
-	if (ret < 0)
-		return ret;
-
-	return split_huge_page_to_list_to_order(&folio->page, list, ret);
+	return split_huge_page_to_list_to_order(&folio->page, list, 0);
 }

 /*
···
 						NULL,
 						gfp_mask);
 		if (folio) {
+			u32 hash;
+
+			/*
+			 * Zero the folio to prevent information leaks to userspace.
+			 * Use folio_zero_user() which is optimized for huge/gigantic
+			 * pages. Pass 0 as addr_hint since this is not a faulting path
+			 * and we don't have a user virtual address yet.
+			 */
+			folio_zero_user(folio, 0);
+
+			/*
+			 * Mark the folio uptodate before adding to page cache,
+			 * as required by filemap.c and other hugetlb paths.
+			 */
+			__folio_mark_uptodate(folio);
+
+			/*
+			 * Serialize hugepage allocation and instantiation to prevent
+			 * races with concurrent allocations, as required by all other
+			 * callers of hugetlb_add_to_page_cache().
+			 */
+			hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+			mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
 			err = hugetlb_add_to_page_cache(folio,
 							memfd->f_mapping,
 							idx);
+
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
 			if (err) {
 				folio_put(folio);
 				goto err_unresv;
+19-1
mm/memory.c
···
 #include <linux/gfp.h>
 #include <linux/migrate.h>
 #include <linux/string.h>
+#include <linux/shmem_fs.h>
 #include <linux/memory-tiers.h>
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
···
 		return ret;
 	}

+	if (!needs_fallback && vma->vm_file) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		pgoff_t file_end;
+
+		file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
+
+		/*
+		 * Do not allow to map with PTEs beyond i_size and with PMD
+		 * across i_size to preserve SIGBUS semantics.
+		 *
+		 * Make an exception for shmem/tmpfs that for long time
+		 * intentionally mapped with PMDs across i_size.
+		 */
+		needs_fallback = !shmem_mapping(mapping) &&
+			file_end < folio_next_index(folio);
+	}
+
 	if (pmd_none(*vmf->pmd)) {
-		if (folio_test_pmd_mappable(folio)) {
+		if (!needs_fallback && folio_test_pmd_mappable(folio)) {
 			ret = do_set_pmd(vmf, folio, page);
 			if (ret != VM_FAULT_FALLBACK)
 				return ret;
···
 		if (PTR_ERR(vma) == -EAGAIN) {
 			count_vm_vma_lock_event(VMA_LOCK_MISS);
 			/* The area was replaced with another one */
+			mas_set(&mas, address);
 			goto retry;
 		}

···
 	__folio_mark_uptodate(folio);
 	err = filemap_add_folio(mapping, folio, offset, gfp);
 	if (unlikely(err)) {
-		folio_put(folio);
 		/*
 		 * If a split of large page was required, it
 		 * already happened when we marked the page invalid
 		 * which guarantees that this call won't fail
 		 */
 		set_direct_map_default_noflush(folio_page(folio, 0));
+		folio_put(folio);
 		if (err == -EEXIST)
 			goto retry;

+6-3
mm/shmem.c
···
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long suitable_orders = 0;
 	struct folio *folio = NULL;
+	pgoff_t aligned_index;
 	long pages;
 	int error, order;

···
 		order = highest_order(suitable_orders);
 		while (suitable_orders) {
 			pages = 1UL << order;
-			index = round_down(index, pages);
-			folio = shmem_alloc_folio(gfp, order, info, index);
-			if (folio)
+			aligned_index = round_down(index, pages);
+			folio = shmem_alloc_folio(gfp, order, info, aligned_index);
+			if (folio) {
+				index = aligned_index;
 				goto allocated;
+			}

 			if (pages == HPAGE_PMD_NR)
 				count_vm_event(THP_FILE_FALLBACK);
+5-1
mm/slub.c
···
 	if (slab_exts) {
 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
 						 obj_exts_slab, obj_exts);
-		/* codetag should be NULL */
+
+		if (unlikely(is_codetag_empty(&slab_exts[offs].ref)))
+			return;
+
+		/* codetag should be NULL here */
 		WARN_ON(slab_exts[offs].ref.ct);
 		set_codetag_empty(&slab_exts[offs].ref);
 	}
+13
mm/swap_state.c
···

 	blk_start_plug(&plug);
 	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
+		struct swap_info_struct *si = NULL;
+
 		if (!pte++) {
 			pte = pte_offset_map(vmf->pmd, addr);
 			if (!pte)
···
 			continue;
 		pte_unmap(pte);
 		pte = NULL;
+		/*
+		 * Readahead entry may come from a device that we are not
+		 * holding a reference to, try to grab a reference, or skip.
+		 */
+		if (swp_type(entry) != swp_type(targ_entry)) {
+			si = get_swap_device(entry);
+			if (!si)
+				continue;
+		}
 		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
 						&page_allocated, false);
+		if (si)
+			put_swap_device(si);
 		if (!folio)
 			continue;
 		if (page_allocated) {
···
 	return 0;
 }

+static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
+				    unsigned long min_order)
+{
+	enum ttu_flags ttu_flags =
+		TTU_SYNC |
+		TTU_SPLIT_HUGE_PMD |
+		TTU_IGNORE_MLOCK;
+	int ret;
+
+	ret = try_folio_split_to_order(folio, split_at, min_order);
+
+	/*
+	 * If the split fails, unmap the folio, so it will be refaulted
+	 * with PTEs to respect SIGBUS semantics.
+	 *
+	 * Make an exception for shmem/tmpfs that for long time
+	 * intentionally mapped with PMDs across i_size.
+	 */
+	if (ret && !shmem_mapping(folio->mapping)) {
+		try_to_unmap(folio, ttu_flags);
+		WARN_ON(folio_mapped(folio));
+	}
+
+	return ret;
+}
+
 /*
  * Handle partial folios. The folio may be entirely within the
  * range if a split has raced with us. If not, we zero the part of the
···
 	size_t size = folio_size(folio);
 	unsigned int offset, length;
 	struct page *split_at, *split_at2;
+	unsigned int min_order;

 	if (pos < start)
 		offset = start - pos;
···
 	if (!folio_test_large(folio))
 		return true;

+	min_order = mapping_min_folio_order(folio->mapping);
 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
-	if (!try_folio_split(folio, split_at, NULL)) {
+	if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
 		/*
 		 * try to split at offset + length to make sure folios within
 		 * the range can be dropped, especially to avoid memory waste
···
 		if (!folio_trylock(folio2))
 			goto out;

-		/*
-		 * make sure folio2 is large and does not change its mapping.
-		 * Its split result does not matter here.
-		 */
+		/* make sure folio2 is large and does not change its mapping */
 		if (folio_test_large(folio2) &&
 		    folio2->mapping == folio->mapping)
-			try_folio_split(folio2, split_at2, NULL);
+			try_folio_split_or_unmap(folio2, split_at2, min_order);

 		folio_unlock(folio2);
 out:
+8-6
scripts/decode_stacktrace.sh
···
 		fi
 	done

-	if [[ ${words[$last]} =~ ^[0-9a-f]+\] ]]; then
-		words[$last-1]="${words[$last-1]} ${words[$last]}"
-		unset words[$last] spaces[$last]
-		last=$(( $last - 1 ))
-	fi
-
 	# Extract info after the symbol if present. E.g.:
 	# func_name+0x54/0x80 (P)
 	#                     ^^^
···
 	local info_str=""
 	if [[ ${words[$last]} =~ \([A-Z]*\) ]]; then
 		info_str=${words[$last]}
+		unset words[$last] spaces[$last]
+		last=$(( $last - 1 ))
+	fi
+
+	# Join module name with its build id if present, as these were
+	# split during tokenization (e.g. "[module" and "modbuildid]").
+	if [[ ${words[$last]} =~ ^[0-9a-f]+\] ]]; then
+		words[$last-1]="${words[$last-1]} ${words[$last]}"
 		unset words[$last] spaces[$last]
 		last=$(( $last - 1 ))
 	fi
+7-8
tools/testing/selftests/mm/uffd-unit-tests.c
···
 		uffd_test_ops = mem_type->mem_ops;
 		uffd_test_case_ops = test->test_case_ops;

-		if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
+		if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
 			gopts.page_size = default_huge_page_size();
-		else
+			if (gopts.page_size == 0) {
+				uffd_test_skip("huge page size is 0, feature missing?");
+				continue;
+			}
+		} else {
 			gopts.page_size = psize();
+		}

 		/* Ensure we have at least 2 pages */
 		gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
···
 			continue;

 		uffd_test_start("%s on %s", test->name, mem_type->name);
-		if ((mem_type->mem_flag == MEM_HUGETLB ||
-		    mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
-		    (default_huge_page_size() == 0)) {
-			uffd_test_skip("huge page size is 0, feature missing?");
-			continue;
-		}
 		if (!uffd_feature_supported(test)) {
 			uffd_test_skip("feature missing");
 			continue;