Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

alloc_tag: support for page allocation tag compression

Implement support for storing page allocation tag references directly in
the page flags instead of page extensions. The sysctl.vm.mem_profiling boot
parameter is extended to provide a way for a user to request this mode.
Enabling compression eliminates memory overhead caused by page_ext and
results in better performance for page allocations. However, this mode
will not work if the number of available page flag bits is insufficient to
address all kernel allocations. Such a condition can happen during boot or
when loading a module. If this condition is detected, memory allocation
profiling gets disabled with an appropriate warning. By default,
compression mode is disabled.

Link: https://lkml.kernel.org/r/20241023170759.999909-7-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Pavlu <petr.pavlu@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xiongwei Song <xiongwei.song@windriver.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Suren Baghdasaryan and committed by
Andrew Morton
4835f747 42895a86

+289 -32
+6 -1
Documentation/mm/allocation-profiling.rst
··· 18 18 missing annotation 19 19 20 20 Boot parameter: 21 - sysctl.vm.mem_profiling=0|1|never 21 + sysctl.vm.mem_profiling={0|1|never}[,compressed] 22 22 23 23 When set to "never", memory allocation profiling overhead is minimized and it 24 24 cannot be enabled at runtime (sysctl becomes read-only). 25 25 When CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT=y, default value is "1". 26 26 When CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT=n, default value is "never". 27 + "compressed" optional parameter will try to store page tag references in a 28 + compact format, avoiding page extensions. This results in improved performance 29 + and memory consumption, however it might fail depending on system configuration. 30 + If compression fails, a warning is issued and memory allocation profiling gets 31 + disabled. 27 32 28 33 sysctl: 29 34 /proc/sys/vm/mem_profiling
+9 -1
include/linux/alloc_tag.h
··· 30 30 struct alloc_tag_counters __percpu *counters; 31 31 } __aligned(8); 32 32 33 + struct alloc_tag_kernel_section { 34 + struct alloc_tag *first_tag; 35 + unsigned long count; 36 + }; 37 + 33 38 struct alloc_tag_module_section { 34 - unsigned long start_addr; 39 + union { 40 + unsigned long start_addr; 41 + struct alloc_tag *first_tag; 42 + }; 35 43 unsigned long end_addr; 36 44 /* used size */ 37 45 unsigned long size;
+3
include/linux/codetag.h
··· 13 13 struct seq_buf; 14 14 struct module; 15 15 16 + #define CODETAG_SECTION_START_PREFIX "__start_" 17 + #define CODETAG_SECTION_STOP_PREFIX "__stop_" 18 + 16 19 /* 17 20 * An instance of this structure is created in a special ELF section at every 18 21 * code location being tagged. At runtime, the special section is treated as
+7
include/linux/page-flags-layout.h
··· 111 111 ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \ 112 112 NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH) 113 113 114 + #define NR_NON_PAGEFLAG_BITS (SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + \ 115 + LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH + \ 116 + LRU_GEN_WIDTH + LRU_REFS_WIDTH) 117 + 118 + #define NR_UNUSED_PAGEFLAG_BITS (BITS_PER_LONG - \ 119 + (NR_NON_PAGEFLAG_BITS + NR_PAGEFLAGS)) 120 + 114 121 #endif 115 122 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
+127 -16
include/linux/pgalloc_tag.h
··· 11 11 12 12 #include <linux/page_ext.h> 13 13 14 + extern struct page_ext_operations page_alloc_tagging_ops; 15 + extern unsigned long alloc_tag_ref_mask; 16 + extern int alloc_tag_ref_offs; 17 + extern struct alloc_tag_kernel_section kernel_tags; 18 + 19 + DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed); 20 + 21 + typedef u16 pgalloc_tag_idx; 22 + 14 23 union pgtag_ref_handle { 15 24 union codetag_ref *ref; /* reference in page extension */ 25 + struct page *page; /* reference in page flags */ 16 26 }; 17 27 18 - extern struct page_ext_operations page_alloc_tagging_ops; 28 + /* Reserved indexes */ 29 + #define CODETAG_ID_NULL 0 30 + #define CODETAG_ID_EMPTY 1 31 + #define CODETAG_ID_FIRST 2 32 + 33 + #ifdef CONFIG_MODULES 34 + 35 + extern struct alloc_tag_module_section module_tags; 36 + 37 + static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) 38 + { 39 + return &module_tags.first_tag[idx - kernel_tags.count]; 40 + } 41 + 42 + static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag) 43 + { 44 + return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag); 45 + } 46 + 47 + #else /* CONFIG_MODULES */ 48 + 49 + static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) 50 + { 51 + pr_warn("invalid page tag reference %lu\n", (unsigned long)idx); 52 + return NULL; 53 + } 54 + 55 + static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag) 56 + { 57 + pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag); 58 + return CODETAG_ID_NULL; 59 + } 60 + 61 + #endif /* CONFIG_MODULES */ 62 + 63 + static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref) 64 + { 65 + switch (idx) { 66 + case (CODETAG_ID_NULL): 67 + ref->ct = NULL; 68 + break; 69 + case (CODETAG_ID_EMPTY): 70 + set_codetag_empty(ref); 71 + break; 72 + default: 73 + idx -= CODETAG_ID_FIRST; 74 + ref->ct = idx < kernel_tags.count ? 
75 + &kernel_tags.first_tag[idx].ct : 76 + &module_idx_to_tag(idx)->ct; 77 + break; 78 + } 79 + } 80 + 81 + static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref) 82 + { 83 + struct alloc_tag *tag; 84 + 85 + if (!ref->ct) 86 + return CODETAG_ID_NULL; 87 + 88 + if (is_codetag_empty(ref)) 89 + return CODETAG_ID_EMPTY; 90 + 91 + tag = ct_to_alloc_tag(ref->ct); 92 + if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count) 93 + return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag); 94 + 95 + return module_tag_to_idx(tag); 96 + } 97 + 98 + 19 99 20 100 /* Should be called only if mem_alloc_profiling_enabled() */ 21 101 static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref, 22 102 union pgtag_ref_handle *handle) 23 103 { 24 - struct page_ext *page_ext; 25 - union codetag_ref *tmp; 26 - 27 104 if (!page) 28 105 return false; 29 106 30 - page_ext = page_ext_get(page); 31 - if (!page_ext) 32 - return false; 107 + if (static_key_enabled(&mem_profiling_compressed)) { 108 + pgalloc_tag_idx idx; 33 109 34 - tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); 35 - ref->ct = tmp->ct; 36 - handle->ref = tmp; 110 + idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask; 111 + idx_to_ref(idx, ref); 112 + handle->page = page; 113 + } else { 114 + struct page_ext *page_ext; 115 + union codetag_ref *tmp; 116 + 117 + page_ext = page_ext_get(page); 118 + if (!page_ext) 119 + return false; 120 + 121 + tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); 122 + ref->ct = tmp->ct; 123 + handle->ref = tmp; 124 + } 125 + 37 126 return true; 38 127 } 39 128 ··· 131 42 if (WARN_ON(!handle.ref)) 132 43 return; 133 44 134 - page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset); 45 + if (!static_key_enabled(&mem_profiling_compressed)) 46 + page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset); 135 47 } 136 48 137 - static inline void update_page_tag_ref(union 
pgtag_ref_handle handle, 138 - union codetag_ref *ref) 49 + static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref) 139 50 { 140 - if (WARN_ON(!handle.ref || !ref)) 141 - return; 51 + if (static_key_enabled(&mem_profiling_compressed)) { 52 + struct page *page = handle.page; 53 + unsigned long old_flags; 54 + unsigned long flags; 55 + unsigned long idx; 142 56 143 - handle.ref->ct = ref->ct; 57 + if (WARN_ON(!page || !ref)) 58 + return; 59 + 60 + idx = (unsigned long)ref_to_idx(ref); 61 + idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs; 62 + do { 63 + old_flags = READ_ONCE(page->flags); 64 + flags = old_flags; 65 + flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs); 66 + flags |= idx; 67 + } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); 68 + } else { 69 + if (WARN_ON(!handle.ref || !ref)) 70 + return; 71 + 72 + handle.ref->ct = ref->ct; 73 + } 144 74 } 145 75 146 76 static inline void clear_page_tag_ref(struct page *page) ··· 230 122 this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); 231 123 } 232 124 125 + void __init alloc_tag_sec_init(void); 126 + 233 127 #else /* CONFIG_MEM_ALLOC_PROFILING */ 234 128 235 129 static inline void clear_page_tag_ref(struct page *page) {} ··· 240 130 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} 241 131 static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } 242 132 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} 133 + static inline void alloc_tag_sec_init(void) {} 243 134 244 135 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 245 136
+133 -9
lib/alloc_tag.c
··· 3 3 #include <linux/execmem.h> 4 4 #include <linux/fs.h> 5 5 #include <linux/gfp.h> 6 + #include <linux/kallsyms.h> 6 7 #include <linux/module.h> 7 8 #include <linux/page_ext.h> 8 9 #include <linux/proc_fs.h> ··· 13 12 14 13 #define ALLOCINFO_FILE_NAME "allocinfo" 15 14 #define MODULE_ALLOC_TAG_VMAP_SIZE (100000UL * sizeof(struct alloc_tag)) 15 + #define SECTION_START(NAME) (CODETAG_SECTION_START_PREFIX NAME) 16 + #define SECTION_STOP(NAME) (CODETAG_SECTION_STOP_PREFIX NAME) 16 17 17 18 #ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT 18 19 static bool mem_profiling_support = true; ··· 29 26 30 27 DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, 31 28 mem_alloc_profiling_key); 29 + DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed); 30 + 31 + struct alloc_tag_kernel_section kernel_tags = { NULL, 0 }; 32 + unsigned long alloc_tag_ref_mask; 33 + int alloc_tag_ref_offs; 32 34 33 35 struct allocinfo_private { 34 36 struct codetag_iterator iter; ··· 163 155 return nr; 164 156 } 165 157 166 - static void shutdown_mem_profiling(void) 158 + static void shutdown_mem_profiling(bool remove_file) 167 159 { 168 160 if (mem_alloc_profiling_enabled()) 169 161 static_branch_disable(&mem_alloc_profiling_key); ··· 171 163 if (!mem_profiling_support) 172 164 return; 173 165 166 + if (remove_file) 167 + remove_proc_entry(ALLOCINFO_FILE_NAME, NULL); 174 168 mem_profiling_support = false; 175 169 } 176 170 ··· 183 173 184 174 if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) { 185 175 pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME); 186 - shutdown_mem_profiling(); 176 + shutdown_mem_profiling(false); 187 177 } 178 + } 179 + 180 + void __init alloc_tag_sec_init(void) 181 + { 182 + struct alloc_tag *last_codetag; 183 + 184 + if (!mem_profiling_support) 185 + return; 186 + 187 + if (!static_key_enabled(&mem_profiling_compressed)) 188 + return; 189 + 190 + kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name( 191 + 
SECTION_START(ALLOC_TAG_SECTION_NAME)); 192 + last_codetag = (struct alloc_tag *)kallsyms_lookup_name( 193 + SECTION_STOP(ALLOC_TAG_SECTION_NAME)); 194 + kernel_tags.count = last_codetag - kernel_tags.first_tag; 195 + 196 + /* Check if kernel tags fit into page flags */ 197 + if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) { 198 + shutdown_mem_profiling(false); /* allocinfo file does not exist yet */ 199 + pr_err("%lu allocation tags cannot be references using %d available page flag bits. Memory allocation profiling is disabled!\n", 200 + kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS); 201 + return; 202 + } 203 + 204 + alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS); 205 + alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1); 206 + pr_debug("Memory allocation profiling compression is using %d page flag bits!\n", 207 + NR_UNUSED_PAGEFLAG_BITS); 188 208 } 189 209 190 210 #ifdef CONFIG_MODULES ··· 226 186 /* A dummy object used to indicate a module prepended area */ 227 187 static struct module prepend_mod; 228 188 229 - static struct alloc_tag_module_section module_tags; 189 + struct alloc_tag_module_section module_tags; 190 + 191 + static inline unsigned long alloc_tag_align(unsigned long val) 192 + { 193 + if (!static_key_enabled(&mem_profiling_compressed)) { 194 + /* No alignment requirements when we are not indexing the tags */ 195 + return val; 196 + } 197 + 198 + if (val % sizeof(struct alloc_tag) == 0) 199 + return val; 200 + return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag); 201 + } 202 + 203 + static bool ensure_alignment(unsigned long align, unsigned int *prepend) 204 + { 205 + if (!static_key_enabled(&mem_profiling_compressed)) { 206 + /* No alignment requirements when we are not indexing the tags */ 207 + return true; 208 + } 209 + 210 + /* 211 + * If alloc_tag size is not a multiple of required alignment, tag 212 + * indexing does not work. 
213 + */ 214 + if (!IS_ALIGNED(sizeof(struct alloc_tag), align)) 215 + return false; 216 + 217 + /* Ensure prepend consumes multiple of alloc_tag-sized blocks */ 218 + if (*prepend) 219 + *prepend = alloc_tag_align(*prepend); 220 + 221 + return true; 222 + } 223 + 224 + static inline bool tags_addressable(void) 225 + { 226 + unsigned long tag_idx_count; 227 + 228 + if (!static_key_enabled(&mem_profiling_compressed)) 229 + return true; /* with page_ext tags are always addressable */ 230 + 231 + tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count + 232 + module_tags.size / sizeof(struct alloc_tag); 233 + 234 + return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS); 235 + } 230 236 231 237 static bool needs_section_mem(struct module *mod, unsigned long size) 232 238 { 239 + if (!mem_profiling_support) 240 + return false; 241 + 233 242 return size >= sizeof(struct alloc_tag); 234 243 } 235 244 ··· 389 300 if (!align) 390 301 align = 1; 391 302 303 + if (!ensure_alignment(align, &prepend)) { 304 + shutdown_mem_profiling(true); 305 + pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n", 306 + mod->name, align); 307 + return ERR_PTR(-EINVAL); 308 + } 309 + 392 310 mas_lock(&mas); 393 311 if (!find_aligned_area(&mas, section_size, size, prepend, align)) { 394 312 ret = ERR_PTR(-ENOMEM); ··· 439 343 int grow_res; 440 344 441 345 module_tags.size = offset + size; 346 + if (mem_alloc_profiling_enabled() && !tags_addressable()) { 347 + shutdown_mem_profiling(true); 348 + pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n", 349 + mod->name, NR_UNUSED_PAGEFLAG_BITS); 350 + } 351 + 442 352 grow_res = vm_module_tags_populate(); 443 353 if (grow_res) { 444 - shutdown_mem_profiling(); 354 + shutdown_mem_profiling(true); 445 355 pr_err("Failed to allocate memory for allocation tags in the module %s. 
Memory allocation profiling is disabled!\n", 446 356 mod->name); 447 357 return ERR_PTR(grow_res); ··· 531 429 532 430 module_tags.start_addr = (unsigned long)vm_module_tags->addr; 533 431 module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE; 432 + /* Ensure the base is alloc_tag aligned when required for indexing */ 433 + module_tags.start_addr = alloc_tag_align(module_tags.start_addr); 534 434 535 435 return 0; 536 436 } ··· 555 451 556 452 #endif /* CONFIG_MODULES */ 557 453 454 + /* See: Documentation/mm/allocation-profiling.rst */ 558 455 static int __init setup_early_mem_profiling(char *str) 559 456 { 457 + bool compressed = false; 560 458 bool enable; 561 459 562 460 if (!str || !str[0]) ··· 567 461 if (!strncmp(str, "never", 5)) { 568 462 enable = false; 569 463 mem_profiling_support = false; 464 + pr_info("Memory allocation profiling is disabled!\n"); 570 465 } else { 571 - int res; 466 + char *token = strsep(&str, ","); 572 467 573 - res = kstrtobool(str, &enable); 574 - if (res) 575 - return res; 468 + if (kstrtobool(token, &enable)) 469 + return -EINVAL; 576 470 471 + if (str) { 472 + 473 + if (strcmp(str, "compressed")) 474 + return -EINVAL; 475 + 476 + compressed = true; 477 + } 577 478 mem_profiling_support = true; 479 + pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n", 480 + compressed ? "with" : "without", enable ? 
"on" : "off"); 578 481 } 579 482 580 - if (enable != static_key_enabled(&mem_alloc_profiling_key)) { 483 + if (enable != mem_alloc_profiling_enabled()) { 581 484 if (enable) 582 485 static_branch_enable(&mem_alloc_profiling_key); 583 486 else 584 487 static_branch_disable(&mem_alloc_profiling_key); 488 + } 489 + if (compressed != static_key_enabled(&mem_profiling_compressed)) { 490 + if (compressed) 491 + static_branch_enable(&mem_profiling_compressed); 492 + else 493 + static_branch_disable(&mem_profiling_compressed); 585 494 } 586 495 587 496 return 0; ··· 605 484 606 485 static __init bool need_page_alloc_tagging(void) 607 486 { 487 + if (static_key_enabled(&mem_profiling_compressed)) 488 + return false; 489 + 608 490 return mem_profiling_support; 609 491 } 610 492
+2 -2
lib/codetag.c
··· 149 149 const char *section) 150 150 { 151 151 return (struct codetag_range) { 152 - get_symbol(mod, "__start_", section), 153 - get_symbol(mod, "__stop_", section), 152 + get_symbol(mod, CODETAG_SECTION_START_PREFIX, section), 153 + get_symbol(mod, CODETAG_SECTION_STOP_PREFIX, section), 154 154 }; 155 155 } 156 156
+2 -3
mm/mm_init.c
··· 83 83 unsigned long or_mask, add_mask; 84 84 85 85 shift = BITS_PER_LONG; 86 - width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH 87 - - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; 86 + width = shift - NR_NON_PAGEFLAG_BITS; 88 87 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", 89 88 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", 90 89 SECTIONS_WIDTH, ··· 2638 2639 BUILD_BUG_ON(MAX_ZONELISTS > 2); 2639 2640 build_all_zonelists(NULL); 2640 2641 page_alloc_init_cpuhp(); 2641 - 2642 + alloc_tag_sec_init(); 2642 2643 /* 2643 2644 * page_ext requires contiguous pages, 2644 2645 * bigger than MAX_PAGE_ORDER unless SPARSEMEM.