Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86-32, percpu: Correct the ordering of the percpu readmostly section
x86, mm: Enable ARCH_DMA_ADDR_T_64BIT with X86_64 || HIGHMEM64G
x86: Spread tlb flush vector between nodes
percpu: Introduce a read-mostly percpu API
x86, mm: Fix incorrect data type in vmalloc_sync_all()
x86, mm: Hold mm->page_table_lock while doing vmalloc_sync
x86, mm: Fix bogus whitespace in sync_global_pgds()
x86-32: Fix sparse warning for the __PHYSICAL_MASK calculation
x86, mm: Add RESERVE_BRK_ARRAY() helper
mm, x86: Saving vmcore with non-lazy freeing of vmas
x86, kdump: Change copy_oldmem_page() to use cached addressing
x86, mm: fix uninitialized addr in kernel_physical_mapping_init()
x86, kmemcheck: Remove double test
x86, mm: Make spurious_fault check explicitly check the PRESENT bit
x86-64, mem: Update all PGDs for direct mapping and vmemmap mapping changes
x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
x86, mm: Avoid unnecessary TLB flush

 17 files changed, 174 insertions(+), 34 deletions(-)
arch/x86/Kconfig | +3
···
 config ARCH_PHYS_ADDR_T_64BIT
         def_bool X86_64 || X86_PAE
 
+config ARCH_DMA_ADDR_T_64BIT
+        def_bool X86_64 || HIGHMEM64G
+
 config DIRECT_GBPAGES
         bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
         default y

arch/x86/include/asm/io.h | +1
···
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
arch/x86/include/asm/page_types.h | +1 -1
···
 #define PAGE_SIZE       (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK       (~(PAGE_SIZE-1))
 
-#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK  ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
 /* Cast PAGE_MASK to a signed type so that it is sign-extended if

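The __PHYSICAL_MASK change only moves the cast; the resulting value is unchanged. A userspace sketch of the arithmetic, assuming the 32-bit non-PAE case where phys_addr_t is 32 bits and __PHYSICAL_MASK_SHIFT is 32 (the configuration that triggers the sparse warning):

/* Illustration only, not kernel code: both forms evaluate to 0xffffffff,
 * but the old form truncates 1ULL << 32 down to 0 before subtracting,
 * which is the truncating cast sparse complained about. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t old_mask = (uint32_t)(1ULL << 32) - 1;   /* truncate, then subtract */
        uint32_t new_mask = (uint32_t)((1ULL << 32) - 1); /* subtract, then truncate */

        printf("%#x %#x\n", old_mask, new_mask);          /* 0xffffffff 0xffffffff */
        return 0;
}
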
arch/x86/include/asm/pgtable.h | +4
···
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
 
+extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
···
         clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
         pte_update(mm, addr, ptep);
 }
+
+#define flush_tlb_fix_spurious_fault(vma, address)
 
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);

arch/x86/include/asm/pgtable_64.h | +2
···
         native_set_pgd(pgd, native_make_pgd(0));
 }
 
+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.

arch/x86/include/asm/setup.h | +5
···
                              : : "i" (sz));             \
         }
 
+/* Helper for reserving space for arrays of things */
+#define RESERVE_BRK_ARRAY(type, name, entries)          \
+        type *name;                                     \
+        RESERVE_BRK(name, sizeof(type) * entries)
+
 #ifdef __i386__
 
 void __init i386_start_kernel(void);

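As a usage illustration of the new helper (the struct, names, and size below are hypothetical; the extend_brk() pairing follows the existing RESERVE_BRK pattern rather than anything added in this merge):

/* Hypothetical sketch: reserve .brk space for 16 entries at link time,
 * then carve the memory out during early boot with extend_brk(). */
struct foo {
        unsigned long val;
};

RESERVE_BRK_ARRAY(struct foo, foo_table, 16);

static void __init foo_early_init(void)
{
        foo_table = extend_brk(sizeof(struct foo) * 16,
                               __alignof__(struct foo));
}
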
arch/x86/kernel/crash_dump_64.c | +2 -1
···
         if (!csize)
                 return 0;
 
-        vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+        vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
         if (!vaddr)
                 return -ENOMEM;
 
···
         } else
                 memcpy(buf, vaddr + offset, csize);
 
+        set_iounmap_nonlazy();
         iounmap(vaddr);
         return csize;
 }

arch/x86/mm/fault.c | +18 -25
···
 
                 spin_lock_irqsave(&pgd_lock, flags);
                 list_for_each_entry(page, &pgd_list, lru) {
-                        if (!vmalloc_sync_one(page_address(page), address))
+                        spinlock_t *pgt_lock;
+                        pmd_t *ret;
+
+                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+                        spin_lock(pgt_lock);
+                        ret = vmalloc_sync_one(page_address(page), address);
+                        spin_unlock(pgt_lock);
+
+                        if (!ret)
                                 break;
                 }
                 spin_unlock_irqrestore(&pgd_lock, flags);
···
 
 void vmalloc_sync_all(void)
 {
-        unsigned long address;
-
-        for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-             address += PGDIR_SIZE) {
-
-                const pgd_t *pgd_ref = pgd_offset_k(address);
-                unsigned long flags;
-                struct page *page;
-
-                if (pgd_none(*pgd_ref))
-                        continue;
-
-                spin_lock_irqsave(&pgd_lock, flags);
-                list_for_each_entry(page, &pgd_list, lru) {
-                        pgd_t *pgd;
-                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                        if (pgd_none(*pgd))
-                                set_pgd(pgd, *pgd_ref);
-                        else
-                                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-                }
-                spin_unlock_irqrestore(&pgd_lock, flags);
-        }
+        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
 /*
···
         if (pmd_large(*pmd))
                 return spurious_fault_check(error_code, (pte_t *) pmd);
 
+        /*
+         * Note: don't use pte_present() here, since it returns true
+         * if the _PAGE_PROTNONE bit is set.  However, this aliases the
+         * _PAGE_GLOBAL bit, which for kernel pages give false positives
+         * when CONFIG_DEBUG_PAGEALLOC is used.
+         */
         pte = pte_offset_kernel(pmd, address);
-        if (!pte_present(*pte))
+        if (!(pte_flags(*pte) & _PAGE_PRESENT))
                 return 0;
 
         ret = spurious_fault_check(error_code, pte);

arch/x86/mm/init_64.c | +46 -1
···
 __setup("noexec32=", nonx32_setup);
 
 /*
+ * When memory was added/removed make sure all the processes MM have
+ * suitable PGD entries in the local PGD level page.
+ */
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+        unsigned long address;
+
+        for (address = start; address <= end; address += PGDIR_SIZE) {
+                const pgd_t *pgd_ref = pgd_offset_k(address);
+                unsigned long flags;
+                struct page *page;
+
+                if (pgd_none(*pgd_ref))
+                        continue;
+
+                spin_lock_irqsave(&pgd_lock, flags);
+                list_for_each_entry(page, &pgd_list, lru) {
+                        pgd_t *pgd;
+                        spinlock_t *pgt_lock;
+
+                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                        spin_lock(pgt_lock);
+
+                        if (pgd_none(*pgd))
+                                set_pgd(pgd, *pgd_ref);
+                        else
+                                BUG_ON(pgd_page_vaddr(*pgd)
+                                       != pgd_page_vaddr(*pgd_ref));
+
+                        spin_unlock(pgt_lock);
+                }
+                spin_unlock_irqrestore(&pgd_lock, flags);
+        }
+}
+
+/*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
  */
···
                              unsigned long end,
                              unsigned long page_size_mask)
 {
-
+        bool pgd_changed = false;
         unsigned long next, last_map_addr = end;
+        unsigned long addr;
 
         start = (unsigned long)__va(start);
         end = (unsigned long)__va(end);
+        addr = start;
 
         for (; start < end; start = next) {
                 pgd_t *pgd = pgd_offset_k(start);
···
                 spin_lock(&init_mm.page_table_lock);
                 pgd_populate(&init_mm, pgd, __va(pud_phys));
                 spin_unlock(&init_mm.page_table_lock);
+                pgd_changed = true;
         }
+
+        if (pgd_changed)
+                sync_global_pgds(addr, end);
+
         __flush_tlb_all();
 
         return last_map_addr;
···
                 }
 
         }
+        sync_global_pgds((unsigned long)start_page, end);
         return 0;
 }
 
arch/x86/mm/kmemcheck/opcode.c | +1 -1
···
                 b == 0xf0 || b == 0xf2 || b == 0xf3
                 /* Group 2 */
                 || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
-                || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e
+                || b == 0x64 || b == 0x65
                 /* Group 3 */
                 || b == 0x66
                 /* Group 4 */

arch/x86/mm/pgtable.c | +17 -3
···
 #define UNSHARED_PTRS_PER_PGD                           \
         (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
-static void pgd_ctor(pgd_t *pgd)
+
+static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
+        virt_to_page(pgd)->index = (pgoff_t)mm;
+}
+
+struct mm_struct *pgd_page_get_mm(struct page *page)
+{
+        return (struct mm_struct *)page->index;
+}
+
+static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
 {
         /* If the pgd points to a shared pagetable level (either the
            ptes in non-PAE, or shared PMD in PAE), then just copy the
···
         }
 
         /* list required to sync kernel mapping updates */
-        if (!SHARED_KERNEL_PMD)
+        if (!SHARED_KERNEL_PMD) {
+                pgd_set_mm(pgd, mm);
                 pgd_list_add(pgd);
+        }
 }
 
 static void pgd_dtor(pgd_t *pgd)
···
          */
         spin_lock_irqsave(&pgd_lock, flags);
 
-        pgd_ctor(pgd);
+        pgd_ctor(mm, pgd);
         pgd_prepopulate_pmd(mm, pgd, pmds);
 
         spin_unlock_irqrestore(&pgd_lock, flags);

arch/x86/mm/tlb.c | +47 -1
···
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
···
    to a full cache line because other CPUs can access it and we don't
    want false sharing in the per cpu data segment. */
 static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
+
+static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
···
         union smp_flush_state *f;
 
         /* Caller has disabled preemption */
-        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+        sender = this_cpu_read(tlb_vector_offset);
         f = &flush_state[sender];
 
         /*
···
         flush_tlb_others_ipi(cpumask, mm, va);
 }
 
+static void __cpuinit calculate_tlb_offset(void)
+{
+        int cpu, node, nr_node_vecs;
+        /*
+         * we are changing tlb_vector_offset for each CPU in runtime, but this
+         * will not cause inconsistency, as the write is atomic under X86. we
+         * might see more lock contentions in a short time, but after all CPU's
+         * tlb_vector_offset are changed, everything should go normal
+         *
+         * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might
+         * waste some vectors.
+         **/
+        if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
+                nr_node_vecs = 1;
+        else
+                nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
+
+        for_each_online_node(node) {
+                int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
+                        nr_node_vecs;
+                int cpu_offset = 0;
+                for_each_cpu(cpu, cpumask_of_node(node)) {
+                        per_cpu(tlb_vector_offset, cpu) = node_offset +
+                                cpu_offset;
+                        cpu_offset++;
+                        cpu_offset = cpu_offset % nr_node_vecs;
+                }
+        }
+}
+
+static int tlb_cpuhp_notify(struct notifier_block *n,
+                unsigned long action, void *hcpu)
+{
+        switch (action & 0xf) {
+        case CPU_ONLINE:
+        case CPU_DEAD:
+                calculate_tlb_offset();
+        }
+        return NOTIFY_OK;
+}
+
 static int __cpuinit init_smp_flush(void)
 {
         int i;
···
         for (i = 0; i < ARRAY_SIZE(flush_state); i++)
                 raw_spin_lock_init(&flush_state[i].tlbstate_lock);
 
+        calculate_tlb_offset();
+        hotcpu_notifier(tlb_cpuhp_notify, 0);
         return 0;
 }
 core_initcall(init_smp_flush);

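To see how calculate_tlb_offset() spreads the flush vectors across nodes, here is a standalone worked example (userspace sketch with a hypothetical 2-node / 8-CPU / 8-vector layout, not the kernel's actual NUM_INVALIDATE_TLB_VECTORS value):

/* Illustration only: mirror the offset math for 2 nodes, 4 CPUs each,
 * and 8 invalidate vectors. Node 0 lands on vectors 0-3, node 1 on 4-7,
 * so flush IPIs from different nodes stop contending for one vector. */
#include <stdio.h>

#define NUM_VECTORS 8

int main(void)
{
        int nodes = 2, cpus_per_node = 4;
        int nr_node_vecs = NUM_VECTORS / nodes;         /* 4 vectors per node */

        for (int node = 0; node < nodes; node++) {
                int node_offset = (node % NUM_VECTORS) * nr_node_vecs;

                for (int cpu = 0; cpu < cpus_per_node; cpu++)
                        printf("node %d cpu %d -> vector %d\n",
                               node, cpu, node_offset + cpu % nr_node_vecs);
        }
        return 0;
}
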
include/asm-generic/pgtable.h | +4
···
 #define move_pte(pte, prot, old_addr, new_addr)  (pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot)  (prot)
 #endif

include/asm-generic/vmlinux.lds.h | +4
···
         - LOAD_OFFSET) {                                        \
         VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
         *(.data..percpu..first)                                 \
+        . = ALIGN(PAGE_SIZE);                                   \
         *(.data..percpu..page_aligned)                          \
+        *(.data..percpu..readmostly)                            \
         *(.data..percpu)                                        \
         *(.data..percpu..shared_aligned)                        \
         VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
···
         VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
         VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
         *(.data..percpu..first)                                 \
+        . = ALIGN(PAGE_SIZE);                                   \
         *(.data..percpu..page_aligned)                          \
+        *(.data..percpu..readmostly)                            \
         *(.data..percpu)                                        \
         *(.data..percpu..shared_aligned)                        \
         VMLINUX_SYMBOL(__per_cpu_end) = .;                      \

include/linux/percpu-defs.h | +9
···
         __aligned(PAGE_SIZE)
 
 /*
+ * Declaration/definition used for per-CPU variables that must be read mostly.
+ */
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name)                 \
+        DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+
+#define DEFINE_PER_CPU_READ_MOSTLY(type, name)                  \
+        DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+
+/*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
  * noop if __CHECKER__.

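For reference, a minimal sketch of how the new read-mostly per-CPU API is meant to be used (the names below are made up; the real first user is the tlb_vector_offset variable added in arch/x86/mm/tlb.c above):

/* Hypothetical sketch: a per-CPU value written rarely (at CPU bring-up)
 * but read in hot paths, so it belongs in .data..percpu..readmostly
 * rather than sharing cache lines with frequently written per-CPU data. */
static DEFINE_PER_CPU_READ_MOSTLY(int, example_offset);

static void example_set(int cpu, int value)
{
        per_cpu(example_offset, cpu) = value;           /* rare write */
}

static int example_get(void)
{
        return this_cpu_read(example_offset);           /* hot-path read */
}
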
mm/memory.c | +1 -1
···
                  * with threads.
                  */
                 if (flags & FAULT_FLAG_WRITE)
-                        flush_tlb_page(vma, address);
+                        flush_tlb_fix_spurious_fault(vma, address);
         }
 unlock:
         pte_unmap_unlock(pte, ptl);

mm/vmalloc.c | +9
···
 static void purge_fragmented_blocks_allcpus(void);
 
 /*
+ * called before a call to iounmap() if the caller wants vm_area_struct's
+ * immediately freed.
+ */
+void set_iounmap_nonlazy(void)
+{
+        atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
+/*
  * Purges all lazily-freed vmap areas.
  *
  * If sync is 0 then don't purge if there is already a purge in progress.