x86/cpu_entry_area: Move it out of the fixmap

Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big
and 0-day already hit a case where the fixmap PTEs were cleared by
cleanup_highmap().

Aside from that, the fixmap API is a pain as it's all backwards.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Thomas Gleixner and committed by Ingo Molnar (commit 92a0f81d, parent ed1bbc40)

+148 -93
+2
Documentation/x86/x86_64/mm.txt
··· 12 12 ... unused hole ... 13 13 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) 14 14 ... unused hole ... 15 + fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 15 16 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 16 17 ... unused hole ... 17 18 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ··· 36 35 ... unused hole ... 37 36 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) 38 37 ... unused hole ... 38 + fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 39 39 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 40 40 ... unused hole ... 41 41 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+17 -1
arch/x86/include/asm/cpu_entry_area.h
··· 43 43 }; 44 44 45 45 #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) 46 - #define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) 46 + #define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) 47 47 48 48 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); 49 49 50 50 extern void setup_cpu_entry_areas(void); 51 + extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); 52 + 53 + #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE 54 + #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) 55 + 56 + #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) 57 + 58 + #define CPU_ENTRY_AREA_MAP_SIZE \ 59 + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) 60 + 61 + extern struct cpu_entry_area *get_cpu_entry_area(int cpu); 62 + 63 + static inline struct entry_stack *cpu_entry_stack(int cpu) 64 + { 65 + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; 66 + } 51 67 52 68 #endif
+1
arch/x86/include/asm/desc.h
··· 7 7 #include <asm/mmu.h> 8 8 #include <asm/fixmap.h> 9 9 #include <asm/irq_vectors.h> 10 + #include <asm/cpu_entry_area.h> 10 11 11 12 #include <linux/smp.h> 12 13 #include <linux/percpu.h>
+1 -31
arch/x86/include/asm/fixmap.h
··· 25 25 #else 26 26 #include <uapi/asm/vsyscall.h> 27 27 #endif 28 - #include <asm/cpu_entry_area.h> 29 28 30 29 /* 31 30 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall ··· 83 84 FIX_IO_APIC_BASE_0, 84 85 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 85 86 #endif 86 - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ 87 87 #ifdef CONFIG_X86_32 88 88 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 89 89 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, ··· 98 100 #ifdef CONFIG_X86_INTEL_MID 99 101 FIX_LNW_VRTC, 100 102 #endif 101 - /* Fixmap entries to remap the GDTs, one per processor. */ 102 - FIX_CPU_ENTRY_AREA_TOP, 103 - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, 104 103 105 104 #ifdef CONFIG_ACPI_APEI_GHES 106 105 /* Used for GHES mapping from assorted contexts */ ··· 138 143 extern void reserve_top_address(unsigned long reserve); 139 144 140 145 #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) 141 - #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 146 + #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 142 147 143 148 extern int fixmaps_set; 144 149 ··· 185 190 186 191 void __early_set_fixmap(enum fixed_addresses idx, 187 192 phys_addr_t phys, pgprot_t flags); 188 - 189 - static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) 190 - { 191 - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); 192 - 193 - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; 194 - } 195 - 196 - #define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ 197 - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ 198 - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ 199 - }) 200 - 201 - #define get_cpu_entry_area_index(cpu, field) \ 202 - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) 203 - 204 - static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) 205 
- { 206 - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); 207 - } 208 - 209 - static inline struct entry_stack *cpu_entry_stack(int cpu) 210 - { 211 - return &get_cpu_entry_area(cpu)->entry_stack_page.stack; 212 - } 213 193 214 194 #endif /* !__ASSEMBLY__ */ 215 195 #endif /* _ASM_X86_FIXMAP_H */
+12 -3
arch/x86/include/asm/pgtable_32_types.h
··· 38 38 #define LAST_PKMAP 1024 39 39 #endif 40 40 41 - #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ 42 - & PMD_MASK) 41 + /* 42 + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c 43 + * to avoid include recursion hell 44 + */ 45 + #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) 46 + 47 + #define CPU_ENTRY_AREA_BASE \ 48 + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) 49 + 50 + #define PKMAP_BASE \ 51 + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) 43 52 44 53 #ifdef CONFIG_HIGHMEM 45 54 # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 46 55 #else 47 - # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 56 + # define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) 48 57 #endif 49 58 50 59 #define MODULES_VADDR VMALLOC_START
+28 -19
arch/x86/include/asm/pgtable_64_types.h
··· 76 76 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 77 77 78 78 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ 79 - #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 79 + #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 80 + 80 81 #ifdef CONFIG_X86_5LEVEL 81 - #define VMALLOC_SIZE_TB _AC(16384, UL) 82 - #define __VMALLOC_BASE _AC(0xff92000000000000, UL) 83 - #define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) 82 + # define VMALLOC_SIZE_TB _AC(16384, UL) 83 + # define __VMALLOC_BASE _AC(0xff92000000000000, UL) 84 + # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) 84 85 #else 85 - #define VMALLOC_SIZE_TB _AC(32, UL) 86 - #define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 87 - #define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 86 + # define VMALLOC_SIZE_TB _AC(32, UL) 87 + # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 88 + # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 88 89 #endif 90 + 89 91 #ifdef CONFIG_RANDOMIZE_MEMORY 90 - #define VMALLOC_START vmalloc_base 91 - #define VMEMMAP_START vmemmap_base 92 + # define VMALLOC_START vmalloc_base 93 + # define VMEMMAP_START vmemmap_base 92 94 #else 93 - #define VMALLOC_START __VMALLOC_BASE 94 - #define VMEMMAP_START __VMEMMAP_BASE 95 + # define VMALLOC_START __VMALLOC_BASE 96 + # define VMEMMAP_START __VMEMMAP_BASE 95 97 #endif /* CONFIG_RANDOMIZE_MEMORY */ 96 - #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) 97 - #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 98 + 99 + #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) 100 + 101 + #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 98 102 /* The module sections ends with the start of the fixmap */ 99 - #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 100 - #define MODULES_LEN (MODULES_END - MODULES_VADDR) 101 - #define ESPFIX_PGD_ENTRY _AC(-2, UL) 102 - #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 103 - #define 
EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 104 - #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) 103 + #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 104 + #define MODULES_LEN (MODULES_END - MODULES_VADDR) 105 + 106 + #define ESPFIX_PGD_ENTRY _AC(-2, UL) 107 + #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 108 + 109 + #define CPU_ENTRY_AREA_PGD _AC(-3, UL) 110 + #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) 111 + 112 + #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 113 + #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) 105 114 106 115 #define EARLY_DYNAMIC_PAGE_TABLES 64 107 116
+1
arch/x86/kernel/dumpstack.c
··· 18 18 #include <linux/nmi.h> 19 19 #include <linux/sysfs.h> 20 20 21 + #include <asm/cpu_entry_area.h> 21 22 #include <asm/stacktrace.h> 22 23 #include <asm/unwind.h> 23 24
+3 -2
arch/x86/kernel/traps.c
··· 951 951 * "sidt" instruction will not leak the location of the kernel, and 952 952 * to defend the IDT against arbitrary memory write vulnerabilities. 953 953 * It will be reloaded in cpu_init() */ 954 - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); 955 - idt_descr.address = fix_to_virt(FIX_RO_IDT); 954 + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), 955 + PAGE_KERNEL_RO); 956 + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; 956 957 957 958 /* 958 959 * Should be a barrier for any external CPU state:
+51 -17
arch/x86/mm/cpu_entry_area.c
··· 15 15 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); 16 16 #endif 17 17 18 - static void __init 19 - set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) 18 + struct cpu_entry_area *get_cpu_entry_area(int cpu) 20 19 { 21 - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) 22 - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); 20 + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; 21 + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); 22 + 23 + return (struct cpu_entry_area *) va; 24 + } 25 + EXPORT_SYMBOL(get_cpu_entry_area); 26 + 27 + void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) 28 + { 29 + unsigned long va = (unsigned long) cea_vaddr; 30 + 31 + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); 32 + } 33 + 34 + static void __init 35 + cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) 36 + { 37 + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) 38 + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); 23 39 } 24 40 25 41 /* Setup the fixmap mappings only once per-processor */ ··· 63 47 pgprot_t tss_prot = PAGE_KERNEL; 64 48 #endif 65 49 66 - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); 67 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), 68 - per_cpu_ptr(&entry_stack_storage, cpu), 1, 69 - PAGE_KERNEL); 50 + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), 51 + gdt_prot); 52 + 53 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, 54 + per_cpu_ptr(&entry_stack_storage, cpu), 1, 55 + PAGE_KERNEL); 70 56 71 57 /* 72 58 * The Intel SDM says (Volume 3, 7.2.1): ··· 90 72 BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ 91 73 offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); 92 74 BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); 93 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), 94 - &per_cpu(cpu_tss_rw, cpu), 
95 - sizeof(struct tss_struct) / PAGE_SIZE, 96 - tss_prot); 75 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, 76 + &per_cpu(cpu_tss_rw, cpu), 77 + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); 97 78 98 79 #ifdef CONFIG_X86_32 99 80 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); ··· 102 85 BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); 103 86 BUILD_BUG_ON(sizeof(exception_stacks) != 104 87 sizeof(((struct cpu_entry_area *)0)->exception_stacks)); 105 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), 106 - &per_cpu(exception_stacks, cpu), 107 - sizeof(exception_stacks) / PAGE_SIZE, 108 - PAGE_KERNEL); 88 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, 89 + &per_cpu(exception_stacks, cpu), 90 + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); 109 91 110 - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), 92 + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, 111 93 __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); 94 + #endif 95 + } 96 + 97 + static __init void setup_cpu_entry_area_ptes(void) 98 + { 99 + #ifdef CONFIG_X86_32 100 + unsigned long start, end; 101 + 102 + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); 103 + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); 104 + 105 + start = CPU_ENTRY_AREA_BASE; 106 + end = start + CPU_ENTRY_AREA_MAP_SIZE; 107 + 108 + for (; start < end; start += PMD_SIZE) 109 + populate_extra_pte(start); 112 110 #endif 113 111 } 114 112 115 113 void __init setup_cpu_entry_areas(void) 116 114 { 117 115 unsigned int cpu; 116 + 117 + setup_cpu_entry_area_ptes(); 118 118 119 119 for_each_possible_cpu(cpu) 120 120 setup_cpu_entry_area(cpu);
+5 -1
arch/x86/mm/dump_pagetables.c
··· 58 58 KASAN_SHADOW_START_NR, 59 59 KASAN_SHADOW_END_NR, 60 60 #endif 61 + CPU_ENTRY_AREA_NR, 61 62 #ifdef CONFIG_X86_ESPFIX64 62 63 ESPFIX_START_NR, 63 64 #endif ··· 82 81 [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, 83 82 [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, 84 83 #endif 84 + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, 85 85 #ifdef CONFIG_X86_ESPFIX64 86 86 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, 87 87 #endif ··· 106 104 #ifdef CONFIG_HIGHMEM 107 105 PKMAP_BASE_NR, 108 106 #endif 107 + CPU_ENTRY_AREA_NR, 109 108 FIXADDR_START_NR, 110 109 END_OF_SPACE_NR, 111 110 }; ··· 119 116 #ifdef CONFIG_HIGHMEM 120 117 [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, 121 118 #endif 119 + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, 122 120 [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, 123 121 [END_OF_SPACE_NR] = { -1, NULL } 124 122 }; ··· 545 541 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; 546 542 # endif 547 543 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; 544 + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; 548 545 #endif 549 - 550 546 return 0; 551 547 } 552 548 __initcall(pt_dump_init);
+6
arch/x86/mm/init_32.c
··· 50 50 #include <asm/setup.h> 51 51 #include <asm/set_memory.h> 52 52 #include <asm/page_types.h> 53 + #include <asm/cpu_entry_area.h> 53 54 #include <asm/init.h> 54 55 55 56 #include "mm_internal.h" ··· 767 766 mem_init_print_info(NULL); 768 767 printk(KERN_INFO "virtual kernel memory layout:\n" 769 768 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 769 + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" 770 770 #ifdef CONFIG_HIGHMEM 771 771 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 772 772 #endif ··· 778 776 " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", 779 777 FIXADDR_START, FIXADDR_TOP, 780 778 (FIXADDR_TOP - FIXADDR_START) >> 10, 779 + 780 + CPU_ENTRY_AREA_BASE, 781 + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, 782 + CPU_ENTRY_AREA_MAP_SIZE >> 10, 781 783 782 784 #ifdef CONFIG_HIGHMEM 783 785 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+20 -17
arch/x86/mm/kasan_init_64.c
··· 15 15 #include <asm/tlbflush.h> 16 16 #include <asm/sections.h> 17 17 #include <asm/pgtable.h> 18 + #include <asm/cpu_entry_area.h> 18 19 19 20 extern struct range pfn_mapped[E820_MAX_ENTRIES]; 20 21 ··· 323 322 map_range(&pfn_mapped[i]); 324 323 } 325 324 325 + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; 326 + shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); 327 + shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, 328 + PAGE_SIZE); 329 + 330 + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + 331 + CPU_ENTRY_AREA_MAP_SIZE); 332 + shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); 333 + shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, 334 + PAGE_SIZE); 335 + 326 336 kasan_populate_zero_shadow( 327 337 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 328 - kasan_mem_to_shadow((void *)__START_KERNEL_map)); 338 + shadow_cpu_entry_begin); 339 + 340 + kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, 341 + (unsigned long)shadow_cpu_entry_end, 0); 342 + 343 + kasan_populate_zero_shadow(shadow_cpu_entry_end, 344 + kasan_mem_to_shadow((void *)__START_KERNEL_map)); 329 345 330 346 kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), 331 347 (unsigned long)kasan_mem_to_shadow(_end), 332 348 early_pfn_to_nid(__pa(_stext))); 333 349 334 - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); 335 - shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); 336 - shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, 337 - PAGE_SIZE); 338 - 339 - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); 340 - shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); 341 - shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, 342 - PAGE_SIZE); 343 - 344 350 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 
345 - shadow_cpu_entry_begin); 346 - 347 - kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, 348 - (unsigned long)shadow_cpu_entry_end, 0); 349 - 350 - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); 351 + (void *)KASAN_SHADOW_END); 351 352 352 353 load_cr3(init_top_pgt); 353 354 __flush_tlb_all();
+1
arch/x86/mm/pgtable_32.c
··· 10 10 #include <linux/pagemap.h> 11 11 #include <linux/spinlock.h> 12 12 13 + #include <asm/cpu_entry_area.h> 13 14 #include <asm/pgtable.h> 14 15 #include <asm/pgalloc.h> 15 16 #include <asm/fixmap.h>
-2
arch/x86/xen/mmu_pv.c
··· 2261 2261 2262 2262 switch (idx) { 2263 2263 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2264 - case FIX_RO_IDT: 2265 2264 #ifdef CONFIG_X86_32 2266 2265 case FIX_WP_TEST: 2267 2266 # ifdef CONFIG_HIGHMEM ··· 2271 2272 #endif 2272 2273 case FIX_TEXT_POKE0: 2273 2274 case FIX_TEXT_POKE1: 2274 - case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: 2275 2275 /* All local page mappings */ 2276 2276 pte = pfn_pte(phys, prot); 2277 2277 break;