x86/cpu_entry_area: Move it out of the fixmap

Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big
and 0-day already hit a case where the fixmap PTEs were cleared by
cleanup_highmap().

Aside from that, the fixmap API is a pain as it's all backwards.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by Thomas Gleixner and committed by Ingo Molnar 92a0f81d ed1bbc40

+148 -93
+2
Documentation/x86/x86_64/mm.txt
··· 12 ... unused hole ... 13 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) 14 ... unused hole ... 15 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 16 ... unused hole ... 17 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ··· 36 ... unused hole ... 37 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) 38 ... unused hole ... 39 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 40 ... unused hole ... 41 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
··· 12 ... unused hole ... 13 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) 14 ... unused hole ... 15 + fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 16 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 17 ... unused hole ... 18 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ··· 35 ... unused hole ... 36 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) 37 ... unused hole ... 38 + fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping 39 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks 40 ... unused hole ... 41 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
+17 -1
arch/x86/include/asm/cpu_entry_area.h
··· 43 }; 44 45 #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) 46 - #define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE) 47 48 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); 49 50 extern void setup_cpu_entry_areas(void); 51 52 #endif
··· 43 }; 44 45 #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) 46 + #define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) 47 48 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); 49 50 extern void setup_cpu_entry_areas(void); 51 + extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); 52 + 53 + #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE 54 + #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) 55 + 56 + #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) 57 + 58 + #define CPU_ENTRY_AREA_MAP_SIZE \ 59 + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) 60 + 61 + extern struct cpu_entry_area *get_cpu_entry_area(int cpu); 62 + 63 + static inline struct entry_stack *cpu_entry_stack(int cpu) 64 + { 65 + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; 66 + } 67 68 #endif
+1
arch/x86/include/asm/desc.h
··· 7 #include <asm/mmu.h> 8 #include <asm/fixmap.h> 9 #include <asm/irq_vectors.h> 10 11 #include <linux/smp.h> 12 #include <linux/percpu.h>
··· 7 #include <asm/mmu.h> 8 #include <asm/fixmap.h> 9 #include <asm/irq_vectors.h> 10 + #include <asm/cpu_entry_area.h> 11 12 #include <linux/smp.h> 13 #include <linux/percpu.h>
+1 -31
arch/x86/include/asm/fixmap.h
··· 25 #else 26 #include <uapi/asm/vsyscall.h> 27 #endif 28 - #include <asm/cpu_entry_area.h> 29 30 /* 31 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall ··· 83 FIX_IO_APIC_BASE_0, 84 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 85 #endif 86 - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ 87 #ifdef CONFIG_X86_32 88 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 89 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, ··· 98 #ifdef CONFIG_X86_INTEL_MID 99 FIX_LNW_VRTC, 100 #endif 101 - /* Fixmap entries to remap the GDTs, one per processor. */ 102 - FIX_CPU_ENTRY_AREA_TOP, 103 - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, 104 105 #ifdef CONFIG_ACPI_APEI_GHES 106 /* Used for GHES mapping from assorted contexts */ ··· 138 extern void reserve_top_address(unsigned long reserve); 139 140 #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) 141 - #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 142 143 extern int fixmaps_set; 144 ··· 185 186 void __early_set_fixmap(enum fixed_addresses idx, 187 phys_addr_t phys, pgprot_t flags); 188 - 189 - static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) 190 - { 191 - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); 192 - 193 - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; 194 - } 195 - 196 - #define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ 197 - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ 198 - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ 199 - }) 200 - 201 - #define get_cpu_entry_area_index(cpu, field) \ 202 - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) 203 - 204 - static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) 205 - { 206 - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); 207 - } 208 - 209 - static inline struct entry_stack 
*cpu_entry_stack(int cpu) 210 - { 211 - return &get_cpu_entry_area(cpu)->entry_stack_page.stack; 212 - } 213 214 #endif /* !__ASSEMBLY__ */ 215 #endif /* _ASM_X86_FIXMAP_H */
··· 25 #else 26 #include <uapi/asm/vsyscall.h> 27 #endif 28 29 /* 30 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall ··· 84 FIX_IO_APIC_BASE_0, 85 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 86 #endif 87 #ifdef CONFIG_X86_32 88 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 89 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, ··· 100 #ifdef CONFIG_X86_INTEL_MID 101 FIX_LNW_VRTC, 102 #endif 103 104 #ifdef CONFIG_ACPI_APEI_GHES 105 /* Used for GHES mapping from assorted contexts */ ··· 143 extern void reserve_top_address(unsigned long reserve); 144 145 #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) 146 + #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 147 148 extern int fixmaps_set; 149 ··· 190 191 void __early_set_fixmap(enum fixed_addresses idx, 192 phys_addr_t phys, pgprot_t flags); 193 194 #endif /* !__ASSEMBLY__ */ 195 #endif /* _ASM_X86_FIXMAP_H */
+12 -3
arch/x86/include/asm/pgtable_32_types.h
··· 38 #define LAST_PKMAP 1024 39 #endif 40 41 - #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ 42 - & PMD_MASK) 43 44 #ifdef CONFIG_HIGHMEM 45 # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 46 #else 47 - # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) 48 #endif 49 50 #define MODULES_VADDR VMALLOC_START
··· 38 #define LAST_PKMAP 1024 39 #endif 40 41 + /* 42 + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c 43 + * to avoid include recursion hell 44 + */ 45 + #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) 46 + 47 + #define CPU_ENTRY_AREA_BASE \ 48 + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) 49 + 50 + #define PKMAP_BASE \ 51 + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) 52 53 #ifdef CONFIG_HIGHMEM 54 # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 55 #else 56 + # define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) 57 #endif 58 59 #define MODULES_VADDR VMALLOC_START
+28 -19
arch/x86/include/asm/pgtable_64_types.h
··· 76 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 77 78 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ 79 - #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 80 #ifdef CONFIG_X86_5LEVEL 81 - #define VMALLOC_SIZE_TB _AC(16384, UL) 82 - #define __VMALLOC_BASE _AC(0xff92000000000000, UL) 83 - #define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) 84 #else 85 - #define VMALLOC_SIZE_TB _AC(32, UL) 86 - #define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 87 - #define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 88 #endif 89 #ifdef CONFIG_RANDOMIZE_MEMORY 90 - #define VMALLOC_START vmalloc_base 91 - #define VMEMMAP_START vmemmap_base 92 #else 93 - #define VMALLOC_START __VMALLOC_BASE 94 - #define VMEMMAP_START __VMEMMAP_BASE 95 #endif /* CONFIG_RANDOMIZE_MEMORY */ 96 - #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) 97 - #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 98 /* The module sections ends with the start of the fixmap */ 99 - #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 100 - #define MODULES_LEN (MODULES_END - MODULES_VADDR) 101 - #define ESPFIX_PGD_ENTRY _AC(-2, UL) 102 - #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 103 - #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 104 - #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) 105 106 #define EARLY_DYNAMIC_PAGE_TABLES 64 107
··· 76 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 77 78 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ 79 + #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) 80 + 81 #ifdef CONFIG_X86_5LEVEL 82 + # define VMALLOC_SIZE_TB _AC(16384, UL) 83 + # define __VMALLOC_BASE _AC(0xff92000000000000, UL) 84 + # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) 85 #else 86 + # define VMALLOC_SIZE_TB _AC(32, UL) 87 + # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) 88 + # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) 89 #endif 90 + 91 #ifdef CONFIG_RANDOMIZE_MEMORY 92 + # define VMALLOC_START vmalloc_base 93 + # define VMEMMAP_START vmemmap_base 94 #else 95 + # define VMALLOC_START __VMALLOC_BASE 96 + # define VMEMMAP_START __VMEMMAP_BASE 97 #endif /* CONFIG_RANDOMIZE_MEMORY */ 98 + 99 + #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) 100 + 101 + #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) 102 /* The module sections ends with the start of the fixmap */ 103 + #define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) 104 + #define MODULES_LEN (MODULES_END - MODULES_VADDR) 105 + 106 + #define ESPFIX_PGD_ENTRY _AC(-2, UL) 107 + #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) 108 + 109 + #define CPU_ENTRY_AREA_PGD _AC(-3, UL) 110 + #define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) 111 + 112 + #define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) 113 + #define EFI_VA_END (-68 * (_AC(1, UL) << 30)) 114 115 #define EARLY_DYNAMIC_PAGE_TABLES 64 116
+1
arch/x86/kernel/dumpstack.c
··· 18 #include <linux/nmi.h> 19 #include <linux/sysfs.h> 20 21 #include <asm/stacktrace.h> 22 #include <asm/unwind.h> 23
··· 18 #include <linux/nmi.h> 19 #include <linux/sysfs.h> 20 21 + #include <asm/cpu_entry_area.h> 22 #include <asm/stacktrace.h> 23 #include <asm/unwind.h> 24
+3 -2
arch/x86/kernel/traps.c
··· 951 * "sidt" instruction will not leak the location of the kernel, and 952 * to defend the IDT against arbitrary memory write vulnerabilities. 953 * It will be reloaded in cpu_init() */ 954 - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); 955 - idt_descr.address = fix_to_virt(FIX_RO_IDT); 956 957 /* 958 * Should be a barrier for any external CPU state:
··· 951 * "sidt" instruction will not leak the location of the kernel, and 952 * to defend the IDT against arbitrary memory write vulnerabilities. 953 * It will be reloaded in cpu_init() */ 954 + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), 955 + PAGE_KERNEL_RO); 956 + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; 957 958 /* 959 * Should be a barrier for any external CPU state:
+51 -17
arch/x86/mm/cpu_entry_area.c
··· 15 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); 16 #endif 17 18 - static void __init 19 - set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) 20 { 21 - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) 22 - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); 23 } 24 25 /* Setup the fixmap mappings only once per-processor */ ··· 63 pgprot_t tss_prot = PAGE_KERNEL; 64 #endif 65 66 - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); 67 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page), 68 - per_cpu_ptr(&entry_stack_storage, cpu), 1, 69 - PAGE_KERNEL); 70 71 /* 72 * The Intel SDM says (Volume 3, 7.2.1): ··· 90 BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ 91 offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); 92 BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); 93 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), 94 - &per_cpu(cpu_tss_rw, cpu), 95 - sizeof(struct tss_struct) / PAGE_SIZE, 96 - tss_prot); 97 98 #ifdef CONFIG_X86_32 99 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); ··· 102 BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); 103 BUILD_BUG_ON(sizeof(exception_stacks) != 104 sizeof(((struct cpu_entry_area *)0)->exception_stacks)); 105 - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), 106 - &per_cpu(exception_stacks, cpu), 107 - sizeof(exception_stacks) / PAGE_SIZE, 108 - PAGE_KERNEL); 109 110 - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), 111 __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); 112 #endif 113 } 114 115 void __init setup_cpu_entry_areas(void) 116 { 117 unsigned int cpu; 118 119 for_each_possible_cpu(cpu) 120 setup_cpu_entry_area(cpu);
··· 15 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); 16 #endif 17 18 + struct cpu_entry_area *get_cpu_entry_area(int cpu) 19 { 20 + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; 21 + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); 22 + 23 + return (struct cpu_entry_area *) va; 24 + } 25 + EXPORT_SYMBOL(get_cpu_entry_area); 26 + 27 + void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) 28 + { 29 + unsigned long va = (unsigned long) cea_vaddr; 30 + 31 + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); 32 + } 33 + 34 + static void __init 35 + cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) 36 + { 37 + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) 38 + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); 39 } 40 41 /* Setup the fixmap mappings only once per-processor */ ··· 47 pgprot_t tss_prot = PAGE_KERNEL; 48 #endif 49 50 + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), 51 + gdt_prot); 52 + 53 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, 54 + per_cpu_ptr(&entry_stack_storage, cpu), 1, 55 + PAGE_KERNEL); 56 57 /* 58 * The Intel SDM says (Volume 3, 7.2.1): ··· 72 BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ 73 offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); 74 BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); 75 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, 76 + &per_cpu(cpu_tss_rw, cpu), 77 + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); 78 79 #ifdef CONFIG_X86_32 80 per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); ··· 85 BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); 86 BUILD_BUG_ON(sizeof(exception_stacks) != 87 sizeof(((struct cpu_entry_area *)0)->exception_stacks)); 88 + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, 89 + &per_cpu(exception_stacks, cpu), 90 + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); 91 92 + 
cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, 93 __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); 94 + #endif 95 + } 96 + 97 + static __init void setup_cpu_entry_area_ptes(void) 98 + { 99 + #ifdef CONFIG_X86_32 100 + unsigned long start, end; 101 + 102 + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); 103 + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); 104 + 105 + start = CPU_ENTRY_AREA_BASE; 106 + end = start + CPU_ENTRY_AREA_MAP_SIZE; 107 + 108 + for (; start < end; start += PMD_SIZE) 109 + populate_extra_pte(start); 110 #endif 111 } 112 113 void __init setup_cpu_entry_areas(void) 114 { 115 unsigned int cpu; 116 + 117 + setup_cpu_entry_area_ptes(); 118 119 for_each_possible_cpu(cpu) 120 setup_cpu_entry_area(cpu);
+5 -1
arch/x86/mm/dump_pagetables.c
··· 58 KASAN_SHADOW_START_NR, 59 KASAN_SHADOW_END_NR, 60 #endif 61 #ifdef CONFIG_X86_ESPFIX64 62 ESPFIX_START_NR, 63 #endif ··· 82 [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, 83 [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, 84 #endif 85 #ifdef CONFIG_X86_ESPFIX64 86 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, 87 #endif ··· 106 #ifdef CONFIG_HIGHMEM 107 PKMAP_BASE_NR, 108 #endif 109 FIXADDR_START_NR, 110 END_OF_SPACE_NR, 111 }; ··· 119 #ifdef CONFIG_HIGHMEM 120 [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, 121 #endif 122 [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, 123 [END_OF_SPACE_NR] = { -1, NULL } 124 }; ··· 545 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; 546 # endif 547 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; 548 #endif 549 - 550 return 0; 551 } 552 __initcall(pt_dump_init);
··· 58 KASAN_SHADOW_START_NR, 59 KASAN_SHADOW_END_NR, 60 #endif 61 + CPU_ENTRY_AREA_NR, 62 #ifdef CONFIG_X86_ESPFIX64 63 ESPFIX_START_NR, 64 #endif ··· 81 [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, 82 [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, 83 #endif 84 + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, 85 #ifdef CONFIG_X86_ESPFIX64 86 [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, 87 #endif ··· 104 #ifdef CONFIG_HIGHMEM 105 PKMAP_BASE_NR, 106 #endif 107 + CPU_ENTRY_AREA_NR, 108 FIXADDR_START_NR, 109 END_OF_SPACE_NR, 110 }; ··· 116 #ifdef CONFIG_HIGHMEM 117 [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, 118 #endif 119 + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, 120 [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, 121 [END_OF_SPACE_NR] = { -1, NULL } 122 }; ··· 541 address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; 542 # endif 543 address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; 544 + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; 545 #endif 546 return 0; 547 } 548 __initcall(pt_dump_init);
+6
arch/x86/mm/init_32.c
··· 50 #include <asm/setup.h> 51 #include <asm/set_memory.h> 52 #include <asm/page_types.h> 53 #include <asm/init.h> 54 55 #include "mm_internal.h" ··· 767 mem_init_print_info(NULL); 768 printk(KERN_INFO "virtual kernel memory layout:\n" 769 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 770 #ifdef CONFIG_HIGHMEM 771 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 772 #endif ··· 778 " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", 779 FIXADDR_START, FIXADDR_TOP, 780 (FIXADDR_TOP - FIXADDR_START) >> 10, 781 782 #ifdef CONFIG_HIGHMEM 783 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
··· 50 #include <asm/setup.h> 51 #include <asm/set_memory.h> 52 #include <asm/page_types.h> 53 + #include <asm/cpu_entry_area.h> 54 #include <asm/init.h> 55 56 #include "mm_internal.h" ··· 766 mem_init_print_info(NULL); 767 printk(KERN_INFO "virtual kernel memory layout:\n" 768 " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 769 + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" 770 #ifdef CONFIG_HIGHMEM 771 " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" 772 #endif ··· 776 " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", 777 FIXADDR_START, FIXADDR_TOP, 778 (FIXADDR_TOP - FIXADDR_START) >> 10, 779 + 780 + CPU_ENTRY_AREA_BASE, 781 + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, 782 + CPU_ENTRY_AREA_MAP_SIZE >> 10, 783 784 #ifdef CONFIG_HIGHMEM 785 PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+20 -17
arch/x86/mm/kasan_init_64.c
··· 15 #include <asm/tlbflush.h> 16 #include <asm/sections.h> 17 #include <asm/pgtable.h> 18 19 extern struct range pfn_mapped[E820_MAX_ENTRIES]; 20 ··· 323 map_range(&pfn_mapped[i]); 324 } 325 326 kasan_populate_zero_shadow( 327 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 328 - kasan_mem_to_shadow((void *)__START_KERNEL_map)); 329 330 kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), 331 (unsigned long)kasan_mem_to_shadow(_end), 332 early_pfn_to_nid(__pa(_stext))); 333 334 - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); 335 - shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); 336 - shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, 337 - PAGE_SIZE); 338 - 339 - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); 340 - shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); 341 - shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, 342 - PAGE_SIZE); 343 - 344 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 345 - shadow_cpu_entry_begin); 346 - 347 - kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, 348 - (unsigned long)shadow_cpu_entry_end, 0); 349 - 350 - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); 351 352 load_cr3(init_top_pgt); 353 __flush_tlb_all();
··· 15 #include <asm/tlbflush.h> 16 #include <asm/sections.h> 17 #include <asm/pgtable.h> 18 + #include <asm/cpu_entry_area.h> 19 20 extern struct range pfn_mapped[E820_MAX_ENTRIES]; 21 ··· 322 map_range(&pfn_mapped[i]); 323 } 324 325 + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; 326 + shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); 327 + shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, 328 + PAGE_SIZE); 329 + 330 + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + 331 + CPU_ENTRY_AREA_MAP_SIZE); 332 + shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); 333 + shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, 334 + PAGE_SIZE); 335 + 336 kasan_populate_zero_shadow( 337 kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), 338 + shadow_cpu_entry_begin); 339 + 340 + kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, 341 + (unsigned long)shadow_cpu_entry_end, 0); 342 + 343 + kasan_populate_zero_shadow(shadow_cpu_entry_end, 344 + kasan_mem_to_shadow((void *)__START_KERNEL_map)); 345 346 kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), 347 (unsigned long)kasan_mem_to_shadow(_end), 348 early_pfn_to_nid(__pa(_stext))); 349 350 kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), 351 + (void *)KASAN_SHADOW_END); 352 353 load_cr3(init_top_pgt); 354 __flush_tlb_all();
+1
arch/x86/mm/pgtable_32.c
··· 10 #include <linux/pagemap.h> 11 #include <linux/spinlock.h> 12 13 #include <asm/pgtable.h> 14 #include <asm/pgalloc.h> 15 #include <asm/fixmap.h>
··· 10 #include <linux/pagemap.h> 11 #include <linux/spinlock.h> 12 13 + #include <asm/cpu_entry_area.h> 14 #include <asm/pgtable.h> 15 #include <asm/pgalloc.h> 16 #include <asm/fixmap.h>
-2
arch/x86/xen/mmu_pv.c
··· 2261 2262 switch (idx) { 2263 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2264 - case FIX_RO_IDT: 2265 #ifdef CONFIG_X86_32 2266 case FIX_WP_TEST: 2267 # ifdef CONFIG_HIGHMEM ··· 2271 #endif 2272 case FIX_TEXT_POKE0: 2273 case FIX_TEXT_POKE1: 2274 - case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM: 2275 /* All local page mappings */ 2276 pte = pfn_pte(phys, prot); 2277 break;
··· 2261 2262 switch (idx) { 2263 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2264 #ifdef CONFIG_X86_32 2265 case FIX_WP_TEST: 2266 # ifdef CONFIG_HIGHMEM ··· 2272 #endif 2273 case FIX_TEXT_POKE0: 2274 case FIX_TEXT_POKE1: 2275 /* All local page mappings */ 2276 pte = pfn_pte(phys, prot); 2277 break;