Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull EFI updates from Ingo Molnar:
"The main changes in this cycle were:

- Cleanup of the GOP [graphics output] handling code in the EFI stub

- Complete refactoring of the mixed mode handling in the x86 EFI stub

- Overhaul of the x86 EFI boot/runtime code

- Increase robustness for mixed mode code

- Add the ability to disable DMA at the root port level in the EFI
stub

- Get rid of RWX mappings in the EFI memory map and page tables,
where possible

- Move the support code for the old EFI memory mapping style into its
only user, the SGI UV1+ support code.

- plus misc fixes, updates, smaller cleanups.

... and due to interactions with the RWX changes, another round of PAT
cleanups make a guest appearance via the EFI tree - with no side
effects intended"

* 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (75 commits)
efi/x86: Disable instrumentation in the EFI runtime handling code
efi/libstub/x86: Fix EFI server boot failure
efi/x86: Disallow efi=old_map in mixed mode
x86/boot/compressed: Relax sed symbol type regex for LLVM ld.lld
efi/x86: avoid KASAN false positives when accessing the 1:1 mapping
efi: Fix handling of multiple efi_fake_mem= entries
efi: Fix efi_memmap_alloc() leaks
efi: Add tracking for dynamically allocated memmaps
efi: Add a flags parameter to efi_memory_map
efi: Fix comment for efi_mem_type() wrt absent physical addresses
efi/arm: Defer probe of PCIe backed efifb on DT systems
efi/x86: Limit EFI old memory map to SGI UV machines
efi/x86: Avoid RWX mappings for all of DRAM
efi/x86: Don't map the entire kernel text RW for mixed mode
x86/mm: Fix NX bit clearing issue in kernel_map_pages_in_pgd
efi/libstub/x86: Fix unused-variable warning
efi/libstub/x86: Use mandatory 16-byte stack alignment in mixed mode
efi/libstub/x86: Use const attribute for efi_is_64bit()
efi: Allow disabling PCI busmastering on bridges during boot
efi/x86: Allow translating 64-bit arguments for mixed mode calls
...

+2650 -2892
+7 -3
Documentation/admin-guide/kernel-parameters.txt
··· 1165 1165 1166 1166 efi= [EFI] 1167 1167 Format: { "old_map", "nochunk", "noruntime", "debug", 1168 - "nosoftreserve" } 1168 + "nosoftreserve", "disable_early_pci_dma", 1169 + "no_disable_early_pci_dma" } 1169 1170 old_map [X86-64]: switch to the old ioremap-based EFI 1170 - runtime services mapping. 32-bit still uses this one by 1171 - default. 1171 + runtime services mapping. [Needs CONFIG_X86_UV=y] 1172 1172 nochunk: disable reading files in "chunks" in the EFI 1173 1173 boot stub, as chunking can cause problems with some 1174 1174 firmware implementations. ··· 1180 1180 claim. Specify efi=nosoftreserve to disable this 1181 1181 reservation and treat the memory by its base type 1182 1182 (i.e. EFI_CONVENTIONAL_MEMORY / "System RAM"). 1183 + disable_early_pci_dma: Disable the busmaster bit on all 1184 + PCI bridges while in the EFI boot stub 1185 + no_disable_early_pci_dma: Leave the busmaster bit set 1186 + on all PCI bridges while in the EFI boot stub 1183 1187 1184 1188 efi_no_storage_paranoia [EFI; X86] 1185 1189 Using this parameter you can use more than 50% of
+4
arch/alpha/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_ALPHA_VMALLOC_H 2 + #define _ASM_ALPHA_VMALLOC_H 3 + 4 + #endif /* _ASM_ALPHA_VMALLOC_H */
+4
arch/arc/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_ARC_VMALLOC_H 2 + #define _ASM_ARC_VMALLOC_H 3 + 4 + #endif /* _ASM_ARC_VMALLOC_H */
+7 -10
arch/arm/include/asm/efi.h
··· 50 50 51 51 /* arch specific definitions used by the stub code */ 52 52 53 - #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) 54 - #define __efi_call_early(f, ...) f(__VA_ARGS__) 55 - #define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) 56 - #define efi_is_64bit() (false) 53 + #define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__) 54 + #define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__) 55 + #define efi_is_native() (true) 57 56 58 - #define efi_table_attr(table, attr, instance) \ 59 - ((table##_t *)instance)->attr 57 + #define efi_table_attr(inst, attr) (inst->attr) 60 58 61 - #define efi_call_proto(protocol, f, instance, ...) \ 62 - ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) 59 + #define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__) 63 60 64 - struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg); 65 - void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si); 61 + struct screen_info *alloc_screen_info(void); 62 + void free_screen_info(struct screen_info *si); 66 63 67 64 static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) 68 65 {
+4
arch/arm/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_ARM_VMALLOC_H 2 + #define _ASM_ARM_VMALLOC_H 3 + 4 + #endif /* _ASM_ARM_VMALLOC_H */
+6 -10
arch/arm64/include/asm/efi.h
··· 93 93 return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); 94 94 } 95 95 96 - #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) 97 - #define __efi_call_early(f, ...) f(__VA_ARGS__) 98 - #define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) 99 - #define efi_is_64bit() (true) 96 + #define efi_bs_call(func, ...) efi_system_table()->boottime->func(__VA_ARGS__) 97 + #define efi_rt_call(func, ...) efi_system_table()->runtime->func(__VA_ARGS__) 98 + #define efi_is_native() (true) 100 99 101 - #define efi_table_attr(table, attr, instance) \ 102 - ((table##_t *)instance)->attr 100 + #define efi_table_attr(inst, attr) (inst->attr) 103 101 104 - #define efi_call_proto(protocol, f, instance, ...) \ 105 - ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) 102 + #define efi_call_proto(inst, func, ...) inst->func(inst, ##__VA_ARGS__) 106 103 107 104 #define alloc_screen_info(x...) &screen_info 108 105 109 - static inline void free_screen_info(efi_system_table_t *sys_table_arg, 110 - struct screen_info *si) 106 + static inline void free_screen_info(struct screen_info *si) 111 107 { 112 108 } 113 109
+4
arch/arm64/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_ARM64_VMALLOC_H 2 + #define _ASM_ARM64_VMALLOC_H 3 + 4 + #endif /* _ASM_ARM64_VMALLOC_H */
+4
arch/c6x/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_C6X_VMALLOC_H 2 + #define _ASM_C6X_VMALLOC_H 3 + 4 + #endif /* _ASM_C6X_VMALLOC_H */
+4
arch/csky/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_CSKY_VMALLOC_H 2 + #define _ASM_CSKY_VMALLOC_H 3 + 4 + #endif /* _ASM_CSKY_VMALLOC_H */
+4
arch/h8300/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_H8300_VMALLOC_H 2 + #define _ASM_H8300_VMALLOC_H 3 + 4 + #endif /* _ASM_H8300_VMALLOC_H */
+4
arch/hexagon/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_HEXAGON_VMALLOC_H 2 + #define _ASM_HEXAGON_VMALLOC_H 3 + 4 + #endif /* _ASM_HEXAGON_VMALLOC_H */
+4
arch/ia64/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_IA64_VMALLOC_H 2 + #define _ASM_IA64_VMALLOC_H 3 + 4 + #endif /* _ASM_IA64_VMALLOC_H */
+4
arch/m68k/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_M68K_VMALLOC_H 2 + #define _ASM_M68K_VMALLOC_H 3 + 4 + #endif /* _ASM_M68K_VMALLOC_H */
+4
arch/microblaze/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_MICROBLAZE_VMALLOC_H 2 + #define _ASM_MICROBLAZE_VMALLOC_H 3 + 4 + #endif /* _ASM_MICROBLAZE_VMALLOC_H */
+4
arch/mips/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_MIPS_VMALLOC_H 2 + #define _ASM_MIPS_VMALLOC_H 3 + 4 + #endif /* _ASM_MIPS_VMALLOC_H */
+4
arch/nds32/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_NDS32_VMALLOC_H 2 + #define _ASM_NDS32_VMALLOC_H 3 + 4 + #endif /* _ASM_NDS32_VMALLOC_H */
+4
arch/nios2/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_NIOS2_VMALLOC_H 2 + #define _ASM_NIOS2_VMALLOC_H 3 + 4 + #endif /* _ASM_NIOS2_VMALLOC_H */
+4
arch/openrisc/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_OPENRISC_VMALLOC_H 2 + #define _ASM_OPENRISC_VMALLOC_H 3 + 4 + #endif /* _ASM_OPENRISC_VMALLOC_H */
+4
arch/parisc/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_PARISC_VMALLOC_H 2 + #define _ASM_PARISC_VMALLOC_H 3 + 4 + #endif /* _ASM_PARISC_VMALLOC_H */
+4
arch/powerpc/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_POWERPC_VMALLOC_H 2 + #define _ASM_POWERPC_VMALLOC_H 3 + 4 + #endif /* _ASM_POWERPC_VMALLOC_H */
+4
arch/riscv/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_RISCV_VMALLOC_H 2 + #define _ASM_RISCV_VMALLOC_H 3 + 4 + #endif /* _ASM_RISCV_VMALLOC_H */
+4
arch/s390/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_S390_VMALLOC_H 2 + #define _ASM_S390_VMALLOC_H 3 + 4 + #endif /* _ASM_S390_VMALLOC_H */
+4
arch/sh/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_SH_VMALLOC_H 2 + #define _ASM_SH_VMALLOC_H 3 + 4 + #endif /* _ASM_SH_VMALLOC_H */
+4
arch/sparc/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_SPARC_VMALLOC_H 2 + #define _ASM_SPARC_VMALLOC_H 3 + 4 + #endif /* _ASM_SPARC_VMALLOC_H */
+4
arch/um/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_UM_VMALLOC_H 2 + #define _ASM_UM_VMALLOC_H 3 + 4 + #endif /* _ASM_UM_VMALLOC_H */
+4
arch/unicore32/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_UNICORE32_VMALLOC_H 2 + #define _ASM_UNICORE32_VMALLOC_H 3 + 4 + #endif /* _ASM_UNICORE32_VMALLOC_H */
+7 -6
arch/x86/Kconfig
··· 1513 1513 bool "Enable statistic for Change Page Attribute" 1514 1514 depends on DEBUG_FS 1515 1515 ---help--- 1516 - Expose statistics about the Change Page Attribute mechanims, which 1516 + Expose statistics about the Change Page Attribute mechanism, which 1517 1517 helps to determine the effectiveness of preserving large and huge 1518 1518 page mappings when mapping protections are changed. 1519 1519 ··· 1992 1992 platforms. 1993 1993 1994 1994 config EFI_STUB 1995 - bool "EFI stub support" 1996 - depends on EFI && !X86_USE_3DNOW 1997 - select RELOCATABLE 1998 - ---help--- 1999 - This kernel feature allows a bzImage to be loaded directly 1995 + bool "EFI stub support" 1996 + depends on EFI && !X86_USE_3DNOW 1997 + depends on $(cc-option,-mabi=ms) || X86_32 1998 + select RELOCATABLE 1999 + ---help--- 2000 + This kernel feature allows a bzImage to be loaded directly 2000 2001 by EFI firmware without the use of a bootloader. 2001 2002 2002 2003 See Documentation/admin-guide/efi-stub.rst for more information.
+1 -1
arch/x86/boot/Makefile
··· 88 88 89 89 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) 90 90 91 - sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' 91 + sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' 92 92 93 93 quiet_cmd_zoffset = ZOFFSET $@ 94 94 cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+1 -1
arch/x86/boot/compressed/Makefile
··· 89 89 90 90 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 91 91 92 - vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ 92 + vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o \ 93 93 $(objtree)/drivers/firmware/efi/libstub/lib.a 94 94 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o 95 95
+117 -161
arch/x86/boot/compressed/eboot.c
··· 6 6 * 7 7 * ----------------------------------------------------------------------- */ 8 8 9 + #pragma GCC visibility push(hidden) 10 + 9 11 #include <linux/efi.h> 10 12 #include <linux/pci.h> 11 13 ··· 21 19 #include "eboot.h" 22 20 23 21 static efi_system_table_t *sys_table; 22 + extern const bool efi_is64; 24 23 25 - static struct efi_config *efi_early; 26 - 27 - __pure const struct efi_config *__efi_early(void) 24 + __pure efi_system_table_t *efi_system_table(void) 28 25 { 29 - return efi_early; 26 + return sys_table; 30 27 } 31 28 32 - #define BOOT_SERVICES(bits) \ 33 - static void setup_boot_services##bits(struct efi_config *c) \ 34 - { \ 35 - efi_system_table_##bits##_t *table; \ 36 - \ 37 - table = (typeof(table))sys_table; \ 38 - \ 39 - c->runtime_services = table->runtime; \ 40 - c->boot_services = table->boottime; \ 41 - c->text_output = table->con_out; \ 42 - } 43 - BOOT_SERVICES(32); 44 - BOOT_SERVICES(64); 45 - 46 - void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) 29 + __attribute_const__ bool efi_is_64bit(void) 47 30 { 48 - efi_call_proto(efi_simple_text_output_protocol, output_string, 49 - efi_early->text_output, str); 31 + if (IS_ENABLED(CONFIG_EFI_MIXED)) 32 + return efi_is64; 33 + return IS_ENABLED(CONFIG_X86_64); 50 34 } 51 35 52 36 static efi_status_t ··· 51 63 * large romsize. The UEFI spec limits the size of option ROMs to 16 52 64 * MiB so we reject any ROMs over 16 MiB in size to catch this. 
53 65 */ 54 - romimage = (void *)(unsigned long)efi_table_attr(efi_pci_io_protocol, 55 - romimage, pci); 56 - romsize = efi_table_attr(efi_pci_io_protocol, romsize, pci); 66 + romimage = efi_table_attr(pci, romimage); 67 + romsize = efi_table_attr(pci, romsize); 57 68 if (!romimage || !romsize || romsize > SZ_16M) 58 69 return EFI_INVALID_PARAMETER; 59 70 60 71 size = romsize + sizeof(*rom); 61 72 62 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); 73 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 74 + (void **)&rom); 63 75 if (status != EFI_SUCCESS) { 64 - efi_printk(sys_table, "Failed to allocate memory for 'rom'\n"); 76 + efi_printk("Failed to allocate memory for 'rom'\n"); 65 77 return status; 66 78 } 67 79 ··· 73 85 rom->pcilen = pci->romsize; 74 86 *__rom = rom; 75 87 76 - status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, 77 - EfiPciIoWidthUint16, PCI_VENDOR_ID, 1, 78 - &rom->vendor); 88 + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, 89 + PCI_VENDOR_ID, 1, &rom->vendor); 79 90 80 91 if (status != EFI_SUCCESS) { 81 - efi_printk(sys_table, "Failed to read rom->vendor\n"); 92 + efi_printk("Failed to read rom->vendor\n"); 82 93 goto free_struct; 83 94 } 84 95 85 - status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, 86 - EfiPciIoWidthUint16, PCI_DEVICE_ID, 1, 87 - &rom->devid); 96 + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, 97 + PCI_DEVICE_ID, 1, &rom->devid); 88 98 89 99 if (status != EFI_SUCCESS) { 90 - efi_printk(sys_table, "Failed to read rom->devid\n"); 100 + efi_printk("Failed to read rom->devid\n"); 91 101 goto free_struct; 92 102 } 93 103 94 - status = efi_call_proto(efi_pci_io_protocol, get_location, pci, 95 - &rom->segment, &rom->bus, &rom->device, 96 - &rom->function); 104 + status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus, 105 + &rom->device, &rom->function); 97 106 98 107 if (status != EFI_SUCCESS) 99 108 goto free_struct; ··· 99 114 return 
status; 100 115 101 116 free_struct: 102 - efi_call_early(free_pool, rom); 117 + efi_bs_call(free_pool, rom); 103 118 return status; 104 119 } 105 120 ··· 118 133 void **pci_handle = NULL; 119 134 efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; 120 135 unsigned long size = 0; 121 - unsigned long nr_pci; 122 136 struct setup_data *data; 137 + efi_handle_t h; 123 138 int i; 124 139 125 - status = efi_call_early(locate_handle, 126 - EFI_LOCATE_BY_PROTOCOL, 127 - &pci_proto, NULL, &size, pci_handle); 140 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 141 + &pci_proto, NULL, &size, pci_handle); 128 142 129 143 if (status == EFI_BUFFER_TOO_SMALL) { 130 - status = efi_call_early(allocate_pool, 131 - EFI_LOADER_DATA, 132 - size, (void **)&pci_handle); 144 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 145 + (void **)&pci_handle); 133 146 134 147 if (status != EFI_SUCCESS) { 135 - efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n"); 148 + efi_printk("Failed to allocate memory for 'pci_handle'\n"); 136 149 return; 137 150 } 138 151 139 - status = efi_call_early(locate_handle, 140 - EFI_LOCATE_BY_PROTOCOL, &pci_proto, 141 - NULL, &size, pci_handle); 152 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 153 + &pci_proto, NULL, &size, pci_handle); 142 154 } 143 155 144 156 if (status != EFI_SUCCESS) ··· 146 164 while (data && data->next) 147 165 data = (struct setup_data *)(unsigned long)data->next; 148 166 149 - nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32)); 150 - for (i = 0; i < nr_pci; i++) { 167 + for_each_efi_handle(h, pci_handle, size, i) { 151 168 efi_pci_io_protocol_t *pci = NULL; 152 169 struct pci_setup_rom *rom; 153 170 154 - status = efi_call_early(handle_protocol, 155 - efi_is_64bit() ? 
((u64 *)pci_handle)[i] 156 - : ((u32 *)pci_handle)[i], 157 - &pci_proto, (void **)&pci); 171 + status = efi_bs_call(handle_protocol, h, &pci_proto, 172 + (void **)&pci); 158 173 if (status != EFI_SUCCESS || !pci) 159 174 continue; 160 175 ··· 168 189 } 169 190 170 191 free_handle: 171 - efi_call_early(free_pool, pci_handle); 192 + efi_bs_call(free_pool, pci_handle); 172 193 } 173 194 174 195 static void retrieve_apple_device_properties(struct boot_params *boot_params) ··· 177 198 struct setup_data *data, *new; 178 199 efi_status_t status; 179 200 u32 size = 0; 180 - void *p; 201 + apple_properties_protocol_t *p; 181 202 182 - status = efi_call_early(locate_protocol, &guid, NULL, &p); 203 + status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&p); 183 204 if (status != EFI_SUCCESS) 184 205 return; 185 206 186 - if (efi_table_attr(apple_properties_protocol, version, p) != 0x10000) { 187 - efi_printk(sys_table, "Unsupported properties proto version\n"); 207 + if (efi_table_attr(p, version) != 0x10000) { 208 + efi_printk("Unsupported properties proto version\n"); 188 209 return; 189 210 } 190 211 191 - efi_call_proto(apple_properties_protocol, get_all, p, NULL, &size); 212 + efi_call_proto(p, get_all, NULL, &size); 192 213 if (!size) 193 214 return; 194 215 195 216 do { 196 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 197 - size + sizeof(struct setup_data), &new); 217 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, 218 + size + sizeof(struct setup_data), 219 + (void **)&new); 198 220 if (status != EFI_SUCCESS) { 199 - efi_printk(sys_table, "Failed to allocate memory for 'properties'\n"); 221 + efi_printk("Failed to allocate memory for 'properties'\n"); 200 222 return; 201 223 } 202 224 203 - status = efi_call_proto(apple_properties_protocol, get_all, p, 204 - new->data, &size); 225 + status = efi_call_proto(p, get_all, new->data, &size); 205 226 206 227 if (status == EFI_BUFFER_TOO_SMALL) 207 - efi_call_early(free_pool, new); 228 + 
efi_bs_call(free_pool, new); 208 229 } while (status == EFI_BUFFER_TOO_SMALL); 209 230 210 231 new->type = SETUP_APPLE_PROPERTIES; ··· 226 247 static void setup_quirks(struct boot_params *boot_params) 227 248 { 228 249 efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long) 229 - efi_table_attr(efi_system_table, fw_vendor, sys_table); 250 + efi_table_attr(efi_system_table(), fw_vendor); 230 251 231 252 if (!memcmp(fw_vendor, apple, sizeof(apple))) { 232 253 if (IS_ENABLED(CONFIG_APPLE_PROPERTIES)) ··· 244 265 u32 width, height; 245 266 void **uga_handle = NULL; 246 267 efi_uga_draw_protocol_t *uga = NULL, *first_uga; 247 - unsigned long nr_ugas; 268 + efi_handle_t handle; 248 269 int i; 249 270 250 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 251 - size, (void **)&uga_handle); 271 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 272 + (void **)&uga_handle); 252 273 if (status != EFI_SUCCESS) 253 274 return status; 254 275 255 - status = efi_call_early(locate_handle, 256 - EFI_LOCATE_BY_PROTOCOL, 257 - uga_proto, NULL, &size, uga_handle); 276 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 277 + uga_proto, NULL, &size, uga_handle); 258 278 if (status != EFI_SUCCESS) 259 279 goto free_handle; 260 280 ··· 261 283 width = 0; 262 284 263 285 first_uga = NULL; 264 - nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32)); 265 - for (i = 0; i < nr_ugas; i++) { 286 + for_each_efi_handle(handle, uga_handle, size, i) { 266 287 efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; 267 288 u32 w, h, depth, refresh; 268 289 void *pciio; 269 - unsigned long handle = efi_is_64bit() ? 
((u64 *)uga_handle)[i] 270 - : ((u32 *)uga_handle)[i]; 271 290 272 - status = efi_call_early(handle_protocol, handle, 273 - uga_proto, (void **)&uga); 291 + status = efi_bs_call(handle_protocol, handle, uga_proto, 292 + (void **)&uga); 274 293 if (status != EFI_SUCCESS) 275 294 continue; 276 295 277 296 pciio = NULL; 278 - efi_call_early(handle_protocol, handle, &pciio_proto, &pciio); 297 + efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio); 279 298 280 - status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga, 281 - &w, &h, &depth, &refresh); 299 + status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh); 282 300 if (status == EFI_SUCCESS && (!first_uga || pciio)) { 283 301 width = w; 284 302 height = h; ··· 310 336 si->rsvd_pos = 24; 311 337 312 338 free_handle: 313 - efi_call_early(free_pool, uga_handle); 339 + efi_bs_call(free_pool, uga_handle); 314 340 315 341 return status; 316 342 } ··· 329 355 memset(si, 0, sizeof(*si)); 330 356 331 357 size = 0; 332 - status = efi_call_early(locate_handle, 333 - EFI_LOCATE_BY_PROTOCOL, 334 - &graphics_proto, NULL, &size, gop_handle); 358 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 359 + &graphics_proto, NULL, &size, gop_handle); 335 360 if (status == EFI_BUFFER_TOO_SMALL) 336 - status = efi_setup_gop(NULL, si, &graphics_proto, size); 361 + status = efi_setup_gop(si, &graphics_proto, size); 337 362 338 363 if (status != EFI_SUCCESS) { 339 364 size = 0; 340 - status = efi_call_early(locate_handle, 341 - EFI_LOCATE_BY_PROTOCOL, 342 - &uga_proto, NULL, &size, uga_handle); 365 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 366 + &uga_proto, NULL, &size, uga_handle); 343 367 if (status == EFI_BUFFER_TOO_SMALL) 344 368 setup_uga(si, &uga_proto, size); 345 369 } 346 370 } 347 371 372 + void startup_32(struct boot_params *boot_params); 373 + 374 + void __noreturn efi_stub_entry(efi_handle_t handle, 375 + efi_system_table_t *sys_table_arg, 376 + struct boot_params *boot_params); 
377 + 348 378 /* 349 379 * Because the x86 boot code expects to be passed a boot_params we 350 380 * need to create one ourselves (usually the bootloader would create 351 381 * one for us). 352 - * 353 - * The caller is responsible for filling out ->code32_start in the 354 - * returned boot_params. 355 382 */ 356 - struct boot_params *make_boot_params(struct efi_config *c) 383 + efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, 384 + efi_system_table_t *sys_table_arg) 357 385 { 358 386 struct boot_params *boot_params; 359 387 struct apm_bios_info *bi; 360 388 struct setup_header *hdr; 361 389 efi_loaded_image_t *image; 362 - void *handle; 363 390 efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID; 364 391 int options_size = 0; 365 392 efi_status_t status; ··· 368 393 unsigned long ramdisk_addr; 369 394 unsigned long ramdisk_size; 370 395 371 - efi_early = c; 372 - sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; 373 - handle = (void *)(unsigned long)efi_early->image_handle; 396 + sys_table = sys_table_arg; 374 397 375 398 /* Check if we were booted by the EFI firmware */ 376 399 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 377 - return NULL; 400 + return EFI_INVALID_PARAMETER; 378 401 379 - if (efi_is_64bit()) 380 - setup_boot_services64(efi_early); 381 - else 382 - setup_boot_services32(efi_early); 383 - 384 - status = efi_call_early(handle_protocol, handle, 385 - &proto, (void *)&image); 402 + status = efi_bs_call(handle_protocol, handle, &proto, (void *)&image); 386 403 if (status != EFI_SUCCESS) { 387 - efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n"); 388 - return NULL; 404 + efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n"); 405 + return status; 389 406 } 390 407 391 - status = efi_low_alloc(sys_table, 0x4000, 1, 392 - (unsigned long *)&boot_params); 408 + status = efi_low_alloc(0x4000, 1, (unsigned long *)&boot_params); 393 409 if (status != EFI_SUCCESS) { 394 - efi_printk(sys_table, 
"Failed to allocate lowmem for boot params\n"); 395 - return NULL; 410 + efi_printk("Failed to allocate lowmem for boot params\n"); 411 + return status; 396 412 } 397 413 398 414 memset(boot_params, 0x0, 0x4000); ··· 405 439 hdr->type_of_loader = 0x21; 406 440 407 441 /* Convert unicode cmdline to ascii */ 408 - cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size); 442 + cmdline_ptr = efi_convert_cmdline(image, &options_size); 409 443 if (!cmdline_ptr) 410 444 goto fail; 411 445 ··· 423 457 if (status != EFI_SUCCESS) 424 458 goto fail2; 425 459 426 - status = handle_cmdline_files(sys_table, image, 460 + status = handle_cmdline_files(image, 427 461 (char *)(unsigned long)hdr->cmd_line_ptr, 428 462 "initrd=", hdr->initrd_addr_max, 429 463 &ramdisk_addr, &ramdisk_size); 430 464 431 465 if (status != EFI_SUCCESS && 432 466 hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) { 433 - efi_printk(sys_table, "Trying to load files to higher address\n"); 434 - status = handle_cmdline_files(sys_table, image, 467 + efi_printk("Trying to load files to higher address\n"); 468 + status = handle_cmdline_files(image, 435 469 (char *)(unsigned long)hdr->cmd_line_ptr, 436 470 "initrd=", -1UL, 437 471 &ramdisk_addr, &ramdisk_size); ··· 444 478 boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32; 445 479 boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32; 446 480 447 - return boot_params; 481 + hdr->code32_start = (u32)(unsigned long)startup_32; 482 + 483 + efi_stub_entry(handle, sys_table, boot_params); 484 + /* not reached */ 448 485 449 486 fail2: 450 - efi_free(sys_table, options_size, hdr->cmd_line_ptr); 487 + efi_free(options_size, hdr->cmd_line_ptr); 451 488 fail: 452 - efi_free(sys_table, 0x4000, (unsigned long)boot_params); 489 + efi_free(0x4000, (unsigned long)boot_params); 453 490 454 - return NULL; 491 + return status; 455 492 } 456 493 457 494 static void add_e820ext(struct boot_params *params, ··· 589 620 sizeof(struct e820_entry) * nr_desc; 590 621 591 
622 if (*e820ext) { 592 - efi_call_early(free_pool, *e820ext); 623 + efi_bs_call(free_pool, *e820ext); 593 624 *e820ext = NULL; 594 625 *e820ext_size = 0; 595 626 } 596 627 597 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 598 - size, (void **)e820ext); 628 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 629 + (void **)e820ext); 599 630 if (status == EFI_SUCCESS) 600 631 *e820ext_size = size; 601 632 ··· 619 650 boot_map.key_ptr = NULL; 620 651 boot_map.buff_size = &buff_size; 621 652 622 - status = efi_get_memory_map(sys_table, &boot_map); 653 + status = efi_get_memory_map(&boot_map); 623 654 if (status != EFI_SUCCESS) 624 655 return status; 625 656 ··· 641 672 struct efi_info *efi; 642 673 }; 643 674 644 - static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 645 - struct efi_boot_memmap *map, 675 + static efi_status_t exit_boot_func(struct efi_boot_memmap *map, 646 676 void *priv) 647 677 { 648 678 const char *signature; ··· 651 683 : EFI32_LOADER_SIGNATURE; 652 684 memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); 653 685 654 - p->efi->efi_systab = (unsigned long)sys_table_arg; 686 + p->efi->efi_systab = (unsigned long)efi_system_table(); 655 687 p->efi->efi_memdesc_size = *map->desc_size; 656 688 p->efi->efi_memdesc_version = *map->desc_ver; 657 689 p->efi->efi_memmap = (unsigned long)*map->map; 658 690 p->efi->efi_memmap_size = *map->map_size; 659 691 660 692 #ifdef CONFIG_X86_64 661 - p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32; 693 + p->efi->efi_systab_hi = (unsigned long)efi_system_table() >> 32; 662 694 p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; 663 695 #endif 664 696 ··· 690 722 return status; 691 723 692 724 /* Might as well exit boot services now */ 693 - status = efi_exit_boot_services(sys_table, handle, &map, &priv, 694 - exit_boot_func); 725 + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); 695 726 if (status != EFI_SUCCESS) 696 727 return 
status; 697 728 ··· 708 741 * On success we return a pointer to a boot_params structure, and NULL 709 742 * on failure. 710 743 */ 711 - struct boot_params * 712 - efi_main(struct efi_config *c, struct boot_params *boot_params) 744 + struct boot_params *efi_main(efi_handle_t handle, 745 + efi_system_table_t *sys_table_arg, 746 + struct boot_params *boot_params) 713 747 { 714 748 struct desc_ptr *gdt = NULL; 715 749 struct setup_header *hdr = &boot_params->hdr; 716 750 efi_status_t status; 717 751 struct desc_struct *desc; 718 - void *handle; 719 - efi_system_table_t *_table; 720 752 unsigned long cmdline_paddr; 721 753 722 - efi_early = c; 723 - 724 - _table = (efi_system_table_t *)(unsigned long)efi_early->table; 725 - handle = (void *)(unsigned long)efi_early->image_handle; 726 - 727 - sys_table = _table; 754 + sys_table = sys_table_arg; 728 755 729 756 /* Check if we were booted by the EFI firmware */ 730 757 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 731 758 goto fail; 732 - 733 - if (efi_is_64bit()) 734 - setup_boot_services64(efi_early); 735 - else 736 - setup_boot_services32(efi_early); 737 759 738 760 /* 739 761 * make_boot_params() may have been called before efi_main(), in which ··· 738 782 * otherwise we ask the BIOS. 
739 783 */ 740 784 if (boot_params->secure_boot == efi_secureboot_mode_unset) 741 - boot_params->secure_boot = efi_get_secureboot(sys_table); 785 + boot_params->secure_boot = efi_get_secureboot(); 742 786 743 787 /* Ask the firmware to clear memory on unclean shutdown */ 744 - efi_enable_reset_attack_mitigation(sys_table); 788 + efi_enable_reset_attack_mitigation(); 745 789 746 - efi_random_get_seed(sys_table); 790 + efi_random_get_seed(); 747 791 748 - efi_retrieve_tpm2_eventlog(sys_table); 792 + efi_retrieve_tpm2_eventlog(); 749 793 750 794 setup_graphics(boot_params); 751 795 ··· 753 797 754 798 setup_quirks(boot_params); 755 799 756 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 757 - sizeof(*gdt), (void **)&gdt); 800 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*gdt), 801 + (void **)&gdt); 758 802 if (status != EFI_SUCCESS) { 759 - efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n"); 803 + efi_printk("Failed to allocate memory for 'gdt' structure\n"); 760 804 goto fail; 761 805 } 762 806 763 807 gdt->size = 0x800; 764 - status = efi_low_alloc(sys_table, gdt->size, 8, 765 - (unsigned long *)&gdt->address); 808 + status = efi_low_alloc(gdt->size, 8, (unsigned long *)&gdt->address); 766 809 if (status != EFI_SUCCESS) { 767 - efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n"); 810 + efi_printk("Failed to allocate memory for 'gdt'\n"); 768 811 goto fail; 769 812 } 770 813 ··· 773 818 */ 774 819 if (hdr->pref_address != hdr->code32_start) { 775 820 unsigned long bzimage_addr = hdr->code32_start; 776 - status = efi_relocate_kernel(sys_table, &bzimage_addr, 821 + status = efi_relocate_kernel(&bzimage_addr, 777 822 hdr->init_size, hdr->init_size, 778 823 hdr->pref_address, 779 824 hdr->kernel_alignment, 780 825 LOAD_PHYSICAL_ADDR); 781 826 if (status != EFI_SUCCESS) { 782 - efi_printk(sys_table, "efi_relocate_kernel() failed!\n"); 827 + efi_printk("efi_relocate_kernel() failed!\n"); 783 828 goto fail; 784 829 
} 785 830 ··· 789 834 790 835 status = exit_boot(boot_params, handle); 791 836 if (status != EFI_SUCCESS) { 792 - efi_printk(sys_table, "exit_boot() failed!\n"); 837 + efi_printk("exit_boot() failed!\n"); 793 838 goto fail; 794 839 } 795 840 ··· 882 927 883 928 return boot_params; 884 929 fail: 885 - efi_printk(sys_table, "efi_main() failed!\n"); 930 + efi_printk("efi_main() failed!\n"); 886 931 887 - return NULL; 932 + for (;;) 933 + asm("hlt"); 888 934 }
+14 -16
arch/x86/boot/compressed/eboot.h
··· 12 12 13 13 #define DESC_TYPE_CODE_DATA (1 << 0) 14 14 15 - typedef struct { 16 - u32 get_mode; 17 - u32 set_mode; 18 - u32 blt; 19 - } efi_uga_draw_protocol_32_t; 15 + typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t; 20 16 21 - typedef struct { 22 - u64 get_mode; 23 - u64 set_mode; 24 - u64 blt; 25 - } efi_uga_draw_protocol_64_t; 26 - 27 - typedef struct { 28 - void *get_mode; 29 - void *set_mode; 30 - void *blt; 31 - } efi_uga_draw_protocol_t; 17 + union efi_uga_draw_protocol { 18 + struct { 19 + efi_status_t (__efiapi *get_mode)(efi_uga_draw_protocol_t *, 20 + u32*, u32*, u32*, u32*); 21 + void *set_mode; 22 + void *blt; 23 + }; 24 + struct { 25 + u32 get_mode; 26 + u32 set_mode; 27 + u32 blt; 28 + } mixed_mode; 29 + }; 32 30 33 31 #endif /* BOOT_COMPRESSED_EBOOT_H */
-87
arch/x86/boot/compressed/efi_stub_32.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * EFI call stub for IA32. 4 - * 5 - * This stub allows us to make EFI calls in physical mode with interrupts 6 - * turned off. Note that this implementation is different from the one in 7 - * arch/x86/platform/efi/efi_stub_32.S because we're _already_ in physical 8 - * mode at this point. 9 - */ 10 - 11 - #include <linux/linkage.h> 12 - #include <asm/page_types.h> 13 - 14 - /* 15 - * efi_call_phys(void *, ...) is a function with variable parameters. 16 - * All the callers of this function assure that all the parameters are 4-bytes. 17 - */ 18 - 19 - /* 20 - * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. 21 - * So we'd better save all of them at the beginning of this function and restore 22 - * at the end no matter how many we use, because we can not assure EFI runtime 23 - * service functions will comply with gcc calling convention, too. 24 - */ 25 - 26 - .text 27 - SYM_FUNC_START(efi_call_phys) 28 - /* 29 - * 0. The function can only be called in Linux kernel. So CS has been 30 - * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found 31 - * the values of these registers are the same. And, the corresponding 32 - * GDT entries are identical. So I will do nothing about segment reg 33 - * and GDT, but change GDT base register in prelog and epilog. 34 - */ 35 - 36 - /* 37 - * 1. Because we haven't been relocated by this point we need to 38 - * use relative addressing. 39 - */ 40 - call 1f 41 - 1: popl %edx 42 - subl $1b, %edx 43 - 44 - /* 45 - * 2. Now on the top of stack is the return 46 - * address in the caller of efi_call_phys(), then parameter 1, 47 - * parameter 2, ..., param n. To make things easy, we save the return 48 - * address of efi_call_phys in a global variable. 49 - */ 50 - popl %ecx 51 - movl %ecx, saved_return_addr(%edx) 52 - /* get the function pointer into ECX*/ 53 - popl %ecx 54 - movl %ecx, efi_rt_function_ptr(%edx) 55 - 56 - /* 57 - * 3. 
Call the physical function. 58 - */ 59 - call *%ecx 60 - 61 - /* 62 - * 4. Balance the stack. And because EAX contain the return value, 63 - * we'd better not clobber it. We need to calculate our address 64 - * again because %ecx and %edx are not preserved across EFI function 65 - * calls. 66 - */ 67 - call 1f 68 - 1: popl %edx 69 - subl $1b, %edx 70 - 71 - movl efi_rt_function_ptr(%edx), %ecx 72 - pushl %ecx 73 - 74 - /* 75 - * 10. Push the saved return address onto the stack and return. 76 - */ 77 - movl saved_return_addr(%edx), %ecx 78 - pushl %ecx 79 - ret 80 - SYM_FUNC_END(efi_call_phys) 81 - .previous 82 - 83 - .data 84 - saved_return_addr: 85 - .long 0 86 - efi_rt_function_ptr: 87 - .long 0
-5
arch/x86/boot/compressed/efi_stub_64.S
··· 1 - #include <asm/segment.h> 2 - #include <asm/msr.h> 3 - #include <asm/processor-flags.h> 4 - 5 - #include "../../platform/efi/efi_stub_64.S"
+17 -48
arch/x86/boot/compressed/efi_thunk_64.S
··· 10 10 * needs to be able to service interrupts. 11 11 * 12 12 * On the plus side, we don't have to worry about mangling 64-bit 13 - * addresses into 32-bits because we're executing with an identify 13 + * addresses into 32-bits because we're executing with an identity 14 14 * mapped pagetable and haven't transitioned to 64-bit virtual addresses 15 15 * yet. 16 16 */ ··· 23 23 24 24 .code64 25 25 .text 26 - SYM_FUNC_START(efi64_thunk) 26 + SYM_FUNC_START(__efi64_thunk) 27 27 push %rbp 28 28 push %rbx 29 29 30 - subq $8, %rsp 31 - leaq efi_exit32(%rip), %rax 32 - movl %eax, 4(%rsp) 33 - leaq efi_gdt64(%rip), %rax 34 - movl %eax, (%rsp) 35 - movl %eax, 2(%rax) /* Fixup the gdt base address */ 30 + leaq 1f(%rip), %rbp 31 + leaq efi_gdt64(%rip), %rbx 32 + movl %ebx, 2(%rbx) /* Fixup the gdt base address */ 36 33 37 34 movl %ds, %eax 38 35 push %rax ··· 45 48 movl %esi, 0x0(%rsp) 46 49 movl %edx, 0x4(%rsp) 47 50 movl %ecx, 0x8(%rsp) 48 - movq %r8, %rsi 49 - movl %esi, 0xc(%rsp) 50 - movq %r9, %rsi 51 - movl %esi, 0x10(%rsp) 51 + movl %r8d, 0xc(%rsp) 52 + movl %r9d, 0x10(%rsp) 52 53 53 - sgdt save_gdt(%rip) 54 - 55 - leaq 1f(%rip), %rbx 56 - movq %rbx, func_rt_ptr(%rip) 54 + sgdt 0x14(%rsp) 57 55 58 56 /* 59 57 * Switch to gdt with 32-bit segments. This is the firmware GDT ··· 63 71 pushq %rax 64 72 lretq 65 73 66 - 1: addq $32, %rsp 67 - 68 - lgdt save_gdt(%rip) 74 + 1: lgdt 0x14(%rsp) 75 + addq $32, %rsp 76 + movq %rdi, %rax 69 77 70 78 pop %rbx 71 79 movl %ebx, %ss ··· 77 85 /* 78 86 * Convert 32-bit status code into 64-bit. 
79 87 */ 80 - test %rax, %rax 81 - jz 1f 82 - movl %eax, %ecx 83 - andl $0x0fffffff, %ecx 84 - andl $0xf0000000, %eax 85 - shl $32, %rax 86 - or %rcx, %rax 87 - 1: 88 - addq $8, %rsp 88 + roll $1, %eax 89 + rorq $1, %rax 90 + 89 91 pop %rbx 90 92 pop %rbp 91 93 ret 92 - SYM_FUNC_END(efi64_thunk) 93 - 94 - SYM_FUNC_START_LOCAL(efi_exit32) 95 - movq func_rt_ptr(%rip), %rax 96 - push %rax 97 - mov %rdi, %rax 98 - ret 99 - SYM_FUNC_END(efi_exit32) 94 + SYM_FUNC_END(__efi64_thunk) 100 95 101 96 .code32 102 97 /* ··· 123 144 */ 124 145 cli 125 146 126 - movl 56(%esp), %eax 127 - movl %eax, 2(%eax) 128 - lgdtl (%eax) 147 + lgdtl (%ebx) 129 148 130 149 movl %cr4, %eax 131 150 btsl $(X86_CR4_PAE_BIT), %eax ··· 140 163 xorl %eax, %eax 141 164 lldt %ax 142 165 143 - movl 60(%esp), %eax 144 166 pushl $__KERNEL_CS 145 - pushl %eax 167 + pushl %ebp 146 168 147 169 /* Enable paging */ 148 170 movl %cr0, %eax ··· 156 180 .word 0 157 181 .quad 0 158 182 SYM_DATA_END(efi32_boot_gdt) 159 - 160 - SYM_DATA_START_LOCAL(save_gdt) 161 - .word 0 162 - .quad 0 163 - SYM_DATA_END(save_gdt) 164 - 165 - SYM_DATA_LOCAL(func_rt_ptr, .quad 0) 166 183 167 184 SYM_DATA_START(efi_gdt64) 168 185 .word efi_gdt64_end - efi_gdt64
+2 -62
arch/x86/boot/compressed/head_32.S
··· 145 145 SYM_FUNC_END(startup_32) 146 146 147 147 #ifdef CONFIG_EFI_STUB 148 - /* 149 - * We don't need the return address, so set up the stack so efi_main() can find 150 - * its arguments. 151 - */ 152 - SYM_FUNC_START(efi_pe_entry) 153 - add $0x4, %esp 154 - 155 - call 1f 156 - 1: popl %esi 157 - subl $1b, %esi 158 - 159 - popl %ecx 160 - movl %ecx, efi32_config(%esi) /* Handle */ 161 - popl %ecx 162 - movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */ 163 - 164 - /* Relocate efi_config->call() */ 165 - leal efi32_config(%esi), %eax 166 - add %esi, 40(%eax) 167 - pushl %eax 168 - 169 - call make_boot_params 170 - cmpl $0, %eax 171 - je fail 172 - movl %esi, BP_code32_start(%eax) 173 - popl %ecx 174 - pushl %eax 175 - pushl %ecx 176 - jmp 2f /* Skip efi_config initialization */ 177 - SYM_FUNC_END(efi_pe_entry) 178 - 179 148 SYM_FUNC_START(efi32_stub_entry) 149 + SYM_FUNC_START_ALIAS(efi_stub_entry) 180 150 add $0x4, %esp 181 - popl %ecx 182 - popl %edx 183 - 184 - call 1f 185 - 1: popl %esi 186 - subl $1b, %esi 187 - 188 - movl %ecx, efi32_config(%esi) /* Handle */ 189 - movl %edx, efi32_config+8(%esi) /* EFI System table pointer */ 190 - 191 - /* Relocate efi_config->call() */ 192 - leal efi32_config(%esi), %eax 193 - add %esi, 40(%eax) 194 - pushl %eax 195 - 2: 196 151 call efi_main 197 - cmpl $0, %eax 198 152 movl %eax, %esi 199 - jne 2f 200 - fail: 201 - /* EFI init failed, so hang. */ 202 - hlt 203 - jmp fail 204 - 2: 205 153 movl BP_code32_start(%esi), %eax 206 154 leal startup_32(%eax), %eax 207 155 jmp *%eax 208 156 SYM_FUNC_END(efi32_stub_entry) 157 + SYM_FUNC_END_ALIAS(efi_stub_entry) 209 158 #endif 210 159 211 160 .text ··· 210 261 xorl %ebx, %ebx 211 262 jmp *%eax 212 263 SYM_FUNC_END(.Lrelocated) 213 - 214 - #ifdef CONFIG_EFI_STUB 215 - .data 216 - efi32_config: 217 - .fill 5,8,0 218 - .long efi_call_phys 219 - .long 0 220 - .byte 0 221 - #endif 222 264 223 265 /* 224 266 * Stack and heap for uncompression
+15 -82
arch/x86/boot/compressed/head_64.S
··· 208 208 pushl $__KERNEL_CS 209 209 leal startup_64(%ebp), %eax 210 210 #ifdef CONFIG_EFI_MIXED 211 - movl efi32_config(%ebp), %ebx 212 - cmp $0, %ebx 211 + movl efi32_boot_args(%ebp), %edi 212 + cmp $0, %edi 213 213 jz 1f 214 - leal handover_entry(%ebp), %eax 214 + leal efi64_stub_entry(%ebp), %eax 215 + movl %esi, %edx 216 + movl efi32_boot_args+4(%ebp), %esi 215 217 1: 216 218 #endif 217 219 pushl %eax ··· 234 232 popl %edx 235 233 popl %esi 236 234 237 - leal (BP_scratch+4)(%esi), %esp 238 235 call 1f 239 236 1: pop %ebp 240 237 subl $1b, %ebp 241 238 242 - movl %ecx, efi32_config(%ebp) 243 - movl %edx, efi32_config+8(%ebp) 239 + movl %ecx, efi32_boot_args(%ebp) 240 + movl %edx, efi32_boot_args+4(%ebp) 244 241 sgdtl efi32_boot_gdt(%ebp) 245 - 246 - leal efi32_config(%ebp), %eax 247 - movl %eax, efi_config(%ebp) 242 + movb $0, efi_is64(%ebp) 248 243 249 244 /* Disable paging */ 250 245 movl %cr0, %eax ··· 449 450 SYM_CODE_END(startup_64) 450 451 451 452 #ifdef CONFIG_EFI_STUB 452 - 453 - /* The entry point for the PE/COFF executable is efi_pe_entry. */ 454 - SYM_FUNC_START(efi_pe_entry) 455 - movq %rcx, efi64_config(%rip) /* Handle */ 456 - movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */ 457 - 458 - leaq efi64_config(%rip), %rax 459 - movq %rax, efi_config(%rip) 460 - 461 - call 1f 462 - 1: popq %rbp 463 - subq $1b, %rbp 464 - 465 - /* 466 - * Relocate efi_config->call(). 467 - */ 468 - addq %rbp, efi64_config+40(%rip) 469 - 470 - movq %rax, %rdi 471 - call make_boot_params 472 - cmpq $0,%rax 473 - je fail 474 - mov %rax, %rsi 475 - leaq startup_32(%rip), %rax 476 - movl %eax, BP_code32_start(%rsi) 477 - jmp 2f /* Skip the relocation */ 478 - 479 - handover_entry: 480 - call 1f 481 - 1: popq %rbp 482 - subq $1b, %rbp 483 - 484 - /* 485 - * Relocate efi_config->call(). 
486 - */ 487 - movq efi_config(%rip), %rax 488 - addq %rbp, 40(%rax) 489 - 2: 490 - movq efi_config(%rip), %rdi 453 + .org 0x390 454 + SYM_FUNC_START(efi64_stub_entry) 455 + SYM_FUNC_START_ALIAS(efi_stub_entry) 456 + and $~0xf, %rsp /* realign the stack */ 491 457 call efi_main 492 458 movq %rax,%rsi 493 - cmpq $0,%rax 494 - jne 2f 495 - fail: 496 - /* EFI init failed, so hang. */ 497 - hlt 498 - jmp fail 499 - 2: 500 459 movl BP_code32_start(%esi), %eax 501 460 leaq startup_64(%rax), %rax 502 461 jmp *%rax 503 - SYM_FUNC_END(efi_pe_entry) 504 - 505 - .org 0x390 506 - SYM_FUNC_START(efi64_stub_entry) 507 - movq %rdi, efi64_config(%rip) /* Handle */ 508 - movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */ 509 - 510 - leaq efi64_config(%rip), %rax 511 - movq %rax, efi_config(%rip) 512 - 513 - movq %rdx, %rsi 514 - jmp handover_entry 515 462 SYM_FUNC_END(efi64_stub_entry) 463 + SYM_FUNC_END_ALIAS(efi_stub_entry) 516 464 #endif 517 465 518 466 .text ··· 628 682 .quad 0x0000000000000000 /* TS continued */ 629 683 SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) 630 684 631 - #ifdef CONFIG_EFI_STUB 632 - SYM_DATA_LOCAL(efi_config, .quad 0) 633 - 634 685 #ifdef CONFIG_EFI_MIXED 635 - SYM_DATA_START(efi32_config) 636 - .fill 5,8,0 637 - .quad efi64_thunk 638 - .byte 0 639 - SYM_DATA_END(efi32_config) 686 + SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0) 687 + SYM_DATA(efi_is64, .byte 1) 640 688 #endif 641 - 642 - SYM_DATA_START(efi64_config) 643 - .fill 5,8,0 644 - .quad efi_call 645 - .byte 1 646 - SYM_DATA_END(efi64_config) 647 - #endif /* CONFIG_EFI_STUB */ 648 689 649 690 /* 650 691 * Stack and heap for uncompression
+1 -9
arch/x86/include/asm/cpu_entry_area.h
··· 6 6 #include <linux/percpu-defs.h> 7 7 #include <asm/processor.h> 8 8 #include <asm/intel_ds.h> 9 + #include <asm/pgtable_areas.h> 9 10 10 11 #ifdef CONFIG_X86_64 11 12 ··· 134 133 135 134 extern void setup_cpu_entry_areas(void); 136 135 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); 137 - 138 - /* Single page reserved for the readonly IDT mapping: */ 139 - #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE 140 - #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) 141 - 142 - #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) 143 - 144 - #define CPU_ENTRY_AREA_MAP_SIZE \ 145 - (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE) 146 136 147 137 extern struct cpu_entry_area *get_cpu_entry_area(int cpu); 148 138
+168 -80
arch/x86/include/asm/efi.h
··· 8 8 #include <asm/tlb.h> 9 9 #include <asm/nospec-branch.h> 10 10 #include <asm/mmu_context.h> 11 + #include <linux/build_bug.h> 11 12 12 13 /* 13 14 * We map the EFI regions needed for runtime services non-contiguously, ··· 20 19 * This is the main reason why we're doing stable VA mappings for RT 21 20 * services. 22 21 * 23 - * This flag is used in conjunction with a chicken bit called 24 - * "efi=old_map" which can be used as a fallback to the old runtime 25 - * services mapping method in case there's some b0rkage with a 26 - * particular EFI implementation (haha, it is hard to hold up the 27 - * sarcasm here...). 22 + * SGI UV1 machines are known to be incompatible with this scheme, so we 23 + * provide an opt-out for these machines via a DMI quirk that sets the 24 + * attribute below. 28 25 */ 29 - #define EFI_OLD_MEMMAP EFI_ARCH_1 26 + #define EFI_UV1_MEMMAP EFI_ARCH_1 27 + 28 + static inline bool efi_have_uv1_memmap(void) 29 + { 30 + return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP); 31 + } 30 32 31 33 #define EFI32_LOADER_SIGNATURE "EL32" 32 34 #define EFI64_LOADER_SIGNATURE "EL64" ··· 38 34 39 35 #define ARCH_EFI_IRQ_FLAGS_MASK X86_EFLAGS_IF 40 36 37 + /* 38 + * The EFI services are called through variadic functions in many cases. These 39 + * functions are implemented in assembler and support only a fixed number of 40 + * arguments. The macros below allows us to check at build time that we don't 41 + * try to call them with too many arguments. 42 + * 43 + * __efi_nargs() will return the number of arguments if it is 7 or less, and 44 + * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it 45 + * impossible to calculate the exact number of arguments beyond some 46 + * pre-defined limit. The maximum number of arguments currently supported by 47 + * any of the thunks is 7, so this is good enough for now and can be extended 48 + * in the obvious way if we ever need more. 49 + */ 50 + 51 + #define __efi_nargs(...) 
__efi_nargs_(__VA_ARGS__) 52 + #define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__, \ 53 + __efi_arg_sentinel(7), __efi_arg_sentinel(6), \ 54 + __efi_arg_sentinel(5), __efi_arg_sentinel(4), \ 55 + __efi_arg_sentinel(3), __efi_arg_sentinel(2), \ 56 + __efi_arg_sentinel(1), __efi_arg_sentinel(0)) 57 + #define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...) \ 58 + __take_second_arg(n, \ 59 + ({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; })) 60 + #define __efi_arg_sentinel(n) , n 61 + 62 + /* 63 + * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis 64 + * represents more than n arguments. 65 + */ 66 + 67 + #define __efi_nargs_check(f, n, ...) \ 68 + __efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n) 69 + #define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n) 70 + #define __efi_nargs_check__(f, p, n) ({ \ 71 + BUILD_BUG_ON_MSG( \ 72 + (p) > (n), \ 73 + #f " called with too many arguments (" #p ">" #n ")"); \ 74 + }) 75 + 41 76 #ifdef CONFIG_X86_32 42 - 43 - extern asmlinkage unsigned long efi_call_phys(void *, ...); 44 - 45 77 #define arch_efi_call_virt_setup() \ 46 78 ({ \ 47 79 kernel_fpu_begin(); \ ··· 91 51 }) 92 52 93 53 94 - /* 95 - * Wrap all the virtual calls in a way that forces the parameters on the stack. 96 - */ 97 - #define arch_efi_call_virt(p, f, args...) \ 98 - ({ \ 99 - ((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args); \ 100 - }) 54 + #define arch_efi_call_virt(p, f, args...) p->f(args) 101 55 102 56 #define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size) 103 57 ··· 99 65 100 66 #define EFI_LOADER_SIGNATURE "EL64" 101 67 102 - extern asmlinkage u64 efi_call(void *fp, ...); 68 + extern asmlinkage u64 __efi_call(void *fp, ...); 103 69 104 - #define efi_call_phys(f, args...) efi_call((f), args) 70 + #define efi_call(...) 
({ \ 71 + __efi_nargs_check(efi_call, 7, __VA_ARGS__); \ 72 + __efi_call(__VA_ARGS__); \ 73 + }) 105 74 106 75 /* 107 76 * struct efi_scratch - Scratch space used while switching to/from efi_mm ··· 122 85 kernel_fpu_begin(); \ 123 86 firmware_restrict_branch_speculation_start(); \ 124 87 \ 125 - if (!efi_enabled(EFI_OLD_MEMMAP)) \ 88 + if (!efi_have_uv1_memmap()) \ 126 89 efi_switch_mm(&efi_mm); \ 127 90 }) 128 91 ··· 131 94 132 95 #define arch_efi_call_virt_teardown() \ 133 96 ({ \ 134 - if (!efi_enabled(EFI_OLD_MEMMAP)) \ 97 + if (!efi_have_uv1_memmap()) \ 135 98 efi_switch_mm(efi_scratch.prev_mm); \ 136 99 \ 137 100 firmware_restrict_branch_speculation_end(); \ ··· 158 121 extern struct efi_scratch efi_scratch; 159 122 extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable); 160 123 extern int __init efi_memblock_x86_reserve_range(void); 161 - extern pgd_t * __init efi_call_phys_prolog(void); 162 - extern void __init efi_call_phys_epilog(pgd_t *save_pgd); 163 124 extern void __init efi_print_memmap(void); 164 125 extern void __init efi_memory_uc(u64 addr, unsigned long size); 165 126 extern void __init efi_map_region(efi_memory_desc_t *md); ··· 175 140 extern void efi_switch_mm(struct mm_struct *mm); 176 141 extern void efi_recover_from_page_fault(unsigned long phys_addr); 177 142 extern void efi_free_boot_services(void); 143 + extern pgd_t * __init efi_uv1_memmap_phys_prolog(void); 144 + extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd); 178 145 179 146 struct efi_setup_data { 180 147 u64 fw_vendor; ··· 189 152 extern u64 efi_setup; 190 153 191 154 #ifdef CONFIG_EFI 155 + extern efi_status_t __efi64_thunk(u32, ...); 192 156 193 - static inline bool efi_is_native(void) 157 + #define efi64_thunk(...) 
({ \ 158 + __efi_nargs_check(efi64_thunk, 6, __VA_ARGS__); \ 159 + __efi64_thunk(__VA_ARGS__); \ 160 + }) 161 + 162 + static inline bool efi_is_mixed(void) 194 163 { 195 - return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); 164 + if (!IS_ENABLED(CONFIG_EFI_MIXED)) 165 + return false; 166 + return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT); 196 167 } 197 168 198 169 static inline bool efi_runtime_supported(void) 199 170 { 200 - if (efi_is_native()) 171 + if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT)) 201 172 return true; 202 173 203 - if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP)) 204 - return true; 205 - 206 - return false; 174 + return IS_ENABLED(CONFIG_EFI_MIXED); 207 175 } 208 176 209 177 extern void parse_efi_setup(u64 phys_addr, u32 data_len); 210 178 211 179 extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); 212 180 213 - #ifdef CONFIG_EFI_MIXED 214 181 extern void efi_thunk_runtime_setup(void); 215 - extern efi_status_t efi_thunk_set_virtual_address_map( 216 - void *phys_set_virtual_address_map, 217 - unsigned long memory_map_size, 218 - unsigned long descriptor_size, 219 - u32 descriptor_version, 220 - efi_memory_desc_t *virtual_map); 221 - #else 222 - static inline void efi_thunk_runtime_setup(void) {} 223 - static inline efi_status_t efi_thunk_set_virtual_address_map( 224 - void *phys_set_virtual_address_map, 225 - unsigned long memory_map_size, 226 - unsigned long descriptor_size, 227 - u32 descriptor_version, 228 - efi_memory_desc_t *virtual_map) 229 - { 230 - return EFI_SUCCESS; 231 - } 232 - #endif /* CONFIG_EFI_MIXED */ 233 - 182 + efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size, 183 + unsigned long descriptor_size, 184 + u32 descriptor_version, 185 + efi_memory_desc_t *virtual_map); 234 186 235 187 /* arch specific definitions used by the stub code */ 236 188 237 - struct efi_config { 238 - u64 image_handle; 239 - u64 table; 240 - u64 runtime_services; 241 
- u64 boot_services; 242 - u64 text_output; 243 - efi_status_t (*call)(unsigned long, ...); 244 - bool is64; 245 - } __packed; 189 + __attribute_const__ bool efi_is_64bit(void); 246 190 247 - __pure const struct efi_config *__efi_early(void); 248 - 249 - static inline bool efi_is_64bit(void) 191 + static inline bool efi_is_native(void) 250 192 { 251 193 if (!IS_ENABLED(CONFIG_X86_64)) 252 - return false; 253 - 194 + return true; 254 195 if (!IS_ENABLED(CONFIG_EFI_MIXED)) 255 196 return true; 256 - 257 - return __efi_early()->is64; 197 + return efi_is_64bit(); 258 198 } 259 199 260 - #define efi_table_attr(table, attr, instance) \ 261 - (efi_is_64bit() ? \ 262 - ((table##_64_t *)(unsigned long)instance)->attr : \ 263 - ((table##_32_t *)(unsigned long)instance)->attr) 200 + #define efi_mixed_mode_cast(attr) \ 201 + __builtin_choose_expr( \ 202 + __builtin_types_compatible_p(u32, __typeof__(attr)), \ 203 + (unsigned long)(attr), (attr)) 264 204 265 - #define efi_call_proto(protocol, f, instance, ...) \ 266 - __efi_early()->call(efi_table_attr(protocol, f, instance), \ 267 - instance, ##__VA_ARGS__) 205 + #define efi_table_attr(inst, attr) \ 206 + (efi_is_native() \ 207 + ? inst->attr \ 208 + : (__typeof__(inst->attr)) \ 209 + efi_mixed_mode_cast(inst->mixed_mode.attr)) 268 210 269 - #define efi_call_early(f, ...) \ 270 - __efi_early()->call(efi_table_attr(efi_boot_services, f, \ 271 - __efi_early()->boot_services), __VA_ARGS__) 211 + /* 212 + * The following macros allow translating arguments if necessary from native to 213 + * mixed mode. The use case for this is to initialize the upper 32 bits of 214 + * output parameters, and where the 32-bit method requires a 64-bit argument, 215 + * which must be split up into two arguments to be thunked properly. 216 + * 217 + * As examples, the AllocatePool boot service returns the address of the 218 + * allocation, but it will not set the high 32 bits of the address. 
To ensure 219 + * that the full 64-bit address is initialized, we zero-init the address before 220 + * calling the thunk. 221 + * 222 + * The FreePages boot service takes a 64-bit physical address even in 32-bit 223 + * mode. For the thunk to work correctly, a native 64-bit call of 224 + * free_pages(addr, size) 225 + * must be translated to 226 + * efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size) 227 + * so that the two 32-bit halves of addr get pushed onto the stack separately. 228 + */ 272 229 273 - #define __efi_call_early(f, ...) \ 274 - __efi_early()->call((unsigned long)f, __VA_ARGS__); 230 + static inline void *efi64_zero_upper(void *p) 231 + { 232 + ((u32 *)p)[1] = 0; 233 + return p; 234 + } 275 235 276 - #define efi_call_runtime(f, ...) \ 277 - __efi_early()->call(efi_table_attr(efi_runtime_services, f, \ 278 - __efi_early()->runtime_services), __VA_ARGS__) 236 + #define __efi64_argmap_free_pages(addr, size) \ 237 + ((addr), 0, (size)) 238 + 239 + #define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver) \ 240 + ((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver)) 241 + 242 + #define __efi64_argmap_allocate_pool(type, size, buffer) \ 243 + ((type), (size), efi64_zero_upper(buffer)) 244 + 245 + #define __efi64_argmap_handle_protocol(handle, protocol, interface) \ 246 + ((handle), (protocol), efi64_zero_upper(interface)) 247 + 248 + #define __efi64_argmap_locate_protocol(protocol, reg, interface) \ 249 + ((protocol), (reg), efi64_zero_upper(interface)) 250 + 251 + /* PCI I/O */ 252 + #define __efi64_argmap_get_location(protocol, seg, bus, dev, func) \ 253 + ((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus), \ 254 + efi64_zero_upper(dev), efi64_zero_upper(func)) 255 + 256 + /* 257 + * The macros below handle the plumbing for the argument mapping. To add a 258 + * mapping for a specific EFI method, simply define a macro 259 + * __efi64_argmap_<method name>, following the examples above. 
260 + */ 261 + 262 + #define __efi64_thunk_map(inst, func, ...) \ 263 + efi64_thunk(inst->mixed_mode.func, \ 264 + __efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__), \ 265 + (__VA_ARGS__))) 266 + 267 + #define __efi64_argmap(mapped, args) \ 268 + __PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args) 269 + #define __efi64_argmap__0(mapped, args) __efi_eval mapped 270 + #define __efi64_argmap__1(mapped, args) __efi_eval args 271 + 272 + #define __efi_eat(...) 273 + #define __efi_eval(...) __VA_ARGS__ 274 + 275 + /* The three macros below handle dispatching via the thunk if needed */ 276 + 277 + #define efi_call_proto(inst, func, ...) \ 278 + (efi_is_native() \ 279 + ? inst->func(inst, ##__VA_ARGS__) \ 280 + : __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__)) 281 + 282 + #define efi_bs_call(func, ...) \ 283 + (efi_is_native() \ 284 + ? efi_system_table()->boottime->func(__VA_ARGS__) \ 285 + : __efi64_thunk_map(efi_table_attr(efi_system_table(), \ 286 + boottime), func, __VA_ARGS__)) 287 + 288 + #define efi_rt_call(func, ...) \ 289 + (efi_is_native() \ 290 + ? efi_system_table()->runtime->func(__VA_ARGS__) \ 291 + : __efi64_thunk_map(efi_table_attr(efi_system_table(), \ 292 + runtime), func, __VA_ARGS__)) 279 293 280 294 extern bool efi_reboot_required(void); 281 295 extern bool efi_is_table_address(unsigned long phys_addr);
+27
arch/x86/include/asm/memtype.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_X86_MEMTYPE_H 3 + #define _ASM_X86_MEMTYPE_H 4 + 5 + #include <linux/types.h> 6 + #include <asm/pgtable_types.h> 7 + 8 + extern bool pat_enabled(void); 9 + extern void pat_disable(const char *reason); 10 + extern void pat_init(void); 11 + extern void init_cache_modes(void); 12 + 13 + extern int memtype_reserve(u64 start, u64 end, 14 + enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); 15 + extern int memtype_free(u64 start, u64 end); 16 + 17 + extern int memtype_kernel_map_sync(u64 base, unsigned long size, 18 + enum page_cache_mode pcm); 19 + 20 + extern int memtype_reserve_io(resource_size_t start, resource_size_t end, 21 + enum page_cache_mode *pcm); 22 + 23 + extern void memtype_free_io(resource_size_t start, resource_size_t end); 24 + 25 + extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn); 26 + 27 + #endif /* _ASM_X86_MEMTYPE_H */
+6 -80
arch/x86/include/asm/mmu_context.h
··· 69 69 int slot; 70 70 }; 71 71 72 - /* This is a multiple of PAGE_SIZE. */ 73 - #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) 74 - 75 - static inline void *ldt_slot_va(int slot) 76 - { 77 - return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); 78 - } 79 - 80 72 /* 81 73 * Used for LDT copy/destruction. 82 74 */ ··· 91 99 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } 92 100 #endif 93 101 102 + #ifdef CONFIG_MODIFY_LDT_SYSCALL 103 + extern void load_mm_ldt(struct mm_struct *mm); 104 + extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next); 105 + #else 94 106 static inline void load_mm_ldt(struct mm_struct *mm) 95 107 { 96 - #ifdef CONFIG_MODIFY_LDT_SYSCALL 97 - struct ldt_struct *ldt; 98 - 99 - /* READ_ONCE synchronizes with smp_store_release */ 100 - ldt = READ_ONCE(mm->context.ldt); 101 - 102 - /* 103 - * Any change to mm->context.ldt is followed by an IPI to all 104 - * CPUs with the mm active. The LDT will not be freed until 105 - * after the IPI is handled by all such CPUs. This means that, 106 - * if the ldt_struct changes before we return, the values we see 107 - * will be safe, and the new values will be loaded before we run 108 - * any user code. 109 - * 110 - * NB: don't try to convert this to use RCU without extreme care. 111 - * We would still need IRQs off, because we don't want to change 112 - * the local LDT after an IPI loaded a newer value than the one 113 - * that we can see. 114 - */ 115 - 116 - if (unlikely(ldt)) { 117 - if (static_cpu_has(X86_FEATURE_PTI)) { 118 - if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { 119 - /* 120 - * Whoops -- either the new LDT isn't mapped 121 - * (if slot == -1) or is mapped into a bogus 122 - * slot (if slot > 1). 123 - */ 124 - clear_LDT(); 125 - return; 126 - } 127 - 128 - /* 129 - * If page table isolation is enabled, ldt->entries 130 - * will not be mapped in the userspace pagetables. 
131 - * Tell the CPU to access the LDT through the alias 132 - * at ldt_slot_va(ldt->slot). 133 - */ 134 - set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); 135 - } else { 136 - set_ldt(ldt->entries, ldt->nr_entries); 137 - } 138 - } else { 139 - clear_LDT(); 140 - } 141 - #else 142 108 clear_LDT(); 143 - #endif 144 109 } 145 - 146 110 static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) 147 111 { 148 - #ifdef CONFIG_MODIFY_LDT_SYSCALL 149 - /* 150 - * Load the LDT if either the old or new mm had an LDT. 151 - * 152 - * An mm will never go from having an LDT to not having an LDT. Two 153 - * mms never share an LDT, so we don't gain anything by checking to 154 - * see whether the LDT changed. There's also no guarantee that 155 - * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL, 156 - * then prev->context.ldt will also be non-NULL. 157 - * 158 - * If we really cared, we could optimize the case where prev == next 159 - * and we're exiting lazy mode. Most of the time, if this happens, 160 - * we don't actually need to reload LDTR, but modify_ldt() is mostly 161 - * used by legacy code and emulators where we don't need this level of 162 - * performance. 163 - * 164 - * This uses | instead of || because it generates better code. 165 - */ 166 - if (unlikely((unsigned long)prev->context.ldt | 167 - (unsigned long)next->context.ldt)) 168 - load_mm_ldt(next); 169 - #endif 170 - 171 112 DEBUG_LOCKS_WARN_ON(preemptible()); 172 113 } 114 + #endif 173 115 174 - void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); 116 + extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); 175 117 176 118 /* 177 119 * Init a new mm. Used on mm copies, like at fork()
+2 -2
arch/x86/include/asm/mtrr.h
··· 24 24 #define _ASM_X86_MTRR_H 25 25 26 26 #include <uapi/asm/mtrr.h> 27 - #include <asm/pat.h> 27 + #include <asm/memtype.h> 28 28 29 29 30 30 /* ··· 86 86 } 87 87 static inline void mtrr_bp_init(void) 88 88 { 89 - pat_disable("MTRRs disabled, skipping PAT initialization too."); 89 + pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel."); 90 90 } 91 91 92 92 #define mtrr_ap_init() do {} while (0)
-27
arch/x86/include/asm/pat.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_X86_PAT_H 3 - #define _ASM_X86_PAT_H 4 - 5 - #include <linux/types.h> 6 - #include <asm/pgtable_types.h> 7 - 8 - bool pat_enabled(void); 9 - void pat_disable(const char *reason); 10 - extern void pat_init(void); 11 - extern void init_cache_modes(void); 12 - 13 - extern int reserve_memtype(u64 start, u64 end, 14 - enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); 15 - extern int free_memtype(u64 start, u64 end); 16 - 17 - extern int kernel_map_sync_memtype(u64 base, unsigned long size, 18 - enum page_cache_mode pcm); 19 - 20 - int io_reserve_memtype(resource_size_t start, resource_size_t end, 21 - enum page_cache_mode *pcm); 22 - 23 - void io_free_memtype(resource_size_t start, resource_size_t end); 24 - 25 - bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn); 26 - 27 - #endif /* _ASM_X86_PAT_H */
+1 -1
arch/x86/include/asm/pci.h
··· 9 9 #include <linux/scatterlist.h> 10 10 #include <linux/numa.h> 11 11 #include <asm/io.h> 12 - #include <asm/pat.h> 12 + #include <asm/memtype.h> 13 13 #include <asm/x86_init.h> 14 14 15 15 struct pci_sysdata {
+53
arch/x86/include/asm/pgtable_32_areas.h
··· 1 + #ifndef _ASM_X86_PGTABLE_32_AREAS_H 2 + #define _ASM_X86_PGTABLE_32_AREAS_H 3 + 4 + #include <asm/cpu_entry_area.h> 5 + 6 + /* 7 + * Just any arbitrary offset to the start of the vmalloc VM area: the 8 + * current 8MB value just means that there will be a 8MB "hole" after the 9 + * physical memory until the kernel virtual memory starts. That means that 10 + * any out-of-bounds memory accesses will hopefully be caught. 11 + * The vmalloc() routines leaves a hole of 4kB between each vmalloced 12 + * area for the same reason. ;) 13 + */ 14 + #define VMALLOC_OFFSET (8 * 1024 * 1024) 15 + 16 + #ifndef __ASSEMBLY__ 17 + extern bool __vmalloc_start_set; /* set once high_memory is set */ 18 + #endif 19 + 20 + #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) 21 + #ifdef CONFIG_X86_PAE 22 + #define LAST_PKMAP 512 23 + #else 24 + #define LAST_PKMAP 1024 25 + #endif 26 + 27 + #define CPU_ENTRY_AREA_PAGES (NR_CPUS * DIV_ROUND_UP(sizeof(struct cpu_entry_area), PAGE_SIZE)) 28 + 29 + /* The +1 is for the readonly IDT page: */ 30 + #define CPU_ENTRY_AREA_BASE \ 31 + ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK) 32 + 33 + #define LDT_BASE_ADDR \ 34 + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) 35 + 36 + #define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE) 37 + 38 + #define PKMAP_BASE \ 39 + ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK) 40 + 41 + #ifdef CONFIG_HIGHMEM 42 + # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 43 + #else 44 + # define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE) 45 + #endif 46 + 47 + #define MODULES_VADDR VMALLOC_START 48 + #define MODULES_END VMALLOC_END 49 + #define MODULES_LEN (MODULES_VADDR - MODULES_END) 50 + 51 + #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) 52 + 53 + #endif /* _ASM_X86_PGTABLE_32_AREAS_H */
+3 -54
arch/x86/include/asm/pgtable_32_types.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_X86_PGTABLE_32_DEFS_H 3 - #define _ASM_X86_PGTABLE_32_DEFS_H 2 + #ifndef _ASM_X86_PGTABLE_32_TYPES_H 3 + #define _ASM_X86_PGTABLE_32_TYPES_H 4 4 5 5 /* 6 6 * The Linux x86 paging architecture is 'compile-time dual-mode', it ··· 20 20 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 21 21 #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 22 22 23 - /* Just any arbitrary offset to the start of the vmalloc VM area: the 24 - * current 8MB value just means that there will be a 8MB "hole" after the 25 - * physical memory until the kernel virtual memory starts. That means that 26 - * any out-of-bounds memory accesses will hopefully be caught. 27 - * The vmalloc() routines leaves a hole of 4kB between each vmalloced 28 - * area for the same reason. ;) 29 - */ 30 - #define VMALLOC_OFFSET (8 * 1024 * 1024) 31 - 32 - #ifndef __ASSEMBLY__ 33 - extern bool __vmalloc_start_set; /* set once high_memory is set */ 34 - #endif 35 - 36 - #define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) 37 - #ifdef CONFIG_X86_PAE 38 - #define LAST_PKMAP 512 39 - #else 40 - #define LAST_PKMAP 1024 41 - #endif 42 - 43 - /* 44 - * This is an upper bound on sizeof(struct cpu_entry_area) / PAGE_SIZE. 45 - * Define this here and validate with BUILD_BUG_ON() in cpu_entry_area.c 46 - * to avoid include recursion hell. 
47 - */ 48 - #define CPU_ENTRY_AREA_PAGES (NR_CPUS * 43) 49 - 50 - /* The +1 is for the readonly IDT page: */ 51 - #define CPU_ENTRY_AREA_BASE \ 52 - ((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK) 53 - 54 - #define LDT_BASE_ADDR \ 55 - ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) 56 - 57 - #define LDT_END_ADDR (LDT_BASE_ADDR + PMD_SIZE) 58 - 59 - #define PKMAP_BASE \ 60 - ((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK) 61 - 62 - #ifdef CONFIG_HIGHMEM 63 - # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) 64 - #else 65 - # define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE) 66 - #endif 67 - 68 - #define MODULES_VADDR VMALLOC_START 69 - #define MODULES_END VMALLOC_END 70 - #define MODULES_LEN (MODULES_VADDR - MODULES_END) 71 - 72 - #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) 73 - 74 - #endif /* _ASM_X86_PGTABLE_32_DEFS_H */ 23 + #endif /* _ASM_X86_PGTABLE_32_TYPES_H */
+16
arch/x86/include/asm/pgtable_areas.h
··· 1 + #ifndef _ASM_X86_PGTABLE_AREAS_H 2 + #define _ASM_X86_PGTABLE_AREAS_H 3 + 4 + #ifdef CONFIG_X86_32 5 + # include <asm/pgtable_32_areas.h> 6 + #endif 7 + 8 + /* Single page reserved for the readonly IDT mapping: */ 9 + #define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE 10 + #define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) 11 + 12 + #define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) 13 + 14 + #define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE) 15 + 16 + #endif /* _ASM_X86_PGTABLE_AREAS_H */
+70 -65
arch/x86/include/asm/pgtable_types.h
··· 110 110 111 111 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) 112 112 113 - #define _PAGE_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\ 114 - _PAGE_ACCESSED | _PAGE_DIRTY) 115 - #define _KERNPG_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | \ 116 - _PAGE_ACCESSED | _PAGE_DIRTY) 117 - 118 113 /* 119 114 * Set of bits not changed in pte_modify. The pte's 120 115 * protection key is treated like _PAGE_RW, for ··· 131 136 */ 132 137 #ifndef __ASSEMBLY__ 133 138 enum page_cache_mode { 134 - _PAGE_CACHE_MODE_WB = 0, 135 - _PAGE_CACHE_MODE_WC = 1, 139 + _PAGE_CACHE_MODE_WB = 0, 140 + _PAGE_CACHE_MODE_WC = 1, 136 141 _PAGE_CACHE_MODE_UC_MINUS = 2, 137 - _PAGE_CACHE_MODE_UC = 3, 138 - _PAGE_CACHE_MODE_WT = 4, 139 - _PAGE_CACHE_MODE_WP = 5, 140 - _PAGE_CACHE_MODE_NUM = 8 142 + _PAGE_CACHE_MODE_UC = 3, 143 + _PAGE_CACHE_MODE_WT = 4, 144 + _PAGE_CACHE_MODE_WP = 5, 145 + 146 + _PAGE_CACHE_MODE_NUM = 8 141 147 }; 142 148 #endif 143 149 144 - #define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) 150 + #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) 151 + 152 + #define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT) 153 + 145 154 #define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) 146 155 #define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP)) 147 156 148 - #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) 149 - #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ 150 - _PAGE_ACCESSED | _PAGE_NX) 157 + #define __PP _PAGE_PRESENT 158 + #define __RW _PAGE_RW 159 + #define _USR _PAGE_USER 160 + #define ___A _PAGE_ACCESSED 161 + #define ___D _PAGE_DIRTY 162 + #define ___G _PAGE_GLOBAL 163 + #define __NX _PAGE_NX 151 164 152 - #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ 153 - _PAGE_USER | _PAGE_ACCESSED) 154 - #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ 155 - _PAGE_ACCESSED | _PAGE_NX) 156 - #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ 157 - 
_PAGE_ACCESSED) 158 - #define PAGE_COPY PAGE_COPY_NOEXEC 159 - #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ 160 - _PAGE_ACCESSED | _PAGE_NX) 161 - #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ 162 - _PAGE_ACCESSED) 165 + #define _ENC _PAGE_ENC 166 + #define __WP _PAGE_CACHE_WP 167 + #define __NC _PAGE_NOCACHE 168 + #define _PSE _PAGE_PSE 163 169 164 - #define __PAGE_KERNEL_EXEC \ 165 - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) 166 - #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) 170 + #define pgprot_val(x) ((x).pgprot) 171 + #define __pgprot(x) ((pgprot_t) { (x) } ) 172 + #define __pg(x) __pgprot(x) 167 173 168 - #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) 169 - #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) 170 - #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) 171 - #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) 172 - #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) 173 - #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) 174 - #define __PAGE_KERNEL_WP (__PAGE_KERNEL | _PAGE_CACHE_WP) 174 + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) 175 175 176 - #define __PAGE_KERNEL_IO (__PAGE_KERNEL) 177 - #define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE) 176 + #define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G) 177 + #define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0) 178 + #define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0) 179 + #define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) 180 + #define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) 181 + #define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) 182 + #define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) 183 + #define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) 184 + 185 + #define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G) 186 + #define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G) 
187 + #define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0) 188 + #define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC) 189 + #define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0) 190 + #define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC) 191 + #define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G) 192 + #define __PAGE_KERNEL_RX (__PP| 0| 0|___A| 0|___D| 0|___G) 193 + #define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC) 194 + #define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G) 195 + #define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G) 196 + #define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G) 197 + #define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP) 198 + 199 + 200 + #define __PAGE_KERNEL_IO __PAGE_KERNEL 201 + #define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE 202 + 178 203 179 204 #ifndef __ASSEMBLY__ 180 205 181 - #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) 206 + #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC) 207 + #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC) 208 + #define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0) 209 + #define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0) 182 210 183 - #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ 184 - _PAGE_DIRTY | _PAGE_ENC) 185 - #define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER) 211 + #define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask) 186 212 187 - #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC) 188 - #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC) 213 + #define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC) 214 + #define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0) 215 + #define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC) 216 + #define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC) 217 + #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0) 218 + #define PAGE_KERNEL_RX 
__pgprot_mask(__PAGE_KERNEL_RX | _ENC) 219 + #define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC) 220 + #define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC) 221 + #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC) 222 + #define PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC) 189 223 190 - #define __PAGE_KERNEL_NOENC (__PAGE_KERNEL) 191 - #define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP) 192 - 193 - #define default_pgprot(x) __pgprot((x) & __default_kernel_pte_mask) 194 - 195 - #define PAGE_KERNEL default_pgprot(__PAGE_KERNEL | _PAGE_ENC) 196 - #define PAGE_KERNEL_NOENC default_pgprot(__PAGE_KERNEL) 197 - #define PAGE_KERNEL_RO default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC) 198 - #define PAGE_KERNEL_EXEC default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC) 199 - #define PAGE_KERNEL_EXEC_NOENC default_pgprot(__PAGE_KERNEL_EXEC) 200 - #define PAGE_KERNEL_RX default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC) 201 - #define PAGE_KERNEL_NOCACHE default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) 202 - #define PAGE_KERNEL_LARGE default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) 203 - #define PAGE_KERNEL_LARGE_EXEC default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) 204 - #define PAGE_KERNEL_VVAR default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) 205 - 206 - #define PAGE_KERNEL_IO default_pgprot(__PAGE_KERNEL_IO) 207 - #define PAGE_KERNEL_IO_NOCACHE default_pgprot(__PAGE_KERNEL_IO_NOCACHE) 224 + #define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO) 225 + #define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE) 208 226 209 227 #endif /* __ASSEMBLY__ */ 210 228 ··· 456 448 { 457 449 return native_pte_val(pte) & PTE_FLAGS_MASK; 458 450 } 459 - 460 - #define pgprot_val(x) ((x).pgprot) 461 - #define __pgprot(x) ((pgprot_t) { (x) } ) 462 451 463 452 extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM]; 464 453 extern uint8_t __pte2cachemode_tbl[8];
+6
arch/x86/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_X86_VMALLOC_H 2 + #define _ASM_X86_VMALLOC_H 3 + 4 + #include <asm/pgtable_areas.h> 5 + 6 + #endif /* _ASM_X86_VMALLOC_H */
+1 -1
arch/x86/kernel/cpu/common.c
··· 49 49 #include <asm/cpu.h> 50 50 #include <asm/mce.h> 51 51 #include <asm/msr.h> 52 - #include <asm/pat.h> 52 + #include <asm/memtype.h> 53 53 #include <asm/microcode.h> 54 54 #include <asm/microcode_intel.h> 55 55 #include <asm/intel-family.h>
+1 -1
arch/x86/kernel/cpu/mtrr/generic.c
··· 15 15 #include <asm/tlbflush.h> 16 16 #include <asm/mtrr.h> 17 17 #include <asm/msr.h> 18 - #include <asm/pat.h> 18 + #include <asm/memtype.h> 19 19 20 20 #include "mtrr.h" 21 21
+1 -1
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 52 52 #include <asm/e820/api.h> 53 53 #include <asm/mtrr.h> 54 54 #include <asm/msr.h> 55 - #include <asm/pat.h> 55 + #include <asm/memtype.h> 56 56 57 57 #include "mtrr.h" 58 58
+1 -1
arch/x86/kernel/cpu/scattered.c
··· 4 4 */ 5 5 #include <linux/cpu.h> 6 6 7 - #include <asm/pat.h> 7 + #include <asm/memtype.h> 8 8 #include <asm/apic.h> 9 9 #include <asm/processor.h> 10 10
+1 -1
arch/x86/kernel/cpu/topology.c
··· 7 7 8 8 #include <linux/cpu.h> 9 9 #include <asm/apic.h> 10 - #include <asm/pat.h> 10 + #include <asm/memtype.h> 11 11 #include <asm/processor.h> 12 12 13 13 #include "cpu.h"
+1 -1
arch/x86/kernel/kexec-bzimage64.c
··· 177 177 * acpi_rsdp=<addr> on kernel command line to make second kernel boot 178 178 * without efi. 179 179 */ 180 - if (efi_enabled(EFI_OLD_MEMMAP)) 180 + if (efi_have_uv1_memmap()) 181 181 return 0; 182 182 183 183 params->secure_boot = boot_params.secure_boot;
+83
arch/x86/kernel/ldt.c
··· 28 28 #include <asm/desc.h> 29 29 #include <asm/mmu_context.h> 30 30 #include <asm/syscalls.h> 31 + #include <asm/pgtable_areas.h> 32 + 33 + /* This is a multiple of PAGE_SIZE. */ 34 + #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) 35 + 36 + static inline void *ldt_slot_va(int slot) 37 + { 38 + return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); 39 + } 40 + 41 + void load_mm_ldt(struct mm_struct *mm) 42 + { 43 + struct ldt_struct *ldt; 44 + 45 + /* READ_ONCE synchronizes with smp_store_release */ 46 + ldt = READ_ONCE(mm->context.ldt); 47 + 48 + /* 49 + * Any change to mm->context.ldt is followed by an IPI to all 50 + * CPUs with the mm active. The LDT will not be freed until 51 + * after the IPI is handled by all such CPUs. This means that, 52 + * if the ldt_struct changes before we return, the values we see 53 + * will be safe, and the new values will be loaded before we run 54 + * any user code. 55 + * 56 + * NB: don't try to convert this to use RCU without extreme care. 57 + * We would still need IRQs off, because we don't want to change 58 + * the local LDT after an IPI loaded a newer value than the one 59 + * that we can see. 60 + */ 61 + 62 + if (unlikely(ldt)) { 63 + if (static_cpu_has(X86_FEATURE_PTI)) { 64 + if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { 65 + /* 66 + * Whoops -- either the new LDT isn't mapped 67 + * (if slot == -1) or is mapped into a bogus 68 + * slot (if slot > 1). 69 + */ 70 + clear_LDT(); 71 + return; 72 + } 73 + 74 + /* 75 + * If page table isolation is enabled, ldt->entries 76 + * will not be mapped in the userspace pagetables. 77 + * Tell the CPU to access the LDT through the alias 78 + * at ldt_slot_va(ldt->slot). 
79 + */ 80 + set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); 81 + } else { 82 + set_ldt(ldt->entries, ldt->nr_entries); 83 + } 84 + } else { 85 + clear_LDT(); 86 + } 87 + } 88 + 89 + void switch_ldt(struct mm_struct *prev, struct mm_struct *next) 90 + { 91 + /* 92 + * Load the LDT if either the old or new mm had an LDT. 93 + * 94 + * An mm will never go from having an LDT to not having an LDT. Two 95 + * mms never share an LDT, so we don't gain anything by checking to 96 + * see whether the LDT changed. There's also no guarantee that 97 + * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL, 98 + * then prev->context.ldt will also be non-NULL. 99 + * 100 + * If we really cared, we could optimize the case where prev == next 101 + * and we're exiting lazy mode. Most of the time, if this happens, 102 + * we don't actually need to reload LDTR, but modify_ldt() is mostly 103 + * used by legacy code and emulators where we don't need this level of 104 + * performance. 105 + * 106 + * This uses | instead of || because it generates better code. 107 + */ 108 + if (unlikely((unsigned long)prev->context.ldt | 109 + (unsigned long)next->context.ldt)) 110 + load_mm_ldt(next); 111 + 112 + DEBUG_LOCKS_WARN_ON(preemptible()); 113 + } 31 114 32 115 static void refresh_ldt_segments(void) 33 116 {
+1
arch/x86/kernel/setup.c
··· 42 42 #include <asm/proto.h> 43 43 #include <asm/unwind.h> 44 44 #include <asm/vsyscall.h> 45 + #include <linux/vmalloc.h> 45 46 46 47 /* 47 48 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
+1 -1
arch/x86/kernel/x86_init.c
··· 20 20 #include <asm/irq.h> 21 21 #include <asm/io_apic.h> 22 22 #include <asm/hpet.h> 23 - #include <asm/pat.h> 23 + #include <asm/memtype.h> 24 24 #include <asm/tsc.h> 25 25 #include <asm/iommu.h> 26 26 #include <asm/mach_traps.h>
+1 -1
arch/x86/kvm/mmu/mmu.c
··· 40 40 #include <linux/kthread.h> 41 41 42 42 #include <asm/page.h> 43 - #include <asm/pat.h> 43 + #include <asm/memtype.h> 44 44 #include <asm/cmpxchg.h> 45 45 #include <asm/e820/api.h> 46 46 #include <asm/io.h>
+4 -4
arch/x86/mm/Makefile
··· 12 12 CFLAGS_REMOVE_mem_encrypt_identity.o = -pg 13 13 endif 14 14 15 - obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 16 - pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o 15 + obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \ 16 + pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o 17 + 18 + obj-y += pat/ 17 19 18 20 # Make sure __phys_addr has no stackprotector 19 21 nostackp := $(call cc-option, -fno-stack-protector) ··· 24 22 CFLAGS_mem_encrypt_identity.o := $(nostackp) 25 23 26 24 CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace 27 - 28 - obj-$(CONFIG_X86_PAT) += pat_interval.o 29 25 30 26 obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o 31 27
+1
arch/x86/mm/fault.c
··· 29 29 #include <asm/efi.h> /* efi_recover_from_page_fault()*/ 30 30 #include <asm/desc.h> /* store_idt(), ... */ 31 31 #include <asm/cpu_entry_area.h> /* exception stack */ 32 + #include <asm/pgtable_areas.h> /* VMALLOC_START, ... */ 32 33 33 34 #define CREATE_TRACE_POINTS 34 35 #include <asm/trace/exceptions.h>
+1
arch/x86/mm/init_32.c
··· 52 52 #include <asm/page_types.h> 53 53 #include <asm/cpu_entry_area.h> 54 54 #include <asm/init.h> 55 + #include <asm/pgtable_areas.h> 55 56 56 57 #include "mm_internal.h" 57 58
+3 -3
arch/x86/mm/iomap_32.c
··· 4 4 */ 5 5 6 6 #include <asm/iomap.h> 7 - #include <asm/pat.h> 7 + #include <asm/memtype.h> 8 8 #include <linux/export.h> 9 9 #include <linux/highmem.h> 10 10 ··· 26 26 if (!is_io_mapping_possible(base, size)) 27 27 return -EINVAL; 28 28 29 - ret = io_reserve_memtype(base, base + size, &pcm); 29 + ret = memtype_reserve_io(base, base + size, &pcm); 30 30 if (ret) 31 31 return ret; 32 32 ··· 40 40 41 41 void iomap_free(resource_size_t base, unsigned long size) 42 42 { 43 - io_free_memtype(base, base + size); 43 + memtype_free_io(base, base + size); 44 44 } 45 45 EXPORT_SYMBOL_GPL(iomap_free); 46 46
+6 -6
arch/x86/mm/ioremap.c
··· 24 24 #include <asm/pgtable.h> 25 25 #include <asm/tlbflush.h> 26 26 #include <asm/pgalloc.h> 27 - #include <asm/pat.h> 27 + #include <asm/memtype.h> 28 28 #include <asm/setup.h> 29 29 30 30 #include "physaddr.h" ··· 196 196 phys_addr &= PHYSICAL_PAGE_MASK; 197 197 size = PAGE_ALIGN(last_addr+1) - phys_addr; 198 198 199 - retval = reserve_memtype(phys_addr, (u64)phys_addr + size, 199 + retval = memtype_reserve(phys_addr, (u64)phys_addr + size, 200 200 pcm, &new_pcm); 201 201 if (retval) { 202 - printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); 202 + printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval); 203 203 return NULL; 204 204 } 205 205 ··· 255 255 area->phys_addr = phys_addr; 256 256 vaddr = (unsigned long) area->addr; 257 257 258 - if (kernel_map_sync_memtype(phys_addr, size, pcm)) 258 + if (memtype_kernel_map_sync(phys_addr, size, pcm)) 259 259 goto err_free_area; 260 260 261 261 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) ··· 275 275 err_free_area: 276 276 free_vm_area(area); 277 277 err_free_memtype: 278 - free_memtype(phys_addr, phys_addr + size); 278 + memtype_free(phys_addr, phys_addr + size); 279 279 return NULL; 280 280 } 281 281 ··· 451 451 return; 452 452 } 453 453 454 - free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); 454 + memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p)); 455 455 456 456 /* Finally remove it */ 457 457 o = remove_vm_area((void __force *)addr);
arch/x86/mm/pageattr-test.c arch/x86/mm/pat/cpa-test.c
+13 -19
arch/x86/mm/pageattr.c arch/x86/mm/pat/set_memory.c
··· 24 24 #include <linux/uaccess.h> 25 25 #include <asm/pgalloc.h> 26 26 #include <asm/proto.h> 27 - #include <asm/pat.h> 27 + #include <asm/memtype.h> 28 28 #include <asm/set_memory.h> 29 29 30 - #include "mm_internal.h" 30 + #include "../mm_internal.h" 31 31 32 32 /* 33 33 * The current flushing context - we pass it instead of 5 arguments: ··· 331 331 on_each_cpu(__cpa_flush_all, (void *) cache, 1); 332 332 } 333 333 334 - void __cpa_flush_tlb(void *data) 334 + static void __cpa_flush_tlb(void *data) 335 335 { 336 336 struct cpa_data *cpa = data; 337 337 unsigned int i; ··· 1801 1801 /* 1802 1802 * for now UC MINUS. see comments in ioremap() 1803 1803 */ 1804 - ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1804 + ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1805 1805 _PAGE_CACHE_MODE_UC_MINUS, NULL); 1806 1806 if (ret) 1807 1807 goto out_err; ··· 1813 1813 return 0; 1814 1814 1815 1815 out_free: 1816 - free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1816 + memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1817 1817 out_err: 1818 1818 return ret; 1819 1819 } ··· 1839 1839 { 1840 1840 int ret; 1841 1841 1842 - ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1842 + ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, 1843 1843 _PAGE_CACHE_MODE_WC, NULL); 1844 1844 if (ret) 1845 1845 return ret; 1846 1846 1847 1847 ret = _set_memory_wc(addr, numpages); 1848 1848 if (ret) 1849 - free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1849 + memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1850 1850 1851 1851 return ret; 1852 1852 } ··· 1873 1873 if (ret) 1874 1874 return ret; 1875 1875 1876 - free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1876 + memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); 1877 1877 return 0; 1878 1878 } 1879 1879 EXPORT_SYMBOL(set_memory_wb); ··· 2014 2014 continue; 2015 2015 start = 
page_to_pfn(pages[i]) << PAGE_SHIFT; 2016 2016 end = start + PAGE_SIZE; 2017 - if (reserve_memtype(start, end, new_type, NULL)) 2017 + if (memtype_reserve(start, end, new_type, NULL)) 2018 2018 goto err_out; 2019 2019 } 2020 2020 ··· 2040 2040 continue; 2041 2041 start = page_to_pfn(pages[i]) << PAGE_SHIFT; 2042 2042 end = start + PAGE_SIZE; 2043 - free_memtype(start, end); 2043 + memtype_free(start, end); 2044 2044 } 2045 2045 return -EINVAL; 2046 2046 } ··· 2089 2089 continue; 2090 2090 start = page_to_pfn(pages[i]) << PAGE_SHIFT; 2091 2091 end = start + PAGE_SIZE; 2092 - free_memtype(start, end); 2092 + memtype_free(start, end); 2093 2093 } 2094 2094 2095 2095 return 0; ··· 2215 2215 .pgd = pgd, 2216 2216 .numpages = numpages, 2217 2217 .mask_set = __pgprot(0), 2218 - .mask_clr = __pgprot(0), 2218 + .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)), 2219 2219 .flags = 0, 2220 2220 }; 2221 2221 ··· 2223 2223 2224 2224 if (!(__supported_pte_mask & _PAGE_NX)) 2225 2225 goto out; 2226 - 2227 - if (!(page_flags & _PAGE_NX)) 2228 - cpa.mask_clr = __pgprot(_PAGE_NX); 2229 - 2230 - if (!(page_flags & _PAGE_RW)) 2231 - cpa.mask_clr = __pgprot(_PAGE_RW); 2232 2226 2233 2227 if (!(page_flags & _PAGE_ENC)) 2234 2228 cpa.mask_clr = pgprot_encrypted(cpa.mask_clr); ··· 2275 2281 * be exposed to the rest of the kernel. Include these directly here. 2276 2282 */ 2277 2283 #ifdef CONFIG_CPA_DEBUG 2278 - #include "pageattr-test.c" 2284 + #include "cpa-test.c" 2279 2285 #endif
+119 -84
arch/x86/mm/pat.c arch/x86/mm/pat/memtype.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Handle caching attributes in page tables (PAT) 3 + * Page Attribute Table (PAT) support: handle memory caching attributes in page tables. 4 4 * 5 5 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 6 6 * Suresh B Siddha <suresh.b.siddha@intel.com> 7 7 * 8 8 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. 9 + * 10 + * Basic principles: 11 + * 12 + * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and 13 + * the kernel to set one of a handful of 'caching type' attributes for physical 14 + * memory ranges: uncached, write-combining, write-through, write-protected, 15 + * and the most commonly used and default attribute: write-back caching. 16 + * 17 + * PAT support supercedes and augments MTRR support in a compatible fashion: MTRR is 18 + * a hardware interface to enumerate a limited number of physical memory ranges 19 + * and set their caching attributes explicitly, programmed into the CPU via MSRs. 20 + * Even modern CPUs have MTRRs enabled - but these are typically not touched 21 + * by the kernel or by user-space (such as the X server), we rely on PAT for any 22 + * additional cache attribute logic. 23 + * 24 + * PAT doesn't work via explicit memory ranges, but uses page table entries to add 25 + * cache attribute information to the mapped memory range: there's 3 bits used, 26 + * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the 27 + * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT). 28 + * 29 + * ( There's a metric ton of finer details, such as compatibility with CPU quirks 30 + * that only support 4 types of PAT entries, and interaction with MTRRs, see 31 + * below for details. 
) 9 32 */ 10 33 11 34 #include <linux/seq_file.h> ··· 52 29 #include <asm/mtrr.h> 53 30 #include <asm/page.h> 54 31 #include <asm/msr.h> 55 - #include <asm/pat.h> 32 + #include <asm/memtype.h> 56 33 #include <asm/io.h> 57 34 58 - #include "pat_internal.h" 59 - #include "mm_internal.h" 35 + #include "memtype.h" 36 + #include "../mm_internal.h" 60 37 61 38 #undef pr_fmt 62 39 #define pr_fmt(fmt) "" fmt 63 40 64 - static bool __read_mostly boot_cpu_done; 41 + static bool __read_mostly pat_bp_initialized; 65 42 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); 66 - static bool __read_mostly pat_initialized; 67 - static bool __read_mostly init_cm_done; 43 + static bool __read_mostly pat_bp_enabled; 44 + static bool __read_mostly pat_cm_initialized; 68 45 69 - void pat_disable(const char *reason) 46 + /* 47 + * PAT support is enabled by default, but can be disabled for 48 + * various user-requested or hardware-forced reasons: 49 + */ 50 + void pat_disable(const char *msg_reason) 70 51 { 71 52 if (pat_disabled) 72 53 return; 73 54 74 - if (boot_cpu_done) { 55 + if (pat_bp_initialized) { 75 56 WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n"); 76 57 return; 77 58 } 78 59 79 60 pat_disabled = true; 80 - pr_info("x86/PAT: %s\n", reason); 61 + pr_info("x86/PAT: %s\n", msg_reason); 81 62 } 82 63 83 64 static int __init nopat(char *str) 84 65 { 85 - pat_disable("PAT support disabled."); 66 + pat_disable("PAT support disabled via boot option."); 86 67 return 0; 87 68 } 88 69 early_param("nopat", nopat); 89 70 90 71 bool pat_enabled(void) 91 72 { 92 - return pat_initialized; 73 + return pat_bp_enabled; 93 74 } 94 75 EXPORT_SYMBOL_GPL(pat_enabled); 95 76 ··· 224 197 char pat_msg[33]; 225 198 int i; 226 199 200 + WARN_ON_ONCE(pat_cm_initialized); 201 + 227 202 pat_msg[32] = 0; 228 203 for (i = 7; i >= 0; i--) { 229 204 cache = pat_get_cache_mode((pat >> (i * 8)) & 7, ··· 234 205 } 235 206 pr_info("x86/PAT: Configuration [0-7]: %s\n", 
pat_msg); 236 207 237 - init_cm_done = true; 208 + pat_cm_initialized = true; 238 209 } 239 210 240 211 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) 241 212 242 - static void pat_bsp_init(u64 pat) 213 + static void pat_bp_init(u64 pat) 243 214 { 244 215 u64 tmp_pat; 245 216 246 217 if (!boot_cpu_has(X86_FEATURE_PAT)) { 247 - pat_disable("PAT not supported by CPU."); 218 + pat_disable("PAT not supported by the CPU."); 248 219 return; 249 220 } 250 221 251 222 rdmsrl(MSR_IA32_CR_PAT, tmp_pat); 252 223 if (!tmp_pat) { 253 - pat_disable("PAT MSR is 0, disabled."); 224 + pat_disable("PAT support disabled by the firmware."); 254 225 return; 255 226 } 256 227 257 228 wrmsrl(MSR_IA32_CR_PAT, pat); 258 - pat_initialized = true; 229 + pat_bp_enabled = true; 259 230 260 231 __init_cache_modes(pat); 261 232 } ··· 277 248 { 278 249 u64 pat = 0; 279 250 280 - if (init_cm_done) 251 + if (pat_cm_initialized) 281 252 return; 282 253 283 254 if (boot_cpu_has(X86_FEATURE_PAT)) { ··· 320 291 } 321 292 322 293 /** 323 - * pat_init - Initialize PAT MSR and PAT table 294 + * pat_init - Initialize the PAT MSR and PAT table on the current CPU 324 295 * 325 296 * This function initializes PAT MSR and PAT table with an OS-defined value 326 297 * to enable additional cache attributes, WC, WT and WP. ··· 333 304 { 334 305 u64 pat; 335 306 struct cpuinfo_x86 *c = &boot_cpu_data; 307 + 308 + #ifndef CONFIG_X86_PAT 309 + pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n"); 310 + #endif 336 311 337 312 if (pat_disabled) 338 313 return; ··· 397 364 PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT); 398 365 } 399 366 400 - if (!boot_cpu_done) { 401 - pat_bsp_init(pat); 402 - boot_cpu_done = true; 367 + if (!pat_bp_initialized) { 368 + pat_bp_init(pat); 369 + pat_bp_initialized = true; 403 370 } else { 404 371 pat_ap_init(pat); 405 372 } ··· 575 542 * available type in new_type in case of no error. 
In case of any error 576 543 * it will return a negative return value. 577 544 */ 578 - int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type, 545 + int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type, 579 546 enum page_cache_mode *new_type) 580 547 { 581 - struct memtype *new; 548 + struct memtype *entry_new; 582 549 enum page_cache_mode actual_type; 583 550 int is_range_ram; 584 551 int err = 0; ··· 626 593 return -EINVAL; 627 594 } 628 595 629 - new = kzalloc(sizeof(struct memtype), GFP_KERNEL); 630 - if (!new) 596 + entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL); 597 + if (!entry_new) 631 598 return -ENOMEM; 632 599 633 - new->start = start; 634 - new->end = end; 635 - new->type = actual_type; 600 + entry_new->start = start; 601 + entry_new->end = end; 602 + entry_new->type = actual_type; 636 603 637 604 spin_lock(&memtype_lock); 638 605 639 - err = memtype_check_insert(new, new_type); 606 + err = memtype_check_insert(entry_new, new_type); 640 607 if (err) { 641 - pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n", 608 + pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n", 642 609 start, end - 1, 643 - cattr_name(new->type), cattr_name(req_type)); 644 - kfree(new); 610 + cattr_name(entry_new->type), cattr_name(req_type)); 611 + kfree(entry_new); 645 612 spin_unlock(&memtype_lock); 646 613 647 614 return err; ··· 649 616 650 617 spin_unlock(&memtype_lock); 651 618 652 - dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", 653 - start, end - 1, cattr_name(new->type), cattr_name(req_type), 619 + dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", 620 + start, end - 1, cattr_name(entry_new->type), cattr_name(req_type), 654 621 new_type ? 
cattr_name(*new_type) : "-"); 655 622 656 623 return err; 657 624 } 658 625 659 - int free_memtype(u64 start, u64 end) 626 + int memtype_free(u64 start, u64 end) 660 627 { 661 - int err = -EINVAL; 662 628 int is_range_ram; 663 - struct memtype *entry; 629 + struct memtype *entry_old; 664 630 665 631 if (!pat_enabled()) 666 632 return 0; ··· 672 640 return 0; 673 641 674 642 is_range_ram = pat_pagerange_is_ram(start, end); 675 - if (is_range_ram == 1) { 676 - 677 - err = free_ram_pages_type(start, end); 678 - 679 - return err; 680 - } else if (is_range_ram < 0) { 643 + if (is_range_ram == 1) 644 + return free_ram_pages_type(start, end); 645 + if (is_range_ram < 0) 681 646 return -EINVAL; 682 - } 683 647 684 648 spin_lock(&memtype_lock); 685 - entry = memtype_erase(start, end); 649 + entry_old = memtype_erase(start, end); 686 650 spin_unlock(&memtype_lock); 687 651 688 - if (IS_ERR(entry)) { 652 + if (IS_ERR(entry_old)) { 689 653 pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", 690 654 current->comm, current->pid, start, end - 1); 691 655 return -EINVAL; 692 656 } 693 657 694 - kfree(entry); 658 + kfree(entry_old); 695 659 696 - dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1); 660 + dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1); 697 661 698 662 return 0; 699 663 } ··· 728 700 rettype = _PAGE_CACHE_MODE_UC_MINUS; 729 701 730 702 spin_unlock(&memtype_lock); 703 + 731 704 return rettype; 732 705 } 733 706 ··· 752 723 EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); 753 724 754 725 /** 755 - * io_reserve_memtype - Request a memory type mapping for a region of memory 726 + * memtype_reserve_io - Request a memory type mapping for a region of memory 756 727 * @start: start (physical address) of the region 757 728 * @end: end (physical address) of the region 758 729 * @type: A pointer to memtype, with requested type. 
On success, requested ··· 761 732 * On success, returns 0 762 733 * On failure, returns non-zero 763 734 */ 764 - int io_reserve_memtype(resource_size_t start, resource_size_t end, 735 + int memtype_reserve_io(resource_size_t start, resource_size_t end, 765 736 enum page_cache_mode *type) 766 737 { 767 738 resource_size_t size = end - start; ··· 771 742 772 743 WARN_ON_ONCE(iomem_map_sanity_check(start, size)); 773 744 774 - ret = reserve_memtype(start, end, req_type, &new_type); 745 + ret = memtype_reserve(start, end, req_type, &new_type); 775 746 if (ret) 776 747 goto out_err; 777 748 778 749 if (!is_new_memtype_allowed(start, size, req_type, new_type)) 779 750 goto out_free; 780 751 781 - if (kernel_map_sync_memtype(start, size, new_type) < 0) 752 + if (memtype_kernel_map_sync(start, size, new_type) < 0) 782 753 goto out_free; 783 754 784 755 *type = new_type; 785 756 return 0; 786 757 787 758 out_free: 788 - free_memtype(start, end); 759 + memtype_free(start, end); 789 760 ret = -EBUSY; 790 761 out_err: 791 762 return ret; 792 763 } 793 764 794 765 /** 795 - * io_free_memtype - Release a memory type mapping for a region of memory 766 + * memtype_free_io - Release a memory type mapping for a region of memory 796 767 * @start: start (physical address) of the region 797 768 * @end: end (physical address) of the region 798 769 */ 799 - void io_free_memtype(resource_size_t start, resource_size_t end) 770 + void memtype_free_io(resource_size_t start, resource_size_t end) 800 771 { 801 - free_memtype(start, end); 772 + memtype_free(start, end); 802 773 } 803 774 804 775 int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) 805 776 { 806 777 enum page_cache_mode type = _PAGE_CACHE_MODE_WC; 807 778 808 - return io_reserve_memtype(start, start + size, &type); 779 + return memtype_reserve_io(start, start + size, &type); 809 780 } 810 781 EXPORT_SYMBOL(arch_io_reserve_memtype_wc); 811 782 812 783 void arch_io_free_memtype_wc(resource_size_t start, 
resource_size_t size) 813 784 { 814 - io_free_memtype(start, start + size); 785 + memtype_free_io(start, start + size); 815 786 } 816 787 EXPORT_SYMBOL(arch_io_free_memtype_wc); 817 788 ··· 868 839 } 869 840 870 841 /* 871 - * Change the memory type for the physial address range in kernel identity 842 + * Change the memory type for the physical address range in kernel identity 872 843 * mapping space if that range is a part of identity map. 873 844 */ 874 - int kernel_map_sync_memtype(u64 base, unsigned long size, 845 + int memtype_kernel_map_sync(u64 base, unsigned long size, 875 846 enum page_cache_mode pcm) 876 847 { 877 848 unsigned long id_sz; ··· 880 851 return 0; 881 852 882 853 /* 883 - * some areas in the middle of the kernel identity range 884 - * are not mapped, like the PCI space. 854 + * Some areas in the middle of the kernel identity range 855 + * are not mapped, for example the PCI space. 885 856 */ 886 857 if (!page_is_ram(base >> PAGE_SHIFT)) 887 858 return 0; 888 859 889 860 id_sz = (__pa(high_memory-1) <= base + size) ? 890 - __pa(high_memory) - base : 891 - size; 861 + __pa(high_memory) - base : size; 892 862 893 863 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { 894 864 pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", ··· 901 873 902 874 /* 903 875 * Internal interface to reserve a range of physical memory with prot. 904 - * Reserved non RAM regions only and after successful reserve_memtype, 876 + * Reserved non RAM regions only and after successful memtype_reserve, 905 877 * this func also keeps identity mapping (if any) in sync with this new prot. 
906 878 */ 907 879 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, ··· 938 910 return 0; 939 911 } 940 912 941 - ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm); 913 + ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm); 942 914 if (ret) 943 915 return ret; 944 916 945 917 if (pcm != want_pcm) { 946 918 if (strict_prot || 947 919 !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { 948 - free_memtype(paddr, paddr + size); 920 + memtype_free(paddr, paddr + size); 949 921 pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", 950 922 current->comm, current->pid, 951 923 cattr_name(want_pcm), ··· 963 935 cachemode2protval(pcm)); 964 936 } 965 937 966 - if (kernel_map_sync_memtype(paddr, size, pcm) < 0) { 967 - free_memtype(paddr, paddr + size); 938 + if (memtype_kernel_map_sync(paddr, size, pcm) < 0) { 939 + memtype_free(paddr, paddr + size); 968 940 return -EINVAL; 969 941 } 970 942 return 0; ··· 980 952 981 953 is_ram = pat_pagerange_is_ram(paddr, paddr + size); 982 954 if (is_ram == 0) 983 - free_memtype(paddr, paddr + size); 955 + memtype_free(paddr, paddr + size); 984 956 } 985 957 986 958 /* ··· 1127 1099 1128 1100 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) 1129 1101 1102 + /* 1103 + * We are allocating a temporary printout-entry to be passed 1104 + * between seq_start()/next() and seq_show(): 1105 + */ 1130 1106 static struct memtype *memtype_get_idx(loff_t pos) 1131 1107 { 1132 - struct memtype *print_entry; 1108 + struct memtype *entry_print; 1133 1109 int ret; 1134 1110 1135 - print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL); 1136 - if (!print_entry) 1111 + entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL); 1112 + if (!entry_print) 1137 1113 return NULL; 1138 1114 1139 1115 spin_lock(&memtype_lock); 1140 - ret = memtype_copy_nth_element(print_entry, pos); 1116 + ret = memtype_copy_nth_element(entry_print, pos); 1141 1117 
spin_unlock(&memtype_lock); 1142 1118 1143 - if (!ret) { 1144 - return print_entry; 1145 - } else { 1146 - kfree(print_entry); 1119 + /* Free it on error: */ 1120 + if (ret) { 1121 + kfree(entry_print); 1147 1122 return NULL; 1148 1123 } 1124 + 1125 + return entry_print; 1149 1126 } 1150 1127 1151 1128 static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) ··· 1175 1142 1176 1143 static int memtype_seq_show(struct seq_file *seq, void *v) 1177 1144 { 1178 - struct memtype *print_entry = (struct memtype *)v; 1145 + struct memtype *entry_print = (struct memtype *)v; 1179 1146 1180 - seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), 1181 - print_entry->start, print_entry->end); 1182 - kfree(print_entry); 1147 + seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n", 1148 + entry_print->start, 1149 + entry_print->end, 1150 + cattr_name(entry_print->type)); 1151 + 1152 + kfree(entry_print); 1183 1153 1184 1154 return 0; 1185 1155 } ··· 1214 1178 } 1215 1179 return 0; 1216 1180 } 1217 - 1218 1181 late_initcall(pat_memtype_list_init); 1219 1182 1220 1183 #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
+5
arch/x86/mm/pat/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-y := set_memory.o memtype.o 4 + 5 + obj-$(CONFIG_X86_PAT) += memtype_interval.o
+194
arch/x86/mm/pat/memtype_interval.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Handle caching attributes in page tables (PAT) 4 + * 5 + * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 6 + * Suresh B Siddha <suresh.b.siddha@intel.com> 7 + * 8 + * Interval tree used to store the PAT memory type reservations. 9 + */ 10 + 11 + #include <linux/seq_file.h> 12 + #include <linux/debugfs.h> 13 + #include <linux/kernel.h> 14 + #include <linux/interval_tree_generic.h> 15 + #include <linux/sched.h> 16 + #include <linux/gfp.h> 17 + 18 + #include <asm/pgtable.h> 19 + #include <asm/memtype.h> 20 + 21 + #include "memtype.h" 22 + 23 + /* 24 + * The memtype tree keeps track of memory type for specific 25 + * physical memory areas. Without proper tracking, conflicting memory 26 + * types in different mappings can cause CPU cache corruption. 27 + * 28 + * The tree is an interval tree (augmented rbtree) which tree is ordered 29 + * by the starting address. The tree can contain multiple entries for 30 + * different regions which overlap. All the aliases have the same 31 + * cache attributes of course, as enforced by the PAT logic. 32 + * 33 + * memtype_lock protects the rbtree. 
34 + */ 35 + 36 + static inline u64 interval_start(struct memtype *entry) 37 + { 38 + return entry->start; 39 + } 40 + 41 + static inline u64 interval_end(struct memtype *entry) 42 + { 43 + return entry->end - 1; 44 + } 45 + 46 + INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end, 47 + interval_start, interval_end, 48 + static, interval) 49 + 50 + static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED; 51 + 52 + enum { 53 + MEMTYPE_EXACT_MATCH = 0, 54 + MEMTYPE_END_MATCH = 1 55 + }; 56 + 57 + static struct memtype *memtype_match(u64 start, u64 end, int match_type) 58 + { 59 + struct memtype *entry_match; 60 + 61 + entry_match = interval_iter_first(&memtype_rbroot, start, end-1); 62 + 63 + while (entry_match != NULL && entry_match->start < end) { 64 + if ((match_type == MEMTYPE_EXACT_MATCH) && 65 + (entry_match->start == start) && (entry_match->end == end)) 66 + return entry_match; 67 + 68 + if ((match_type == MEMTYPE_END_MATCH) && 69 + (entry_match->start < start) && (entry_match->end == end)) 70 + return entry_match; 71 + 72 + entry_match = interval_iter_next(entry_match, start, end-1); 73 + } 74 + 75 + return NULL; /* Returns NULL if there is no match */ 76 + } 77 + 78 + static int memtype_check_conflict(u64 start, u64 end, 79 + enum page_cache_mode reqtype, 80 + enum page_cache_mode *newtype) 81 + { 82 + struct memtype *entry_match; 83 + enum page_cache_mode found_type = reqtype; 84 + 85 + entry_match = interval_iter_first(&memtype_rbroot, start, end-1); 86 + if (entry_match == NULL) 87 + goto success; 88 + 89 + if (entry_match->type != found_type && newtype == NULL) 90 + goto failure; 91 + 92 + dprintk("Overlap at 0x%Lx-0x%Lx\n", entry_match->start, entry_match->end); 93 + found_type = entry_match->type; 94 + 95 + entry_match = interval_iter_next(entry_match, start, end-1); 96 + while (entry_match) { 97 + if (entry_match->type != found_type) 98 + goto failure; 99 + 100 + entry_match = interval_iter_next(entry_match, start, end-1); 101 + } 102 + 
success: 103 + if (newtype) 104 + *newtype = found_type; 105 + 106 + return 0; 107 + 108 + failure: 109 + pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n", 110 + current->comm, current->pid, start, end, 111 + cattr_name(found_type), cattr_name(entry_match->type)); 112 + 113 + return -EBUSY; 114 + } 115 + 116 + int memtype_check_insert(struct memtype *entry_new, enum page_cache_mode *ret_type) 117 + { 118 + int err = 0; 119 + 120 + err = memtype_check_conflict(entry_new->start, entry_new->end, entry_new->type, ret_type); 121 + if (err) 122 + return err; 123 + 124 + if (ret_type) 125 + entry_new->type = *ret_type; 126 + 127 + interval_insert(entry_new, &memtype_rbroot); 128 + return 0; 129 + } 130 + 131 + struct memtype *memtype_erase(u64 start, u64 end) 132 + { 133 + struct memtype *entry_old; 134 + 135 + /* 136 + * Since the memtype_rbroot tree allows overlapping ranges, 137 + * memtype_erase() checks with EXACT_MATCH first, i.e. free 138 + * a whole node for the munmap case. If no such entry is found, 139 + * it then checks with END_MATCH, i.e. shrink the size of a node 140 + * from the end for the mremap case. 
141 + */ 142 + entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH); 143 + if (!entry_old) { 144 + entry_old = memtype_match(start, end, MEMTYPE_END_MATCH); 145 + if (!entry_old) 146 + return ERR_PTR(-EINVAL); 147 + } 148 + 149 + if (entry_old->start == start) { 150 + /* munmap: erase this node */ 151 + interval_remove(entry_old, &memtype_rbroot); 152 + } else { 153 + /* mremap: update the end value of this node */ 154 + interval_remove(entry_old, &memtype_rbroot); 155 + entry_old->end = start; 156 + interval_insert(entry_old, &memtype_rbroot); 157 + 158 + return NULL; 159 + } 160 + 161 + return entry_old; 162 + } 163 + 164 + struct memtype *memtype_lookup(u64 addr) 165 + { 166 + return interval_iter_first(&memtype_rbroot, addr, addr + PAGE_SIZE-1); 167 + } 168 + 169 + /* 170 + * Debugging helper, copy the Nth entry of the tree into a 171 + * a copy for printout. This allows us to print out the tree 172 + * via debugfs, without holding the memtype_lock too long: 173 + */ 174 + #ifdef CONFIG_DEBUG_FS 175 + int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos) 176 + { 177 + struct memtype *entry_match; 178 + int i = 1; 179 + 180 + entry_match = interval_iter_first(&memtype_rbroot, 0, ULONG_MAX); 181 + 182 + while (entry_match && pos != i) { 183 + entry_match = interval_iter_next(entry_match, 0, ULONG_MAX); 184 + i++; 185 + } 186 + 187 + if (entry_match) { /* pos == i */ 188 + *entry_out = *entry_match; 189 + return 0; 190 + } else { 191 + return 1; 192 + } 193 + } 194 + #endif
+6 -6
arch/x86/mm/pat_internal.h arch/x86/mm/pat/memtype.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __PAT_INTERNAL_H_ 3 - #define __PAT_INTERNAL_H_ 2 + #ifndef __MEMTYPE_H_ 3 + #define __MEMTYPE_H_ 4 4 5 5 extern int pat_debug_enable; 6 6 ··· 29 29 } 30 30 31 31 #ifdef CONFIG_X86_PAT 32 - extern int memtype_check_insert(struct memtype *new, 32 + extern int memtype_check_insert(struct memtype *entry_new, 33 33 enum page_cache_mode *new_type); 34 34 extern struct memtype *memtype_erase(u64 start, u64 end); 35 35 extern struct memtype *memtype_lookup(u64 addr); 36 - extern int memtype_copy_nth_element(struct memtype *out, loff_t pos); 36 + extern int memtype_copy_nth_element(struct memtype *entry_out, loff_t pos); 37 37 #else 38 - static inline int memtype_check_insert(struct memtype *new, 38 + static inline int memtype_check_insert(struct memtype *entry_new, 39 39 enum page_cache_mode *new_type) 40 40 { return 0; } 41 41 static inline struct memtype *memtype_erase(u64 start, u64 end) ··· 46 46 { return 0; } 47 47 #endif 48 48 49 - #endif /* __PAT_INTERNAL_H_ */ 49 + #endif /* __MEMTYPE_H_ */
-185
arch/x86/mm/pat_interval.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Handle caching attributes in page tables (PAT) 4 - * 5 - * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 6 - * Suresh B Siddha <suresh.b.siddha@intel.com> 7 - * 8 - * Interval tree used to store the PAT memory type reservations. 9 - */ 10 - 11 - #include <linux/seq_file.h> 12 - #include <linux/debugfs.h> 13 - #include <linux/kernel.h> 14 - #include <linux/interval_tree_generic.h> 15 - #include <linux/sched.h> 16 - #include <linux/gfp.h> 17 - 18 - #include <asm/pgtable.h> 19 - #include <asm/pat.h> 20 - 21 - #include "pat_internal.h" 22 - 23 - /* 24 - * The memtype tree keeps track of memory type for specific 25 - * physical memory areas. Without proper tracking, conflicting memory 26 - * types in different mappings can cause CPU cache corruption. 27 - * 28 - * The tree is an interval tree (augmented rbtree) with tree ordered 29 - * on starting address. Tree can contain multiple entries for 30 - * different regions which overlap. All the aliases have the same 31 - * cache attributes of course. 32 - * 33 - * memtype_lock protects the rbtree. 
34 - */ 35 - static inline u64 memtype_interval_start(struct memtype *memtype) 36 - { 37 - return memtype->start; 38 - } 39 - 40 - static inline u64 memtype_interval_end(struct memtype *memtype) 41 - { 42 - return memtype->end - 1; 43 - } 44 - INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end, 45 - memtype_interval_start, memtype_interval_end, 46 - static, memtype_interval) 47 - 48 - static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED; 49 - 50 - enum { 51 - MEMTYPE_EXACT_MATCH = 0, 52 - MEMTYPE_END_MATCH = 1 53 - }; 54 - 55 - static struct memtype *memtype_match(u64 start, u64 end, int match_type) 56 - { 57 - struct memtype *match; 58 - 59 - match = memtype_interval_iter_first(&memtype_rbroot, start, end-1); 60 - while (match != NULL && match->start < end) { 61 - if ((match_type == MEMTYPE_EXACT_MATCH) && 62 - (match->start == start) && (match->end == end)) 63 - return match; 64 - 65 - if ((match_type == MEMTYPE_END_MATCH) && 66 - (match->start < start) && (match->end == end)) 67 - return match; 68 - 69 - match = memtype_interval_iter_next(match, start, end-1); 70 - } 71 - 72 - return NULL; /* Returns NULL if there is no match */ 73 - } 74 - 75 - static int memtype_check_conflict(u64 start, u64 end, 76 - enum page_cache_mode reqtype, 77 - enum page_cache_mode *newtype) 78 - { 79 - struct memtype *match; 80 - enum page_cache_mode found_type = reqtype; 81 - 82 - match = memtype_interval_iter_first(&memtype_rbroot, start, end-1); 83 - if (match == NULL) 84 - goto success; 85 - 86 - if (match->type != found_type && newtype == NULL) 87 - goto failure; 88 - 89 - dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end); 90 - found_type = match->type; 91 - 92 - match = memtype_interval_iter_next(match, start, end-1); 93 - while (match) { 94 - if (match->type != found_type) 95 - goto failure; 96 - 97 - match = memtype_interval_iter_next(match, start, end-1); 98 - } 99 - success: 100 - if (newtype) 101 - *newtype = found_type; 102 - 103 - return 0; 
104 - 105 - failure: 106 - pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n", 107 - current->comm, current->pid, start, end, 108 - cattr_name(found_type), cattr_name(match->type)); 109 - return -EBUSY; 110 - } 111 - 112 - int memtype_check_insert(struct memtype *new, 113 - enum page_cache_mode *ret_type) 114 - { 115 - int err = 0; 116 - 117 - err = memtype_check_conflict(new->start, new->end, new->type, ret_type); 118 - if (err) 119 - return err; 120 - 121 - if (ret_type) 122 - new->type = *ret_type; 123 - 124 - memtype_interval_insert(new, &memtype_rbroot); 125 - return 0; 126 - } 127 - 128 - struct memtype *memtype_erase(u64 start, u64 end) 129 - { 130 - struct memtype *data; 131 - 132 - /* 133 - * Since the memtype_rbroot tree allows overlapping ranges, 134 - * memtype_erase() checks with EXACT_MATCH first, i.e. free 135 - * a whole node for the munmap case. If no such entry is found, 136 - * it then checks with END_MATCH, i.e. shrink the size of a node 137 - * from the end for the mremap case. 
138 - */ 139 - data = memtype_match(start, end, MEMTYPE_EXACT_MATCH); 140 - if (!data) { 141 - data = memtype_match(start, end, MEMTYPE_END_MATCH); 142 - if (!data) 143 - return ERR_PTR(-EINVAL); 144 - } 145 - 146 - if (data->start == start) { 147 - /* munmap: erase this node */ 148 - memtype_interval_remove(data, &memtype_rbroot); 149 - } else { 150 - /* mremap: update the end value of this node */ 151 - memtype_interval_remove(data, &memtype_rbroot); 152 - data->end = start; 153 - memtype_interval_insert(data, &memtype_rbroot); 154 - return NULL; 155 - } 156 - 157 - return data; 158 - } 159 - 160 - struct memtype *memtype_lookup(u64 addr) 161 - { 162 - return memtype_interval_iter_first(&memtype_rbroot, addr, 163 - addr + PAGE_SIZE-1); 164 - } 165 - 166 - #if defined(CONFIG_DEBUG_FS) 167 - int memtype_copy_nth_element(struct memtype *out, loff_t pos) 168 - { 169 - struct memtype *match; 170 - int i = 1; 171 - 172 - match = memtype_interval_iter_first(&memtype_rbroot, 0, ULONG_MAX); 173 - while (match && pos != i) { 174 - match = memtype_interval_iter_next(match, 0, ULONG_MAX); 175 - i++; 176 - } 177 - 178 - if (match) { /* pos == i */ 179 - *out = *match; 180 - return 0; 181 - } else { 182 - return 1; 183 - } 184 - } 185 - #endif
+1
arch/x86/mm/pgtable_32.c
··· 18 18 #include <asm/tlb.h> 19 19 #include <asm/tlbflush.h> 20 20 #include <asm/io.h> 21 + #include <linux/vmalloc.h> 21 22 22 23 unsigned int __VMALLOC_RESERVE = 128 << 20; 23 24
+1
arch/x86/mm/physaddr.c
··· 5 5 #include <linux/mm.h> 6 6 7 7 #include <asm/page.h> 8 + #include <linux/vmalloc.h> 8 9 9 10 #include "physaddr.h" 10 11
+1 -1
arch/x86/pci/i386.c
··· 34 34 #include <linux/errno.h> 35 35 #include <linux/memblock.h> 36 36 37 - #include <asm/pat.h> 37 + #include <asm/memtype.h> 38 38 #include <asm/e820/api.h> 39 39 #include <asm/pci_x86.h> 40 40 #include <asm/io_apic.h>
+2 -1
arch/x86/platform/efi/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y 3 - OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y 3 + KASAN_SANITIZE := n 4 + GCOV_PROFILE := n 4 5 5 6 obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o 6 7 obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o
+130 -260
arch/x86/platform/efi/efi.c
··· 54 54 #include <asm/x86_init.h> 55 55 #include <asm/uv/uv.h> 56 56 57 - static struct efi efi_phys __initdata; 58 57 static efi_system_table_t efi_systab __initdata; 58 + static u64 efi_systab_phys __initdata; 59 59 60 60 static efi_config_table_type_t arch_tables[] __initdata = { 61 61 #ifdef CONFIG_X86_UV ··· 96 96 return 0; 97 97 } 98 98 early_param("add_efi_memmap", setup_add_efi_memmap); 99 - 100 - static efi_status_t __init phys_efi_set_virtual_address_map( 101 - unsigned long memory_map_size, 102 - unsigned long descriptor_size, 103 - u32 descriptor_version, 104 - efi_memory_desc_t *virtual_map) 105 - { 106 - efi_status_t status; 107 - unsigned long flags; 108 - pgd_t *save_pgd; 109 - 110 - save_pgd = efi_call_phys_prolog(); 111 - if (!save_pgd) 112 - return EFI_ABORTED; 113 - 114 - /* Disable interrupts around EFI calls: */ 115 - local_irq_save(flags); 116 - status = efi_call_phys(efi_phys.set_virtual_address_map, 117 - memory_map_size, descriptor_size, 118 - descriptor_version, virtual_map); 119 - local_irq_restore(flags); 120 - 121 - efi_call_phys_epilog(save_pgd); 122 - 123 - return status; 124 - } 125 99 126 100 void __init efi_find_mirror(void) 127 101 { ··· 304 330 } 305 331 306 332 if (n_removal > 0) { 307 - u64 size = efi.memmap.nr_map - n_removal; 333 + struct efi_memory_map_data data = { 334 + .phys_map = efi.memmap.phys_map, 335 + .desc_version = efi.memmap.desc_version, 336 + .desc_size = efi.memmap.desc_size, 337 + .size = data.desc_size * (efi.memmap.nr_map - n_removal), 338 + .flags = 0, 339 + }; 308 340 309 341 pr_warn("Removing %d invalid memory map entries.\n", n_removal); 310 - efi_memmap_install(efi.memmap.phys_map, size); 342 + efi_memmap_install(&data); 311 343 } 312 344 } 313 345 ··· 333 353 } 334 354 } 335 355 336 - static int __init efi_systab_init(void *phys) 356 + static int __init efi_systab_init(u64 phys) 337 357 { 358 + int size = efi_enabled(EFI_64BIT) ? 
sizeof(efi_system_table_64_t) 359 + : sizeof(efi_system_table_32_t); 360 + bool over4g = false; 361 + void *p; 362 + 363 + p = early_memremap_ro(phys, size); 364 + if (p == NULL) { 365 + pr_err("Couldn't map the system table!\n"); 366 + return -ENOMEM; 367 + } 368 + 338 369 if (efi_enabled(EFI_64BIT)) { 339 - efi_system_table_64_t *systab64; 340 - struct efi_setup_data *data = NULL; 341 - u64 tmp = 0; 370 + const efi_system_table_64_t *systab64 = p; 371 + 372 + efi_systab.hdr = systab64->hdr; 373 + efi_systab.fw_vendor = systab64->fw_vendor; 374 + efi_systab.fw_revision = systab64->fw_revision; 375 + efi_systab.con_in_handle = systab64->con_in_handle; 376 + efi_systab.con_in = systab64->con_in; 377 + efi_systab.con_out_handle = systab64->con_out_handle; 378 + efi_systab.con_out = (void *)(unsigned long)systab64->con_out; 379 + efi_systab.stderr_handle = systab64->stderr_handle; 380 + efi_systab.stderr = systab64->stderr; 381 + efi_systab.runtime = (void *)(unsigned long)systab64->runtime; 382 + efi_systab.boottime = (void *)(unsigned long)systab64->boottime; 383 + efi_systab.nr_tables = systab64->nr_tables; 384 + efi_systab.tables = systab64->tables; 385 + 386 + over4g = systab64->con_in_handle > U32_MAX || 387 + systab64->con_in > U32_MAX || 388 + systab64->con_out_handle > U32_MAX || 389 + systab64->con_out > U32_MAX || 390 + systab64->stderr_handle > U32_MAX || 391 + systab64->stderr > U32_MAX || 392 + systab64->boottime > U32_MAX; 342 393 343 394 if (efi_setup) { 344 - data = early_memremap(efi_setup, sizeof(*data)); 345 - if (!data) 395 + struct efi_setup_data *data; 396 + 397 + data = early_memremap_ro(efi_setup, sizeof(*data)); 398 + if (!data) { 399 + early_memunmap(p, size); 346 400 return -ENOMEM; 347 - } 348 - systab64 = early_memremap((unsigned long)phys, 349 - sizeof(*systab64)); 350 - if (systab64 == NULL) { 351 - pr_err("Couldn't map the system table!\n"); 352 - if (data) 353 - early_memunmap(data, sizeof(*data)); 354 - return -ENOMEM; 355 - } 401 + 
} 356 402 357 - efi_systab.hdr = systab64->hdr; 358 - efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor : 359 - systab64->fw_vendor; 360 - tmp |= data ? data->fw_vendor : systab64->fw_vendor; 361 - efi_systab.fw_revision = systab64->fw_revision; 362 - efi_systab.con_in_handle = systab64->con_in_handle; 363 - tmp |= systab64->con_in_handle; 364 - efi_systab.con_in = systab64->con_in; 365 - tmp |= systab64->con_in; 366 - efi_systab.con_out_handle = systab64->con_out_handle; 367 - tmp |= systab64->con_out_handle; 368 - efi_systab.con_out = systab64->con_out; 369 - tmp |= systab64->con_out; 370 - efi_systab.stderr_handle = systab64->stderr_handle; 371 - tmp |= systab64->stderr_handle; 372 - efi_systab.stderr = systab64->stderr; 373 - tmp |= systab64->stderr; 374 - efi_systab.runtime = data ? 375 - (void *)(unsigned long)data->runtime : 376 - (void *)(unsigned long)systab64->runtime; 377 - tmp |= data ? data->runtime : systab64->runtime; 378 - efi_systab.boottime = (void *)(unsigned long)systab64->boottime; 379 - tmp |= systab64->boottime; 380 - efi_systab.nr_tables = systab64->nr_tables; 381 - efi_systab.tables = data ? (unsigned long)data->tables : 382 - systab64->tables; 383 - tmp |= data ? 
data->tables : systab64->tables; 403 + efi_systab.fw_vendor = (unsigned long)data->fw_vendor; 404 + efi_systab.runtime = (void *)(unsigned long)data->runtime; 405 + efi_systab.tables = (unsigned long)data->tables; 384 406 385 - early_memunmap(systab64, sizeof(*systab64)); 386 - if (data) 407 + over4g |= data->fw_vendor > U32_MAX || 408 + data->runtime > U32_MAX || 409 + data->tables > U32_MAX; 410 + 387 411 early_memunmap(data, sizeof(*data)); 388 - #ifdef CONFIG_X86_32 389 - if (tmp >> 32) { 390 - pr_err("EFI data located above 4GB, disabling EFI.\n"); 391 - return -EINVAL; 412 + } else { 413 + over4g |= systab64->fw_vendor > U32_MAX || 414 + systab64->runtime > U32_MAX || 415 + systab64->tables > U32_MAX; 392 416 } 393 - #endif 394 417 } else { 395 - efi_system_table_32_t *systab32; 418 + const efi_system_table_32_t *systab32 = p; 396 419 397 - systab32 = early_memremap((unsigned long)phys, 398 - sizeof(*systab32)); 399 - if (systab32 == NULL) { 400 - pr_err("Couldn't map the system table!\n"); 401 - return -ENOMEM; 402 - } 420 + efi_systab.hdr = systab32->hdr; 421 + efi_systab.fw_vendor = systab32->fw_vendor; 422 + efi_systab.fw_revision = systab32->fw_revision; 423 + efi_systab.con_in_handle = systab32->con_in_handle; 424 + efi_systab.con_in = systab32->con_in; 425 + efi_systab.con_out_handle = systab32->con_out_handle; 426 + efi_systab.con_out = (void *)(unsigned long)systab32->con_out; 427 + efi_systab.stderr_handle = systab32->stderr_handle; 428 + efi_systab.stderr = systab32->stderr; 429 + efi_systab.runtime = (void *)(unsigned long)systab32->runtime; 430 + efi_systab.boottime = (void *)(unsigned long)systab32->boottime; 431 + efi_systab.nr_tables = systab32->nr_tables; 432 + efi_systab.tables = systab32->tables; 433 + } 403 434 404 - efi_systab.hdr = systab32->hdr; 405 - efi_systab.fw_vendor = systab32->fw_vendor; 406 - efi_systab.fw_revision = systab32->fw_revision; 407 - efi_systab.con_in_handle = systab32->con_in_handle; 408 - efi_systab.con_in = 
systab32->con_in; 409 - efi_systab.con_out_handle = systab32->con_out_handle; 410 - efi_systab.con_out = systab32->con_out; 411 - efi_systab.stderr_handle = systab32->stderr_handle; 412 - efi_systab.stderr = systab32->stderr; 413 - efi_systab.runtime = (void *)(unsigned long)systab32->runtime; 414 - efi_systab.boottime = (void *)(unsigned long)systab32->boottime; 415 - efi_systab.nr_tables = systab32->nr_tables; 416 - efi_systab.tables = systab32->tables; 435 + early_memunmap(p, size); 417 436 418 - early_memunmap(systab32, sizeof(*systab32)); 437 + if (IS_ENABLED(CONFIG_X86_32) && over4g) { 438 + pr_err("EFI data located above 4GB, disabling EFI.\n"); 439 + return -EINVAL; 419 440 } 420 441 421 442 efi.systab = &efi_systab; ··· 436 455 return 0; 437 456 } 438 457 439 - static int __init efi_runtime_init32(void) 440 - { 441 - efi_runtime_services_32_t *runtime; 442 - 443 - runtime = early_memremap((unsigned long)efi.systab->runtime, 444 - sizeof(efi_runtime_services_32_t)); 445 - if (!runtime) { 446 - pr_err("Could not map the runtime service table!\n"); 447 - return -ENOMEM; 448 - } 449 - 450 - /* 451 - * We will only need *early* access to the SetVirtualAddressMap 452 - * EFI runtime service. All other runtime services will be called 453 - * via the virtual mapping. 454 - */ 455 - efi_phys.set_virtual_address_map = 456 - (efi_set_virtual_address_map_t *) 457 - (unsigned long)runtime->set_virtual_address_map; 458 - early_memunmap(runtime, sizeof(efi_runtime_services_32_t)); 459 - 460 - return 0; 461 - } 462 - 463 - static int __init efi_runtime_init64(void) 464 - { 465 - efi_runtime_services_64_t *runtime; 466 - 467 - runtime = early_memremap((unsigned long)efi.systab->runtime, 468 - sizeof(efi_runtime_services_64_t)); 469 - if (!runtime) { 470 - pr_err("Could not map the runtime service table!\n"); 471 - return -ENOMEM; 472 - } 473 - 474 - /* 475 - * We will only need *early* access to the SetVirtualAddressMap 476 - * EFI runtime service. 
All other runtime services will be called 477 - * via the virtual mapping. 478 - */ 479 - efi_phys.set_virtual_address_map = 480 - (efi_set_virtual_address_map_t *) 481 - (unsigned long)runtime->set_virtual_address_map; 482 - early_memunmap(runtime, sizeof(efi_runtime_services_64_t)); 483 - 484 - return 0; 485 - } 486 - 487 - static int __init efi_runtime_init(void) 488 - { 489 - int rv; 490 - 491 - /* 492 - * Check out the runtime services table. We need to map 493 - * the runtime services table so that we can grab the physical 494 - * address of several of the EFI runtime functions, needed to 495 - * set the firmware into virtual mode. 496 - * 497 - * When EFI_PARAVIRT is in force then we could not map runtime 498 - * service memory region because we do not have direct access to it. 499 - * However, runtime services are available through proxy functions 500 - * (e.g. in case of Xen dom0 EFI implementation they call special 501 - * hypercall which executes relevant EFI functions) and that is why 502 - * they are always enabled. 
503 - */ 504 - 505 - if (!efi_enabled(EFI_PARAVIRT)) { 506 - if (efi_enabled(EFI_64BIT)) 507 - rv = efi_runtime_init64(); 508 - else 509 - rv = efi_runtime_init32(); 510 - 511 - if (rv) 512 - return rv; 513 - } 514 - 515 - set_bit(EFI_RUNTIME_SERVICES, &efi.flags); 516 - 517 - return 0; 518 - } 519 - 520 458 void __init efi_init(void) 521 459 { 522 460 efi_char16_t *c16; 523 461 char vendor[100] = "unknown"; 524 462 int i = 0; 525 - void *tmp; 526 463 527 - #ifdef CONFIG_X86_32 528 - if (boot_params.efi_info.efi_systab_hi || 529 - boot_params.efi_info.efi_memmap_hi) { 464 + if (IS_ENABLED(CONFIG_X86_32) && 465 + (boot_params.efi_info.efi_systab_hi || 466 + boot_params.efi_info.efi_memmap_hi)) { 530 467 pr_info("Table located above 4GB, disabling EFI.\n"); 531 468 return; 532 469 } 533 - efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; 534 - #else 535 - efi_phys.systab = (efi_system_table_t *) 536 - (boot_params.efi_info.efi_systab | 537 - ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 538 - #endif 539 470 540 - if (efi_systab_init(efi_phys.systab)) 471 + efi_systab_phys = boot_params.efi_info.efi_systab | 472 + ((__u64)boot_params.efi_info.efi_systab_hi << 32); 473 + 474 + if (efi_systab_init(efi_systab_phys)) 541 475 return; 542 476 543 477 efi.config_table = (unsigned long)efi.systab->tables; ··· 462 566 /* 463 567 * Show what we know for posterity 464 568 */ 465 - c16 = tmp = early_memremap(efi.systab->fw_vendor, 2); 569 + c16 = early_memremap_ro(efi.systab->fw_vendor, 570 + sizeof(vendor) * sizeof(efi_char16_t)); 466 571 if (c16) { 467 - for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) 468 - vendor[i] = *c16++; 572 + for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i) 573 + vendor[i] = c16[i]; 469 574 vendor[i] = '\0'; 470 - } else 575 + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); 576 + } else { 471 577 pr_err("Could not map the firmware vendor!\n"); 472 - early_memunmap(tmp, 2); 578 + } 473 579 474 580 pr_info("EFI 
v%u.%.02u by %s\n", 475 581 efi.systab->hdr.revision >> 16, ··· 490 592 491 593 if (!efi_runtime_supported()) 492 594 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 493 - else { 494 - if (efi_runtime_disabled() || efi_runtime_init()) { 495 - efi_memmap_unmap(); 496 - return; 497 - } 595 + 596 + if (!efi_runtime_supported() || efi_runtime_disabled()) { 597 + efi_memmap_unmap(); 598 + return; 498 599 } 499 600 601 + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); 500 602 efi_clean_memmap(); 501 603 502 604 if (efi_enabled(EFI_DBG)) 503 605 efi_print_memmap(); 504 606 } 607 + 608 + #if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV) 505 609 506 610 void __init efi_set_executable(efi_memory_desc_t *md, bool executable) 507 611 { ··· 569 669 (unsigned long long)md->phys_addr); 570 670 } 571 671 672 + #endif 673 + 572 674 /* Merge contiguous regions of the same type and attribute */ 573 675 static void __init efi_merge_regions(void) 574 676 { ··· 609 707 610 708 size = md->num_pages << EFI_PAGE_SHIFT; 611 709 end = md->phys_addr + size; 612 - systab = (u64)(unsigned long)efi_phys.systab; 710 + systab = efi_systab_phys; 613 711 if (md->phys_addr <= systab && systab < end) { 614 712 systab += md->virt_addr - md->phys_addr; 615 713 efi.systab = (efi_system_table_t *)(unsigned long)systab; ··· 669 767 */ 670 768 static void *efi_map_next_entry(void *entry) 671 769 { 672 - if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { 770 + if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) { 673 771 /* 674 772 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE 675 773 * config table feature requires us to map all entries ··· 730 828 * Map all of RAM so that we can access arguments in the 1:1 731 829 * mapping when making EFI runtime calls. 
732 830 */ 733 - if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) { 831 + if (efi_is_mixed()) { 734 832 if (md->type == EFI_CONVENTIONAL_MEMORY || 735 833 md->type == EFI_LOADER_DATA || 736 834 md->type == EFI_LOADER_CODE) ··· 801 899 802 900 /* 803 901 * We don't do virtual mode, since we don't do runtime services, on 804 - * non-native EFI. With efi=old_map, we don't do runtime services in 902 + * non-native EFI. With the UV1 memmap, we don't do runtime services in 805 903 * kexec kernel because in the initial boot something else might 806 904 * have been mapped at these virtual addresses. 807 905 */ 808 - if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) { 906 + if (efi_is_mixed() || efi_have_uv1_memmap()) { 809 907 efi_memmap_unmap(); 810 908 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 811 909 return; ··· 860 958 efi.runtime_version = efi_systab.hdr.revision; 861 959 862 960 efi_native_runtime_setup(); 863 - 864 - efi.set_virtual_address_map = NULL; 865 - 866 - if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) 867 - runtime_code_page_mkexec(); 868 961 #endif 869 962 } 870 963 ··· 871 974 * 872 975 * The old method which used to update that memory descriptor with the 873 976 * virtual address obtained from ioremap() is still supported when the 874 - * kernel is booted with efi=old_map on its command line. Same old 875 - * method enabled the runtime services to be called without having to 876 - * thunk back into physical mode for every invocation. 977 + * kernel is booted on SG1 UV1 hardware. Same old method enabled the 978 + * runtime services to be called without having to thunk back into 979 + * physical mode for every invocation. 
877 980 * 878 981 * The new method does a pagetable switch in a preemption-safe manner 879 982 * so that we're in a different address space when calling a runtime ··· 896 999 897 1000 if (efi_alloc_page_tables()) { 898 1001 pr_err("Failed to allocate EFI page tables\n"); 899 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 900 - return; 1002 + goto err; 901 1003 } 902 1004 903 1005 efi_merge_regions(); 904 1006 new_memmap = efi_map_regions(&count, &pg_shift); 905 1007 if (!new_memmap) { 906 1008 pr_err("Error reallocating memory, EFI runtime non-functional!\n"); 907 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 908 - return; 1009 + goto err; 909 1010 } 910 1011 911 1012 pa = __pa(new_memmap); ··· 917 1022 918 1023 if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) { 919 1024 pr_err("Failed to remap late EFI memory map\n"); 920 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 921 - return; 1025 + goto err; 922 1026 } 923 1027 924 1028 if (efi_enabled(EFI_DBG)) { ··· 925 1031 efi_print_memmap(); 926 1032 } 927 1033 928 - BUG_ON(!efi.systab); 1034 + if (WARN_ON(!efi.systab)) 1035 + goto err; 929 1036 930 - if (efi_setup_page_tables(pa, 1 << pg_shift)) { 931 - clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 932 - return; 933 - } 1037 + if (efi_setup_page_tables(pa, 1 << pg_shift)) 1038 + goto err; 934 1039 935 1040 efi_sync_low_kernel_mappings(); 936 1041 937 - if (efi_is_native()) { 938 - status = phys_efi_set_virtual_address_map( 939 - efi.memmap.desc_size * count, 940 - efi.memmap.desc_size, 941 - efi.memmap.desc_version, 942 - (efi_memory_desc_t *)pa); 943 - } else { 944 - status = efi_thunk_set_virtual_address_map( 945 - efi_phys.set_virtual_address_map, 946 - efi.memmap.desc_size * count, 947 - efi.memmap.desc_size, 948 - efi.memmap.desc_version, 949 - (efi_memory_desc_t *)pa); 950 - } 951 - 1042 + status = efi_set_virtual_address_map(efi.memmap.desc_size * count, 1043 + efi.memmap.desc_size, 1044 + efi.memmap.desc_version, 1045 + (efi_memory_desc_t *)pa); 952 
1046 if (status != EFI_SUCCESS) { 953 - pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", 954 - status); 955 - panic("EFI call to SetVirtualAddressMap() failed!"); 1047 + pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n", 1048 + status); 1049 + goto err; 956 1050 } 957 1051 958 1052 efi_free_boot_services(); ··· 953 1071 */ 954 1072 efi.runtime_version = efi_systab.hdr.revision; 955 1073 956 - if (efi_is_native()) 1074 + if (!efi_is_mixed()) 957 1075 efi_native_runtime_setup(); 958 1076 else 959 1077 efi_thunk_runtime_setup(); 960 - 961 - efi.set_virtual_address_map = NULL; 962 1078 963 1079 /* 964 1080 * Apply more restrictive page table mapping attributes now that ··· 967 1087 968 1088 /* clean DUMMY object */ 969 1089 efi_delete_dummy_variable(); 1090 + return; 1091 + 1092 + err: 1093 + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 970 1094 } 971 1095 972 1096 void __init efi_enter_virtual_mode(void) ··· 985 1101 986 1102 efi_dump_pagetable(); 987 1103 } 988 - 989 - static int __init arch_parse_efi_cmdline(char *str) 990 - { 991 - if (!str) { 992 - pr_warn("need at least one option\n"); 993 - return -EINVAL; 994 - } 995 - 996 - if (parse_option_str(str, "old_map")) 997 - set_bit(EFI_OLD_MEMMAP, &efi.flags); 998 - 999 - return 0; 1000 - } 1001 - early_param("efi", arch_parse_efi_cmdline); 1002 1104 1003 1105 bool efi_is_table_address(unsigned long phys_addr) 1004 1106 {
+17 -5
arch/x86/platform/efi/efi_32.c
··· 66 66 void __init efi_map_region_fixed(efi_memory_desc_t *md) {} 67 67 void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} 68 68 69 - pgd_t * __init efi_call_phys_prolog(void) 69 + efi_status_t efi_call_svam(efi_set_virtual_address_map_t *__efiapi *, 70 + u32, u32, u32, void *); 71 + 72 + efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, 73 + unsigned long descriptor_size, 74 + u32 descriptor_version, 75 + efi_memory_desc_t *virtual_map) 70 76 { 71 77 struct desc_ptr gdt_descr; 78 + efi_status_t status; 79 + unsigned long flags; 72 80 pgd_t *save_pgd; 73 81 74 82 /* Current pgd is swapper_pg_dir, we'll restore it later: */ ··· 88 80 gdt_descr.size = GDT_SIZE - 1; 89 81 load_gdt(&gdt_descr); 90 82 91 - return save_pgd; 92 - } 83 + /* Disable interrupts around EFI calls: */ 84 + local_irq_save(flags); 85 + status = efi_call_svam(&efi.systab->runtime->set_virtual_address_map, 86 + memory_map_size, descriptor_size, 87 + descriptor_version, virtual_map); 88 + local_irq_restore(flags); 93 89 94 - void __init efi_call_phys_epilog(pgd_t *save_pgd) 95 - { 96 90 load_fixmap_gdt(0); 97 91 load_cr3(save_pgd); 98 92 __flush_tlb_all(); 93 + 94 + return status; 99 95 } 100 96 101 97 void __init efi_runtime_update_mappings(void)
+115 -202
arch/x86/platform/efi/efi_64.c
··· 57 57 58 58 struct efi_scratch efi_scratch; 59 59 60 - static void __init early_code_mapping_set_exec(int executable) 61 - { 62 - efi_memory_desc_t *md; 63 - 64 - if (!(__supported_pte_mask & _PAGE_NX)) 65 - return; 66 - 67 - /* Make EFI service code area executable */ 68 - for_each_efi_memory_desc(md) { 69 - if (md->type == EFI_RUNTIME_SERVICES_CODE || 70 - md->type == EFI_BOOT_SERVICES_CODE) 71 - efi_set_executable(md, executable); 72 - } 73 - } 74 - 75 - pgd_t * __init efi_call_phys_prolog(void) 76 - { 77 - unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; 78 - pgd_t *save_pgd, *pgd_k, *pgd_efi; 79 - p4d_t *p4d, *p4d_k, *p4d_efi; 80 - pud_t *pud; 81 - 82 - int pgd; 83 - int n_pgds, i, j; 84 - 85 - if (!efi_enabled(EFI_OLD_MEMMAP)) { 86 - efi_switch_mm(&efi_mm); 87 - return efi_mm.pgd; 88 - } 89 - 90 - early_code_mapping_set_exec(1); 91 - 92 - n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 93 - save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); 94 - if (!save_pgd) 95 - return NULL; 96 - 97 - /* 98 - * Build 1:1 identity mapping for efi=old_map usage. Note that 99 - * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while 100 - * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical 101 - * address X, the pud_index(X) != pud_index(__va(X)), we can only copy 102 - * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping. 103 - * This means here we can only reuse the PMD tables of the direct mapping. 
104 - */ 105 - for (pgd = 0; pgd < n_pgds; pgd++) { 106 - addr_pgd = (unsigned long)(pgd * PGDIR_SIZE); 107 - vaddr = (unsigned long)__va(pgd * PGDIR_SIZE); 108 - pgd_efi = pgd_offset_k(addr_pgd); 109 - save_pgd[pgd] = *pgd_efi; 110 - 111 - p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd); 112 - if (!p4d) { 113 - pr_err("Failed to allocate p4d table!\n"); 114 - goto out; 115 - } 116 - 117 - for (i = 0; i < PTRS_PER_P4D; i++) { 118 - addr_p4d = addr_pgd + i * P4D_SIZE; 119 - p4d_efi = p4d + p4d_index(addr_p4d); 120 - 121 - pud = pud_alloc(&init_mm, p4d_efi, addr_p4d); 122 - if (!pud) { 123 - pr_err("Failed to allocate pud table!\n"); 124 - goto out; 125 - } 126 - 127 - for (j = 0; j < PTRS_PER_PUD; j++) { 128 - addr_pud = addr_p4d + j * PUD_SIZE; 129 - 130 - if (addr_pud > (max_pfn << PAGE_SHIFT)) 131 - break; 132 - 133 - vaddr = (unsigned long)__va(addr_pud); 134 - 135 - pgd_k = pgd_offset_k(vaddr); 136 - p4d_k = p4d_offset(pgd_k, vaddr); 137 - pud[j] = *pud_offset(p4d_k, vaddr); 138 - } 139 - } 140 - pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; 141 - } 142 - 143 - __flush_tlb_all(); 144 - return save_pgd; 145 - out: 146 - efi_call_phys_epilog(save_pgd); 147 - return NULL; 148 - } 149 - 150 - void __init efi_call_phys_epilog(pgd_t *save_pgd) 151 - { 152 - /* 153 - * After the lock is released, the original page table is restored. 
154 - */ 155 - int pgd_idx, i; 156 - int nr_pgds; 157 - pgd_t *pgd; 158 - p4d_t *p4d; 159 - pud_t *pud; 160 - 161 - if (!efi_enabled(EFI_OLD_MEMMAP)) { 162 - efi_switch_mm(efi_scratch.prev_mm); 163 - return; 164 - } 165 - 166 - nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); 167 - 168 - for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { 169 - pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); 170 - set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); 171 - 172 - if (!pgd_present(*pgd)) 173 - continue; 174 - 175 - for (i = 0; i < PTRS_PER_P4D; i++) { 176 - p4d = p4d_offset(pgd, 177 - pgd_idx * PGDIR_SIZE + i * P4D_SIZE); 178 - 179 - if (!p4d_present(*p4d)) 180 - continue; 181 - 182 - pud = (pud_t *)p4d_page_vaddr(*p4d); 183 - pud_free(&init_mm, pud); 184 - } 185 - 186 - p4d = (p4d_t *)pgd_page_vaddr(*pgd); 187 - p4d_free(&init_mm, p4d); 188 - } 189 - 190 - kfree(save_pgd); 191 - 192 - __flush_tlb_all(); 193 - early_code_mapping_set_exec(0); 194 - } 195 - 196 60 EXPORT_SYMBOL_GPL(efi_mm); 197 61 198 62 /* ··· 75 211 pud_t *pud; 76 212 gfp_t gfp_mask; 77 213 78 - if (efi_enabled(EFI_OLD_MEMMAP)) 214 + if (efi_have_uv1_memmap()) 79 215 return 0; 80 216 81 217 gfp_mask = GFP_KERNEL | __GFP_ZERO; ··· 116 252 pud_t *pud_k, *pud_efi; 117 253 pgd_t *efi_pgd = efi_mm.pgd; 118 254 119 - if (efi_enabled(EFI_OLD_MEMMAP)) 255 + if (efi_have_uv1_memmap()) 120 256 return; 121 257 122 258 /* ··· 210 346 unsigned npages; 211 347 pgd_t *pgd = efi_mm.pgd; 212 348 213 - if (efi_enabled(EFI_OLD_MEMMAP)) 349 + if (efi_have_uv1_memmap()) 214 350 return 0; 215 351 216 352 /* ··· 237 373 * as trim_bios_range() will reserve the first page and isolate it away 238 374 * from memory allocators anyway. 
239 375 */ 240 - pf = _PAGE_RW; 241 - if (sev_active()) 242 - pf |= _PAGE_ENC; 243 - 244 376 if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) { 245 377 pr_err("Failed to create 1:1 mapping for the first page!\n"); 246 378 return 1; ··· 248 388 * text and allocate a new stack because we can't rely on the 249 389 * stack pointer being < 4GB. 250 390 */ 251 - if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native()) 391 + if (!efi_is_mixed()) 252 392 return 0; 253 393 254 394 page = alloc_page(GFP_KERNEL|__GFP_DMA32); 255 - if (!page) 256 - panic("Unable to allocate EFI runtime stack < 4GB\n"); 395 + if (!page) { 396 + pr_err("Unable to allocate EFI runtime stack < 4GB\n"); 397 + return 1; 398 + } 257 399 258 - efi_scratch.phys_stack = virt_to_phys(page_address(page)); 259 - efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */ 400 + efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */ 260 401 261 - npages = (_etext - _text) >> PAGE_SHIFT; 402 + npages = (__end_rodata_aligned - _text) >> PAGE_SHIFT; 262 403 text = __pa(_text); 263 404 pfn = text >> PAGE_SHIFT; 264 405 265 - pf = _PAGE_RW | _PAGE_ENC; 406 + pf = _PAGE_ENC; 266 407 if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) { 267 408 pr_err("Failed to map kernel text 1:1\n"); 268 409 return 1; ··· 277 416 unsigned long flags = _PAGE_RW; 278 417 unsigned long pfn; 279 418 pgd_t *pgd = efi_mm.pgd; 419 + 420 + /* 421 + * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF 422 + * executable images in memory that consist of both R-X and 423 + * RW- sections, so we cannot apply read-only or non-exec 424 + * permissions just yet. However, modern EFI systems provide 425 + * a memory attributes table that describes those sections 426 + * with the appropriate restricted permissions, which are 427 + * applied in efi_runtime_update_mappings() below. 
All other 428 + * regions can be mapped non-executable at this point, with 429 + * the exception of boot services code regions, but those will 430 + * be unmapped again entirely in efi_free_boot_services(). 431 + */ 432 + if (md->type != EFI_BOOT_SERVICES_CODE && 433 + md->type != EFI_RUNTIME_SERVICES_CODE) 434 + flags |= _PAGE_NX; 280 435 281 436 if (!(md->attribute & EFI_MEMORY_WB)) 282 437 flags |= _PAGE_PCD; ··· 311 434 unsigned long size = md->num_pages << PAGE_SHIFT; 312 435 u64 pa = md->phys_addr; 313 436 314 - if (efi_enabled(EFI_OLD_MEMMAP)) 437 + if (efi_have_uv1_memmap()) 315 438 return old_map_region(md); 316 439 317 440 /* ··· 326 449 * booting in EFI mixed mode, because even though we may be 327 450 * running a 64-bit kernel, the firmware may only be 32-bit. 328 451 */ 329 - if (!efi_is_native () && IS_ENABLED(CONFIG_EFI_MIXED)) { 452 + if (efi_is_mixed()) { 330 453 md->virt_addr = md->phys_addr; 331 454 return; 332 455 } ··· 366 489 { 367 490 __map_region(md, md->phys_addr); 368 491 __map_region(md, md->virt_addr); 369 - } 370 - 371 - void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, 372 - u32 type, u64 attribute) 373 - { 374 - unsigned long last_map_pfn; 375 - 376 - if (type == EFI_MEMORY_MAPPED_IO) 377 - return ioremap(phys_addr, size); 378 - 379 - last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); 380 - if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { 381 - unsigned long top = last_map_pfn << PAGE_SHIFT; 382 - efi_ioremap(top, size - (top - phys_addr), type, attribute); 383 - } 384 - 385 - if (!(attribute & EFI_MEMORY_WB)) 386 - efi_memory_uc((u64)(unsigned long)__va(phys_addr), size); 387 - 388 - return (void __iomem *)__va(phys_addr); 389 492 } 390 493 391 494 void __init parse_efi_setup(u64 phys_addr, u32 data_len) ··· 416 559 { 417 560 efi_memory_desc_t *md; 418 561 419 - if (efi_enabled(EFI_OLD_MEMMAP)) { 562 + if (efi_have_uv1_memmap()) { 420 563 if (__supported_pte_mask & _PAGE_NX) 421 564 
runtime_code_page_mkexec(); 422 565 return; ··· 470 613 void __init efi_dump_pagetable(void) 471 614 { 472 615 #ifdef CONFIG_EFI_PGT_DUMP 473 - if (efi_enabled(EFI_OLD_MEMMAP)) 616 + if (efi_have_uv1_memmap()) 474 617 ptdump_walk_pgd_level(NULL, swapper_pg_dir); 475 618 else 476 619 ptdump_walk_pgd_level(NULL, efi_mm.pgd); ··· 491 634 switch_mm(efi_scratch.prev_mm, mm, NULL); 492 635 } 493 636 494 - #ifdef CONFIG_EFI_MIXED 495 - extern efi_status_t efi64_thunk(u32, ...); 496 - 497 637 static DEFINE_SPINLOCK(efi_runtime_lock); 498 638 499 - #define runtime_service32(func) \ 500 - ({ \ 501 - u32 table = (u32)(unsigned long)efi.systab; \ 502 - u32 *rt, *___f; \ 503 - \ 504 - rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \ 505 - ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \ 506 - *___f; \ 639 + /* 640 + * DS and ES contain user values. We need to save them. 641 + * The 32-bit EFI code needs a valid DS, ES, and SS. There's no 642 + * need to save the old SS: __KERNEL_DS is always acceptable. 643 + */ 644 + #define __efi_thunk(func, ...) \ 645 + ({ \ 646 + efi_runtime_services_32_t *__rt; \ 647 + unsigned short __ds, __es; \ 648 + efi_status_t ____s; \ 649 + \ 650 + __rt = (void *)(unsigned long)efi.systab->mixed_mode.runtime; \ 651 + \ 652 + savesegment(ds, __ds); \ 653 + savesegment(es, __es); \ 654 + \ 655 + loadsegment(ss, __KERNEL_DS); \ 656 + loadsegment(ds, __KERNEL_DS); \ 657 + loadsegment(es, __KERNEL_DS); \ 658 + \ 659 + ____s = efi64_thunk(__rt->func, __VA_ARGS__); \ 660 + \ 661 + loadsegment(ds, __ds); \ 662 + loadsegment(es, __es); \ 663 + \ 664 + ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32; \ 665 + ____s; \ 507 666 }) 508 667 509 668 /* 510 669 * Switch to the EFI page tables early so that we can access the 1:1 511 670 * runtime services mappings which are not mapped in any other page 512 - * tables. This function must be called before runtime_service32(). 671 + * tables. 
513 672 * 514 673 * Also, disable interrupts because the IDT points to 64-bit handlers, 515 674 * which aren't going to function correctly when we switch to 32-bit. 516 675 */ 517 - #define efi_thunk(f, ...) \ 676 + #define efi_thunk(func...) \ 518 677 ({ \ 519 678 efi_status_t __s; \ 520 - u32 __func; \ 521 679 \ 522 680 arch_efi_call_virt_setup(); \ 523 681 \ 524 - __func = runtime_service32(f); \ 525 - __s = efi64_thunk(__func, __VA_ARGS__); \ 682 + __s = __efi_thunk(func); \ 526 683 \ 527 684 arch_efi_call_virt_teardown(); \ 528 685 \ 529 686 __s; \ 530 687 }) 531 688 532 - efi_status_t efi_thunk_set_virtual_address_map( 533 - void *phys_set_virtual_address_map, 534 - unsigned long memory_map_size, 535 - unsigned long descriptor_size, 536 - u32 descriptor_version, 537 - efi_memory_desc_t *virtual_map) 689 + static efi_status_t __init __no_sanitize_address 690 + efi_thunk_set_virtual_address_map(unsigned long memory_map_size, 691 + unsigned long descriptor_size, 692 + u32 descriptor_version, 693 + efi_memory_desc_t *virtual_map) 538 694 { 539 695 efi_status_t status; 540 696 unsigned long flags; 541 - u32 func; 542 697 543 698 efi_sync_low_kernel_mappings(); 544 699 local_irq_save(flags); 545 700 546 701 efi_switch_mm(&efi_mm); 547 702 548 - func = (u32)(unsigned long)phys_set_virtual_address_map; 549 - status = efi64_thunk(func, memory_map_size, descriptor_size, 550 - descriptor_version, virtual_map); 703 + status = __efi_thunk(set_virtual_address_map, memory_map_size, 704 + descriptor_size, descriptor_version, virtual_map); 551 705 552 706 efi_switch_mm(efi_scratch.prev_mm); 553 707 local_irq_restore(flags); ··· 861 993 return EFI_UNSUPPORTED; 862 994 } 863 995 864 - void efi_thunk_runtime_setup(void) 996 + void __init efi_thunk_runtime_setup(void) 865 997 { 998 + if (!IS_ENABLED(CONFIG_EFI_MIXED)) 999 + return; 1000 + 866 1001 efi.get_time = efi_thunk_get_time; 867 1002 efi.set_time = efi_thunk_set_time; 868 1003 efi.get_wakeup_time = 
efi_thunk_get_wakeup_time; ··· 881 1010 efi.update_capsule = efi_thunk_update_capsule; 882 1011 efi.query_capsule_caps = efi_thunk_query_capsule_caps; 883 1012 } 884 - #endif /* CONFIG_EFI_MIXED */ 1013 + 1014 + efi_status_t __init __no_sanitize_address 1015 + efi_set_virtual_address_map(unsigned long memory_map_size, 1016 + unsigned long descriptor_size, 1017 + u32 descriptor_version, 1018 + efi_memory_desc_t *virtual_map) 1019 + { 1020 + efi_status_t status; 1021 + unsigned long flags; 1022 + pgd_t *save_pgd = NULL; 1023 + 1024 + if (efi_is_mixed()) 1025 + return efi_thunk_set_virtual_address_map(memory_map_size, 1026 + descriptor_size, 1027 + descriptor_version, 1028 + virtual_map); 1029 + 1030 + if (efi_have_uv1_memmap()) { 1031 + save_pgd = efi_uv1_memmap_phys_prolog(); 1032 + if (!save_pgd) 1033 + return EFI_ABORTED; 1034 + } else { 1035 + efi_switch_mm(&efi_mm); 1036 + } 1037 + 1038 + kernel_fpu_begin(); 1039 + 1040 + /* Disable interrupts around EFI calls: */ 1041 + local_irq_save(flags); 1042 + status = efi_call(efi.systab->runtime->set_virtual_address_map, 1043 + memory_map_size, descriptor_size, 1044 + descriptor_version, virtual_map); 1045 + local_irq_restore(flags); 1046 + 1047 + kernel_fpu_end(); 1048 + 1049 + if (save_pgd) 1050 + efi_uv1_memmap_phys_epilog(save_pgd); 1051 + else 1052 + efi_switch_mm(efi_scratch.prev_mm); 1053 + 1054 + return status; 1055 + }
+17 -92
arch/x86/platform/efi/efi_stub_32.S
··· 7 7 */ 8 8 9 9 #include <linux/linkage.h> 10 + #include <linux/init.h> 10 11 #include <asm/page_types.h> 11 12 12 - /* 13 - * efi_call_phys(void *, ...) is a function with variable parameters. 14 - * All the callers of this function assure that all the parameters are 4-bytes. 15 - */ 16 - 17 - /* 18 - * In gcc calling convention, EBX, ESP, EBP, ESI and EDI are all callee save. 19 - * So we'd better save all of them at the beginning of this function and restore 20 - * at the end no matter how many we use, because we can not assure EFI runtime 21 - * service functions will comply with gcc calling convention, too. 22 - */ 23 - 24 - .text 25 - SYM_FUNC_START(efi_call_phys) 26 - /* 27 - * 0. The function can only be called in Linux kernel. So CS has been 28 - * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found 29 - * the values of these registers are the same. And, the corresponding 30 - * GDT entries are identical. So I will do nothing about segment reg 31 - * and GDT, but change GDT base register in prolog and epilog. 32 - */ 13 + __INIT 14 + SYM_FUNC_START(efi_call_svam) 15 + push 8(%esp) 16 + push 8(%esp) 17 + push %ecx 18 + push %edx 33 19 34 20 /* 35 - * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET. 36 - * But to make it smoothly switch from virtual mode to flat mode. 37 - * The mapping of lower virtual memory has been created in prolog and 38 - * epilog. 21 + * Switch to the flat mapped alias of this routine, by jumping to the 22 + * address of label '1' after subtracting PAGE_OFFSET from it. 39 23 */ 40 24 movl $1f, %edx 41 25 subl $__PAGE_OFFSET, %edx 42 26 jmp *%edx 43 27 1: 44 28 45 - /* 46 - * 2. Now on the top of stack is the return 47 - * address in the caller of efi_call_phys(), then parameter 1, 48 - * parameter 2, ..., param n. To make things easy, we save the return 49 - * address of efi_call_phys in a global variable. 
50 - */ 51 - popl %edx 52 - movl %edx, saved_return_addr 53 - /* get the function pointer into ECX*/ 54 - popl %ecx 55 - movl %ecx, efi_rt_function_ptr 56 - movl $2f, %edx 57 - subl $__PAGE_OFFSET, %edx 58 - pushl %edx 59 - 60 - /* 61 - * 3. Clear PG bit in %CR0. 62 - */ 29 + /* disable paging */ 63 30 movl %cr0, %edx 64 31 andl $0x7fffffff, %edx 65 32 movl %edx, %cr0 66 - jmp 1f 67 - 1: 68 33 69 - /* 70 - * 4. Adjust stack pointer. 71 - */ 34 + /* convert the stack pointer to a flat mapped address */ 72 35 subl $__PAGE_OFFSET, %esp 73 36 74 - /* 75 - * 5. Call the physical function. 76 - */ 77 - jmp *%ecx 37 + /* call the EFI routine */ 38 + call *(%eax) 78 39 79 - 2: 80 - /* 81 - * 6. After EFI runtime service returns, control will return to 82 - * following instruction. We'd better readjust stack pointer first. 83 - */ 84 - addl $__PAGE_OFFSET, %esp 40 + /* convert ESP back to a kernel VA, and pop the outgoing args */ 41 + addl $__PAGE_OFFSET + 16, %esp 85 42 86 - /* 87 - * 7. Restore PG bit 88 - */ 43 + /* re-enable paging */ 89 44 movl %cr0, %edx 90 45 orl $0x80000000, %edx 91 46 movl %edx, %cr0 92 - jmp 1f 93 - 1: 94 - /* 95 - * 8. Now restore the virtual mode from flat mode by 96 - * adding EIP with PAGE_OFFSET. 97 - */ 98 - movl $1f, %edx 99 - jmp *%edx 100 - 1: 101 47 102 - /* 103 - * 9. Balance the stack. And because EAX contain the return value, 104 - * we'd better not clobber it. 105 - */ 106 - leal efi_rt_function_ptr, %edx 107 - movl (%edx), %ecx 108 - pushl %ecx 109 - 110 - /* 111 - * 10. Push the saved return address onto the stack and return. 112 - */ 113 - leal saved_return_addr, %edx 114 - movl (%edx), %ecx 115 - pushl %ecx 116 48 ret 117 - SYM_FUNC_END(efi_call_phys) 118 - .previous 119 - 120 - .data 121 - saved_return_addr: 122 - .long 0 123 - efi_rt_function_ptr: 124 - .long 0 49 + SYM_FUNC_END(efi_call_svam)
+6 -37
arch/x86/platform/efi/efi_stub_64.S
··· 8 8 */ 9 9 10 10 #include <linux/linkage.h> 11 - #include <asm/segment.h> 12 - #include <asm/msr.h> 13 - #include <asm/processor-flags.h> 14 - #include <asm/page_types.h> 11 + #include <asm/nospec-branch.h> 15 12 16 - #define SAVE_XMM \ 17 - mov %rsp, %rax; \ 18 - subq $0x70, %rsp; \ 19 - and $~0xf, %rsp; \ 20 - mov %rax, (%rsp); \ 21 - mov %cr0, %rax; \ 22 - clts; \ 23 - mov %rax, 0x8(%rsp); \ 24 - movaps %xmm0, 0x60(%rsp); \ 25 - movaps %xmm1, 0x50(%rsp); \ 26 - movaps %xmm2, 0x40(%rsp); \ 27 - movaps %xmm3, 0x30(%rsp); \ 28 - movaps %xmm4, 0x20(%rsp); \ 29 - movaps %xmm5, 0x10(%rsp) 30 - 31 - #define RESTORE_XMM \ 32 - movaps 0x60(%rsp), %xmm0; \ 33 - movaps 0x50(%rsp), %xmm1; \ 34 - movaps 0x40(%rsp), %xmm2; \ 35 - movaps 0x30(%rsp), %xmm3; \ 36 - movaps 0x20(%rsp), %xmm4; \ 37 - movaps 0x10(%rsp), %xmm5; \ 38 - mov 0x8(%rsp), %rsi; \ 39 - mov %rsi, %cr0; \ 40 - mov (%rsp), %rsp 41 - 42 - SYM_FUNC_START(efi_call) 13 + SYM_FUNC_START(__efi_call) 43 14 pushq %rbp 44 15 movq %rsp, %rbp 45 - SAVE_XMM 16 + and $~0xf, %rsp 46 17 mov 16(%rbp), %rax 47 18 subq $48, %rsp 48 19 mov %r9, 32(%rsp) ··· 21 50 mov %r8, %r9 22 51 mov %rcx, %r8 23 52 mov %rsi, %rcx 24 - call *%rdi 25 - addq $48, %rsp 26 - RESTORE_XMM 27 - popq %rbp 53 + CALL_NOSPEC %rdi 54 + leave 28 55 ret 29 - SYM_FUNC_END(efi_call) 56 + SYM_FUNC_END(__efi_call)
+20 -101
arch/x86/platform/efi/efi_thunk_64.S
··· 25 25 26 26 .text 27 27 .code64 28 - SYM_FUNC_START(efi64_thunk) 28 + SYM_CODE_START(__efi64_thunk) 29 29 push %rbp 30 30 push %rbx 31 31 32 32 /* 33 33 * Switch to 1:1 mapped 32-bit stack pointer. 34 34 */ 35 - movq %rsp, efi_saved_sp(%rip) 35 + movq %rsp, %rax 36 36 movq efi_scratch(%rip), %rsp 37 + push %rax 37 38 38 39 /* 39 40 * Calculate the physical address of the kernel text. ··· 42 41 movq $__START_KERNEL_map, %rax 43 42 subq phys_base(%rip), %rax 44 43 45 - /* 46 - * Push some physical addresses onto the stack. This is easier 47 - * to do now in a code64 section while the assembler can address 48 - * 64-bit values. Note that all the addresses on the stack are 49 - * 32-bit. 50 - */ 51 - subq $16, %rsp 52 - leaq efi_exit32(%rip), %rbx 44 + leaq 1f(%rip), %rbp 45 + leaq 2f(%rip), %rbx 46 + subq %rax, %rbp 53 47 subq %rax, %rbx 54 - movl %ebx, 8(%rsp) 55 48 56 - leaq __efi64_thunk(%rip), %rbx 57 - subq %rax, %rbx 58 - call *%rbx 59 - 60 - movq efi_saved_sp(%rip), %rsp 61 - pop %rbx 62 - pop %rbp 63 - retq 64 - SYM_FUNC_END(efi64_thunk) 65 - 66 - /* 67 - * We run this function from the 1:1 mapping. 68 - * 69 - * This function must be invoked with a 1:1 mapped stack. 
70 - */ 71 - SYM_FUNC_START_LOCAL(__efi64_thunk) 72 - movl %ds, %eax 73 - push %rax 74 - movl %es, %eax 75 - push %rax 76 - movl %ss, %eax 77 - push %rax 78 - 79 - subq $32, %rsp 80 - movl %esi, 0x0(%rsp) 81 - movl %edx, 0x4(%rsp) 82 - movl %ecx, 0x8(%rsp) 83 - movq %r8, %rsi 84 - movl %esi, 0xc(%rsp) 85 - movq %r9, %rsi 86 - movl %esi, 0x10(%rsp) 87 - 88 - leaq 1f(%rip), %rbx 89 - movq %rbx, func_rt_ptr(%rip) 49 + subq $28, %rsp 50 + movl %ebx, 0x0(%rsp) /* return address */ 51 + movl %esi, 0x4(%rsp) 52 + movl %edx, 0x8(%rsp) 53 + movl %ecx, 0xc(%rsp) 54 + movl %r8d, 0x10(%rsp) 55 + movl %r9d, 0x14(%rsp) 90 56 91 57 /* Switch to 32-bit descriptor */ 92 58 pushq $__KERNEL32_CS 93 - leaq efi_enter32(%rip), %rax 94 - pushq %rax 59 + pushq %rdi /* EFI runtime service address */ 95 60 lretq 96 61 97 - 1: addq $32, %rsp 98 - 62 + 1: movq 24(%rsp), %rsp 99 63 pop %rbx 100 - movl %ebx, %ss 101 - pop %rbx 102 - movl %ebx, %es 103 - pop %rbx 104 - movl %ebx, %ds 105 - 106 - /* 107 - * Convert 32-bit status code into 64-bit. 108 - */ 109 - test %rax, %rax 110 - jz 1f 111 - movl %eax, %ecx 112 - andl $0x0fffffff, %ecx 113 - andl $0xf0000000, %eax 114 - shl $32, %rax 115 - or %rcx, %rax 116 - 1: 117 - ret 118 - SYM_FUNC_END(__efi64_thunk) 119 - 120 - SYM_FUNC_START_LOCAL(efi_exit32) 121 - movq func_rt_ptr(%rip), %rax 122 - push %rax 123 - mov %rdi, %rax 124 - ret 125 - SYM_FUNC_END(efi_exit32) 64 + pop %rbp 65 + retq 126 66 127 67 .code32 128 - /* 129 - * EFI service pointer must be in %edi. 130 - * 131 - * The stack should represent the 32-bit calling convention. 
132 - */ 133 - SYM_FUNC_START_LOCAL(efi_enter32) 134 - movl $__KERNEL_DS, %eax 135 - movl %eax, %ds 136 - movl %eax, %es 137 - movl %eax, %ss 138 - 139 - call *%edi 140 - 141 - /* We must preserve return value */ 142 - movl %eax, %edi 143 - 144 - movl 72(%esp), %eax 145 - pushl $__KERNEL_CS 146 - pushl %eax 147 - 68 + 2: pushl $__KERNEL_CS 69 + pushl %ebp 148 70 lret 149 - SYM_FUNC_END(efi_enter32) 150 - 151 - .data 152 - .balign 8 153 - func_rt_ptr: .quad 0 154 - efi_saved_sp: .quad 0 71 + SYM_CODE_END(__efi64_thunk)
+24 -22
arch/x86/platform/efi/quirks.c
··· 244 244 */ 245 245 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) 246 246 { 247 - phys_addr_t new_phys, new_size; 247 + struct efi_memory_map_data data = { 0 }; 248 248 struct efi_mem_range mr; 249 249 efi_memory_desc_t md; 250 250 int num_entries; ··· 272 272 num_entries = efi_memmap_split_count(&md, &mr.range); 273 273 num_entries += efi.memmap.nr_map; 274 274 275 - new_size = efi.memmap.desc_size * num_entries; 276 - 277 - new_phys = efi_memmap_alloc(num_entries); 278 - if (!new_phys) { 275 + if (efi_memmap_alloc(num_entries, &data) != 0) { 279 276 pr_err("Could not allocate boot services memmap\n"); 280 277 return; 281 278 } 282 279 283 - new = early_memremap(new_phys, new_size); 280 + new = early_memremap(data.phys_map, data.size); 284 281 if (!new) { 285 282 pr_err("Failed to map new boot services memmap\n"); 286 283 return; 287 284 } 288 285 289 286 efi_memmap_insert(&efi.memmap, new, &mr); 290 - early_memunmap(new, new_size); 287 + early_memunmap(new, data.size); 291 288 292 - efi_memmap_install(new_phys, num_entries); 289 + efi_memmap_install(&data); 293 290 e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); 294 291 e820__update_table(e820_table); 295 292 } ··· 382 385 383 386 /* 384 387 * To Do: Remove this check after adding functionality to unmap EFI boot 385 - * services code/data regions from direct mapping area because 386 - * "efi=old_map" maps EFI regions in swapper_pg_dir. 388 + * services code/data regions from direct mapping area because the UV1 389 + * memory map maps EFI regions in swapper_pg_dir. 387 390 */ 388 - if (efi_enabled(EFI_OLD_MEMMAP)) 391 + if (efi_have_uv1_memmap()) 389 392 return; 390 393 391 394 /* ··· 393 396 * EFI runtime calls, hence don't unmap EFI boot services code/data 394 397 * regions. 
395 398 */ 396 - if (!efi_is_native()) 399 + if (efi_is_mixed()) 397 400 return; 398 401 399 402 if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages)) ··· 405 408 406 409 void __init efi_free_boot_services(void) 407 410 { 408 - phys_addr_t new_phys, new_size; 411 + struct efi_memory_map_data data = { 0 }; 409 412 efi_memory_desc_t *md; 410 413 int num_entries = 0; 411 414 void *new, *new_md; ··· 460 463 if (!num_entries) 461 464 return; 462 465 463 - new_size = efi.memmap.desc_size * num_entries; 464 - new_phys = efi_memmap_alloc(num_entries); 465 - if (!new_phys) { 466 + if (efi_memmap_alloc(num_entries, &data) != 0) { 466 467 pr_err("Failed to allocate new EFI memmap\n"); 467 468 return; 468 469 } 469 470 470 - new = memremap(new_phys, new_size, MEMREMAP_WB); 471 + new = memremap(data.phys_map, data.size, MEMREMAP_WB); 471 472 if (!new) { 472 473 pr_err("Failed to map new EFI memmap\n"); 473 474 return; ··· 489 494 490 495 memunmap(new); 491 496 492 - if (efi_memmap_install(new_phys, num_entries)) { 497 + if (efi_memmap_install(&data) != 0) { 493 498 pr_err("Could not install new EFI memmap\n"); 494 499 return; 495 500 } ··· 554 559 return ret; 555 560 } 556 561 557 - static const struct dmi_system_id sgi_uv1_dmi[] = { 562 + static const struct dmi_system_id sgi_uv1_dmi[] __initconst = { 558 563 { NULL, "SGI UV1", 559 564 { DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"), 560 565 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), ··· 577 582 } 578 583 579 584 /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. 
*/ 580 - if (dmi_check_system(sgi_uv1_dmi)) 581 - set_bit(EFI_OLD_MEMMAP, &efi.flags); 585 + if (dmi_check_system(sgi_uv1_dmi)) { 586 + if (IS_ENABLED(CONFIG_X86_UV)) { 587 + set_bit(EFI_UV1_MEMMAP, &efi.flags); 588 + } else { 589 + pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n"); 590 + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 591 + efi_memmap_unmap(); 592 + } 593 + } 582 594 } 583 595 584 596 /* ··· 723 721 /* 724 722 * Make sure that an efi runtime service caused the page fault. 725 723 * "efi_mm" cannot be used to check if the page fault had occurred 726 - * in the firmware context because efi=old_map doesn't use efi_pgd. 724 + * in the firmware context because the UV1 memmap doesn't use efi_pgd. 727 725 */ 728 726 if (efi_rts_work.efi_rts_id == EFI_NONE) 729 727 return;
+166 -3
arch/x86/platform/uv/bios_uv.c
··· 31 31 return BIOS_STATUS_UNIMPLEMENTED; 32 32 33 33 /* 34 - * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI 34 + * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI 35 35 * callback method, which uses efi_call() directly, with the kernel page tables: 36 36 */ 37 - if (unlikely(efi_enabled(EFI_OLD_MEMMAP))) 37 + if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) { 38 + kernel_fpu_begin(); 38 39 ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5); 39 - else 40 + kernel_fpu_end(); 41 + } else { 40 42 ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); 43 + } 41 44 42 45 return ret; 43 46 } ··· 217 214 pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision); 218 215 return 0; 219 216 } 217 + 218 + static void __init early_code_mapping_set_exec(int executable) 219 + { 220 + efi_memory_desc_t *md; 221 + 222 + if (!(__supported_pte_mask & _PAGE_NX)) 223 + return; 224 + 225 + /* Make EFI service code area executable */ 226 + for_each_efi_memory_desc(md) { 227 + if (md->type == EFI_RUNTIME_SERVICES_CODE || 228 + md->type == EFI_BOOT_SERVICES_CODE) 229 + efi_set_executable(md, executable); 230 + } 231 + } 232 + 233 + void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd) 234 + { 235 + /* 236 + * After the lock is released, the original page table is restored. 
237 + */ 238 + int pgd_idx, i; 239 + int nr_pgds; 240 + pgd_t *pgd; 241 + p4d_t *p4d; 242 + pud_t *pud; 243 + 244 + nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); 245 + 246 + for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { 247 + pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); 248 + set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); 249 + 250 + if (!pgd_present(*pgd)) 251 + continue; 252 + 253 + for (i = 0; i < PTRS_PER_P4D; i++) { 254 + p4d = p4d_offset(pgd, 255 + pgd_idx * PGDIR_SIZE + i * P4D_SIZE); 256 + 257 + if (!p4d_present(*p4d)) 258 + continue; 259 + 260 + pud = (pud_t *)p4d_page_vaddr(*p4d); 261 + pud_free(&init_mm, pud); 262 + } 263 + 264 + p4d = (p4d_t *)pgd_page_vaddr(*pgd); 265 + p4d_free(&init_mm, p4d); 266 + } 267 + 268 + kfree(save_pgd); 269 + 270 + __flush_tlb_all(); 271 + early_code_mapping_set_exec(0); 272 + } 273 + 274 + pgd_t * __init efi_uv1_memmap_phys_prolog(void) 275 + { 276 + unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; 277 + pgd_t *save_pgd, *pgd_k, *pgd_efi; 278 + p4d_t *p4d, *p4d_k, *p4d_efi; 279 + pud_t *pud; 280 + 281 + int pgd; 282 + int n_pgds, i, j; 283 + 284 + early_code_mapping_set_exec(1); 285 + 286 + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 287 + save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); 288 + if (!save_pgd) 289 + return NULL; 290 + 291 + /* 292 + * Build 1:1 identity mapping for UV1 memmap usage. Note that 293 + * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while 294 + * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical 295 + * address X, the pud_index(X) != pud_index(__va(X)), we can only copy 296 + * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping. 297 + * This means here we can only reuse the PMD tables of the direct mapping. 
298 + */ 299 + for (pgd = 0; pgd < n_pgds; pgd++) { 300 + addr_pgd = (unsigned long)(pgd * PGDIR_SIZE); 301 + vaddr = (unsigned long)__va(pgd * PGDIR_SIZE); 302 + pgd_efi = pgd_offset_k(addr_pgd); 303 + save_pgd[pgd] = *pgd_efi; 304 + 305 + p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd); 306 + if (!p4d) { 307 + pr_err("Failed to allocate p4d table!\n"); 308 + goto out; 309 + } 310 + 311 + for (i = 0; i < PTRS_PER_P4D; i++) { 312 + addr_p4d = addr_pgd + i * P4D_SIZE; 313 + p4d_efi = p4d + p4d_index(addr_p4d); 314 + 315 + pud = pud_alloc(&init_mm, p4d_efi, addr_p4d); 316 + if (!pud) { 317 + pr_err("Failed to allocate pud table!\n"); 318 + goto out; 319 + } 320 + 321 + for (j = 0; j < PTRS_PER_PUD; j++) { 322 + addr_pud = addr_p4d + j * PUD_SIZE; 323 + 324 + if (addr_pud > (max_pfn << PAGE_SHIFT)) 325 + break; 326 + 327 + vaddr = (unsigned long)__va(addr_pud); 328 + 329 + pgd_k = pgd_offset_k(vaddr); 330 + p4d_k = p4d_offset(pgd_k, vaddr); 331 + pud[j] = *pud_offset(p4d_k, vaddr); 332 + } 333 + } 334 + pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; 335 + } 336 + 337 + __flush_tlb_all(); 338 + return save_pgd; 339 + out: 340 + efi_uv1_memmap_phys_epilog(save_pgd); 341 + return NULL; 342 + } 343 + 344 + void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, 345 + u32 type, u64 attribute) 346 + { 347 + unsigned long last_map_pfn; 348 + 349 + if (type == EFI_MEMORY_MAPPED_IO) 350 + return ioremap(phys_addr, size); 351 + 352 + last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); 353 + if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { 354 + unsigned long top = last_map_pfn << PAGE_SHIFT; 355 + efi_ioremap(top, size - (top - phys_addr), type, attribute); 356 + } 357 + 358 + if (!(attribute & EFI_MEMORY_WB)) 359 + efi_memory_uc((u64)(unsigned long)__va(phys_addr), size); 360 + 361 + return (void __iomem *)__va(phys_addr); 362 + } 363 + 364 + static int __init arch_parse_efi_cmdline(char *str) 365 + { 366 + if (!str) { 367 + 
pr_warn("need at least one option\n"); 368 + return -EINVAL; 369 + } 370 + 371 + if (!efi_is_mixed() && parse_option_str(str, "old_map")) 372 + set_bit(EFI_UV1_MEMMAP, &efi.flags); 373 + 374 + return 0; 375 + } 376 + early_param("efi", arch_parse_efi_cmdline);
+1 -1
arch/x86/xen/efi.c
··· 31 31 .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 32 32 .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 33 33 .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 34 - .con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 34 + .con_out = NULL, /* Not used under Xen. */ 35 35 .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 36 36 .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ 37 37 .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
+1 -1
arch/x86/xen/mmu_pv.c
··· 67 67 #include <asm/linkage.h> 68 68 #include <asm/page.h> 69 69 #include <asm/init.h> 70 - #include <asm/pat.h> 70 + #include <asm/memtype.h> 71 71 #include <asm/smp.h> 72 72 #include <asm/tlb.h> 73 73
+4
arch/xtensa/include/asm/vmalloc.h
··· 1 + #ifndef _ASM_XTENSA_VMALLOC_H 2 + #define _ASM_XTENSA_VMALLOC_H 3 + 4 + #endif /* _ASM_XTENSA_VMALLOC_H */
+22
drivers/firmware/efi/Kconfig
··· 215 215 216 216 Say Y here for Dell EMC PowerEdge systems. 217 217 218 + config EFI_DISABLE_PCI_DMA 219 + bool "Clear Busmaster bit on PCI bridges during ExitBootServices()" 220 + help 221 + Disable the busmaster bit in the control register on all PCI bridges 222 + while calling ExitBootServices() and passing control to the runtime 223 + kernel. System firmware may configure the IOMMU to prevent malicious 224 + PCI devices from being able to attack the OS via DMA. However, since 225 + firmware can't guarantee that the OS is IOMMU-aware, it will tear 226 + down IOMMU configuration when ExitBootServices() is called. This 227 + leaves a window between where a hostile device could still cause 228 + damage before Linux configures the IOMMU again. 229 + 230 + If you say Y here, the EFI stub will clear the busmaster bit on all 231 + PCI bridges before ExitBootServices() is called. This will prevent 232 + any malicious PCI devices from being able to perform DMA until the 233 + kernel reenables busmastering after configuring the IOMMU. 234 + 235 + This option will cause failures with some poorly behaved hardware 236 + and should not be enabled without testing. The kernel commandline 237 + options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma" 238 + may be used to override this option. 239 + 218 240 endmenu 219 241 220 242 config UEFI_CPER
+103 -4
drivers/firmware/efi/arm-init.c
··· 10 10 #define pr_fmt(fmt) "efi: " fmt 11 11 12 12 #include <linux/efi.h> 13 + #include <linux/fwnode.h> 13 14 #include <linux/init.h> 14 15 #include <linux/memblock.h> 15 16 #include <linux/mm_types.h> 16 17 #include <linux/of.h> 18 + #include <linux/of_address.h> 17 19 #include <linux/of_fdt.h> 18 20 #include <linux/platform_device.h> 19 21 #include <linux/screen_info.h> ··· 278 276 efi_memmap_unmap(); 279 277 } 280 278 279 + static bool efifb_overlaps_pci_range(const struct of_pci_range *range) 280 + { 281 + u64 fb_base = screen_info.lfb_base; 282 + 283 + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) 284 + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; 285 + 286 + return fb_base >= range->cpu_addr && 287 + fb_base < (range->cpu_addr + range->size); 288 + } 289 + 290 + static struct device_node *find_pci_overlap_node(void) 291 + { 292 + struct device_node *np; 293 + 294 + for_each_node_by_type(np, "pci") { 295 + struct of_pci_range_parser parser; 296 + struct of_pci_range range; 297 + int err; 298 + 299 + err = of_pci_range_parser_init(&parser, np); 300 + if (err) { 301 + pr_warn("of_pci_range_parser_init() failed: %d\n", err); 302 + continue; 303 + } 304 + 305 + for_each_of_pci_range(&parser, &range) 306 + if (efifb_overlaps_pci_range(&range)) 307 + return np; 308 + } 309 + return NULL; 310 + } 311 + 312 + /* 313 + * If the efifb framebuffer is backed by a PCI graphics controller, we have 314 + * to ensure that this relation is expressed using a device link when 315 + * running in DT mode, or the probe order may be reversed, resulting in a 316 + * resource reservation conflict on the memory window that the efifb 317 + * framebuffer steals from the PCIe host bridge. 
318 + */ 319 + static int efifb_add_links(const struct fwnode_handle *fwnode, 320 + struct device *dev) 321 + { 322 + struct device_node *sup_np; 323 + struct device *sup_dev; 324 + 325 + sup_np = find_pci_overlap_node(); 326 + 327 + /* 328 + * If there's no PCI graphics controller backing the efifb, we are 329 + * done here. 330 + */ 331 + if (!sup_np) 332 + return 0; 333 + 334 + sup_dev = get_dev_from_fwnode(&sup_np->fwnode); 335 + of_node_put(sup_np); 336 + 337 + /* 338 + * Return -ENODEV if the PCI graphics controller device hasn't been 339 + * registered yet. This ensures that efifb isn't allowed to probe 340 + * and this function is retried again when new devices are 341 + * registered. 342 + */ 343 + if (!sup_dev) 344 + return -ENODEV; 345 + 346 + /* 347 + * If this fails, retrying this function at a later point won't 348 + * change anything. So, don't return an error after this. 349 + */ 350 + if (!device_link_add(dev, sup_dev, 0)) 351 + dev_warn(dev, "device_link_add() failed\n"); 352 + 353 + put_device(sup_dev); 354 + 355 + return 0; 356 + } 357 + 358 + static const struct fwnode_operations efifb_fwnode_ops = { 359 + .add_links = efifb_add_links, 360 + }; 361 + 362 + static struct fwnode_handle efifb_fwnode = { 363 + .ops = &efifb_fwnode_ops, 364 + }; 365 + 281 366 static int __init register_gop_device(void) 282 367 { 283 - void *pd; 368 + struct platform_device *pd; 369 + int err; 284 370 285 371 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 286 372 return 0; 287 373 288 - pd = platform_device_register_data(NULL, "efi-framebuffer", 0, 289 - &screen_info, sizeof(screen_info)); 290 - return PTR_ERR_OR_ZERO(pd); 374 + pd = platform_device_alloc("efi-framebuffer", 0); 375 + if (!pd) 376 + return -ENOMEM; 377 + 378 + if (IS_ENABLED(CONFIG_PCI)) 379 + pd->dev.fwnode = &efifb_fwnode; 380 + 381 + err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); 382 + if (err) 383 + return err; 384 + 385 + return platform_device_add(pd); 291 386 } 292 
387 subsys_initcall(register_gop_device);
+1 -1
drivers/firmware/efi/efi.c
··· 908 908 * 909 909 * Search in the EFI memory map for the region covering @phys_addr. 910 910 * Returns the EFI memory type if the region was found in the memory 911 - * map, EFI_RESERVED_TYPE (zero) otherwise. 911 + * map, -EINVAL otherwise. 912 912 */ 913 913 int efi_mem_type(unsigned long phys_addr) 914 914 {
+29 -30
drivers/firmware/efi/fake_mem.c
··· 34 34 return 0; 35 35 } 36 36 37 - void __init efi_fake_memmap(void) 37 + static void __init efi_fake_range(struct efi_mem_range *efi_range) 38 38 { 39 + struct efi_memory_map_data data = { 0 }; 39 40 int new_nr_map = efi.memmap.nr_map; 40 41 efi_memory_desc_t *md; 41 - phys_addr_t new_memmap_phy; 42 42 void *new_memmap; 43 + 44 + /* count up the number of EFI memory descriptor */ 45 + for_each_efi_memory_desc(md) 46 + new_nr_map += efi_memmap_split_count(md, &efi_range->range); 47 + 48 + /* allocate memory for new EFI memmap */ 49 + if (efi_memmap_alloc(new_nr_map, &data) != 0) 50 + return; 51 + 52 + /* create new EFI memmap */ 53 + new_memmap = early_memremap(data.phys_map, data.size); 54 + if (!new_memmap) { 55 + __efi_memmap_free(data.phys_map, data.size, data.flags); 56 + return; 57 + } 58 + 59 + efi_memmap_insert(&efi.memmap, new_memmap, efi_range); 60 + 61 + /* swap into new EFI memmap */ 62 + early_memunmap(new_memmap, data.size); 63 + 64 + efi_memmap_install(&data); 65 + } 66 + 67 + void __init efi_fake_memmap(void) 68 + { 43 69 int i; 44 70 45 71 if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) 46 72 return; 47 73 48 - /* count up the number of EFI memory descriptor */ 49 - for (i = 0; i < nr_fake_mem; i++) { 50 - for_each_efi_memory_desc(md) { 51 - struct range *r = &efi_fake_mems[i].range; 52 - 53 - new_nr_map += efi_memmap_split_count(md, r); 54 - } 55 - } 56 - 57 - /* allocate memory for new EFI memmap */ 58 - new_memmap_phy = efi_memmap_alloc(new_nr_map); 59 - if (!new_memmap_phy) 60 - return; 61 - 62 - /* create new EFI memmap */ 63 - new_memmap = early_memremap(new_memmap_phy, 64 - efi.memmap.desc_size * new_nr_map); 65 - if (!new_memmap) { 66 - memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map); 67 - return; 68 - } 69 - 70 74 for (i = 0; i < nr_fake_mem; i++) 71 - efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]); 72 - 73 - /* swap into new EFI memmap */ 74 - early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map); 
75 - 76 - efi_memmap_install(new_memmap_phy, new_nr_map); 75 + efi_fake_range(&efi_fake_mems[i]); 77 76 78 77 /* print new EFI memmap */ 79 78 efi_print_memmap();
+1 -1
drivers/firmware/efi/libstub/Makefile
··· 39 39 KCOV_INSTRUMENT := n 40 40 41 41 lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ 42 - random.o 42 + random.o pci.o 43 43 44 44 # include the stub's generic dependencies from lib/ when building for ARM/arm64 45 45 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+53 -57
drivers/firmware/efi/libstub/arm-stub.c
··· 37 37 38 38 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; 39 39 40 - void efi_char16_printk(efi_system_table_t *sys_table_arg, 41 - efi_char16_t *str) 42 - { 43 - struct efi_simple_text_output_protocol *out; 40 + static efi_system_table_t *__efistub_global sys_table; 44 41 45 - out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out; 46 - out->output_string(out, str); 42 + __pure efi_system_table_t *efi_system_table(void) 43 + { 44 + return sys_table; 47 45 } 48 46 49 - static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) 47 + static struct screen_info *setup_graphics(void) 50 48 { 51 49 efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; 52 50 efi_status_t status; ··· 53 55 struct screen_info *si = NULL; 54 56 55 57 size = 0; 56 - status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, 57 - &gop_proto, NULL, &size, gop_handle); 58 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, 59 + &gop_proto, NULL, &size, gop_handle); 58 60 if (status == EFI_BUFFER_TOO_SMALL) { 59 - si = alloc_screen_info(sys_table_arg); 61 + si = alloc_screen_info(); 60 62 if (!si) 61 63 return NULL; 62 - efi_setup_gop(sys_table_arg, si, &gop_proto, size); 64 + efi_setup_gop(si, &gop_proto, size); 63 65 } 64 66 return si; 65 67 } 66 68 67 - void install_memreserve_table(efi_system_table_t *sys_table_arg) 69 + void install_memreserve_table(void) 68 70 { 69 71 struct linux_efi_memreserve *rsv; 70 72 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 71 73 efi_status_t status; 72 74 73 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 74 - (void **)&rsv); 75 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 76 + (void **)&rsv); 75 77 if (status != EFI_SUCCESS) { 76 - pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); 78 + pr_efi_err("Failed to allocate memreserve entry!\n"); 77 79 return; 78 80 } 79 81 ··· 81 83 rsv->size = 0; 82 84 
atomic_set(&rsv->count, 0); 83 85 84 - status = efi_call_early(install_configuration_table, 85 - &memreserve_table_guid, 86 - rsv); 86 + status = efi_bs_call(install_configuration_table, 87 + &memreserve_table_guid, rsv); 87 88 if (status != EFI_SUCCESS) 88 - pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); 89 + pr_efi_err("Failed to install memreserve config table!\n"); 89 90 } 90 91 91 92 ··· 94 97 * must be reserved. On failure it is required to free all 95 98 * all allocations it has made. 96 99 */ 97 - efi_status_t handle_kernel_image(efi_system_table_t *sys_table, 98 - unsigned long *image_addr, 100 + efi_status_t handle_kernel_image(unsigned long *image_addr, 99 101 unsigned long *image_size, 100 102 unsigned long *reserve_addr, 101 103 unsigned long *reserve_size, ··· 106 110 * for both archictectures, with the arch-specific code provided in the 107 111 * handle_kernel_image() function. 108 112 */ 109 - unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, 113 + unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, 110 114 unsigned long *image_addr) 111 115 { 112 116 efi_loaded_image_t *image; ··· 127 131 enum efi_secureboot_mode secure_boot; 128 132 struct screen_info *si; 129 133 134 + sys_table = sys_table_arg; 135 + 130 136 /* Check if we were booted by the EFI firmware */ 131 137 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 132 138 goto fail; 133 139 134 - status = check_platform_features(sys_table); 140 + status = check_platform_features(); 135 141 if (status != EFI_SUCCESS) 136 142 goto fail; 137 143 ··· 145 147 status = sys_table->boottime->handle_protocol(handle, 146 148 &loaded_image_proto, (void *)&image); 147 149 if (status != EFI_SUCCESS) { 148 - pr_efi_err(sys_table, "Failed to get loaded image protocol\n"); 150 + pr_efi_err("Failed to get loaded image protocol\n"); 149 151 goto fail; 150 152 } 151 153 152 - dram_base = get_dram_base(sys_table); 154 + dram_base = 
get_dram_base(); 153 155 if (dram_base == EFI_ERROR) { 154 - pr_efi_err(sys_table, "Failed to find DRAM base\n"); 156 + pr_efi_err("Failed to find DRAM base\n"); 155 157 goto fail; 156 158 } 157 159 ··· 160 162 * protocol. We are going to copy the command line into the 161 163 * device tree, so this can be allocated anywhere. 162 164 */ 163 - cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size); 165 + cmdline_ptr = efi_convert_cmdline(image, &cmdline_size); 164 166 if (!cmdline_ptr) { 165 - pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); 167 + pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); 166 168 goto fail; 167 169 } 168 170 ··· 174 176 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) 175 177 efi_parse_options(cmdline_ptr); 176 178 177 - pr_efi(sys_table, "Booting Linux Kernel...\n"); 179 + pr_efi("Booting Linux Kernel...\n"); 178 180 179 - si = setup_graphics(sys_table); 181 + si = setup_graphics(); 180 182 181 - status = handle_kernel_image(sys_table, image_addr, &image_size, 183 + status = handle_kernel_image(image_addr, &image_size, 182 184 &reserve_addr, 183 185 &reserve_size, 184 186 dram_base, image); 185 187 if (status != EFI_SUCCESS) { 186 - pr_efi_err(sys_table, "Failed to relocate kernel\n"); 188 + pr_efi_err("Failed to relocate kernel\n"); 187 189 goto fail_free_cmdline; 188 190 } 189 191 190 - efi_retrieve_tpm2_eventlog(sys_table); 192 + efi_retrieve_tpm2_eventlog(); 191 193 192 194 /* Ask the firmware to clear memory on unclean shutdown */ 193 - efi_enable_reset_attack_mitigation(sys_table); 195 + efi_enable_reset_attack_mitigation(); 194 196 195 - secure_boot = efi_get_secureboot(sys_table); 197 + secure_boot = efi_get_secureboot(); 196 198 197 199 /* 198 200 * Unauthenticated device tree data is a security hazard, so ignore ··· 202 204 if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || 203 205 secure_boot != efi_secureboot_mode_disabled) { 204 206 if (strstr(cmdline_ptr, "dtb=")) 
205 - pr_efi(sys_table, "Ignoring DTB from command line.\n"); 207 + pr_efi("Ignoring DTB from command line.\n"); 206 208 } else { 207 - status = handle_cmdline_files(sys_table, image, cmdline_ptr, 208 - "dtb=", 209 + status = handle_cmdline_files(image, cmdline_ptr, "dtb=", 209 210 ~0UL, &fdt_addr, &fdt_size); 210 211 211 212 if (status != EFI_SUCCESS) { 212 - pr_efi_err(sys_table, "Failed to load device tree!\n"); 213 + pr_efi_err("Failed to load device tree!\n"); 213 214 goto fail_free_image; 214 215 } 215 216 } 216 217 217 218 if (fdt_addr) { 218 - pr_efi(sys_table, "Using DTB from command line\n"); 219 + pr_efi("Using DTB from command line\n"); 219 220 } else { 220 221 /* Look for a device tree configuration table entry. */ 221 - fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size); 222 + fdt_addr = (uintptr_t)get_fdt(&fdt_size); 222 223 if (fdt_addr) 223 - pr_efi(sys_table, "Using DTB from configuration table\n"); 224 + pr_efi("Using DTB from configuration table\n"); 224 225 } 225 226 226 227 if (!fdt_addr) 227 - pr_efi(sys_table, "Generating empty DTB\n"); 228 + pr_efi("Generating empty DTB\n"); 228 229 229 - status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=", 230 + status = handle_cmdline_files(image, cmdline_ptr, "initrd=", 230 231 efi_get_max_initrd_addr(dram_base, 231 232 *image_addr), 232 233 (unsigned long *)&initrd_addr, 233 234 (unsigned long *)&initrd_size); 234 235 if (status != EFI_SUCCESS) 235 - pr_efi_err(sys_table, "Failed initrd from command line!\n"); 236 + pr_efi_err("Failed initrd from command line!\n"); 236 237 237 - efi_random_get_seed(sys_table); 238 + efi_random_get_seed(); 238 239 239 240 /* hibernation expects the runtime regions to stay in the same place */ 240 241 if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { ··· 248 251 EFI_RT_VIRTUAL_SIZE; 249 252 u32 rnd; 250 253 251 - status = efi_get_random_bytes(sys_table, sizeof(rnd), 252 - (u8 *)&rnd); 254 + status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd); 253 
255 if (status == EFI_SUCCESS) { 254 256 virtmap_base = EFI_RT_VIRTUAL_BASE + 255 257 (((headroom >> 21) * rnd) >> (32 - 21)); 256 258 } 257 259 } 258 260 259 - install_memreserve_table(sys_table); 261 + install_memreserve_table(); 260 262 261 263 new_fdt_addr = fdt_addr; 262 - status = allocate_new_fdt_and_exit_boot(sys_table, handle, 264 + status = allocate_new_fdt_and_exit_boot(handle, 263 265 &new_fdt_addr, efi_get_max_fdt_addr(dram_base), 264 266 initrd_addr, initrd_size, cmdline_ptr, 265 267 fdt_addr, fdt_size); ··· 271 275 if (status == EFI_SUCCESS) 272 276 return new_fdt_addr; 273 277 274 - pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n"); 278 + pr_efi_err("Failed to update FDT and exit boot services\n"); 275 279 276 - efi_free(sys_table, initrd_size, initrd_addr); 277 - efi_free(sys_table, fdt_size, fdt_addr); 280 + efi_free(initrd_size, initrd_addr); 281 + efi_free(fdt_size, fdt_addr); 278 282 279 283 fail_free_image: 280 - efi_free(sys_table, image_size, *image_addr); 281 - efi_free(sys_table, reserve_size, reserve_addr); 284 + efi_free(image_size, *image_addr); 285 + efi_free(reserve_size, reserve_addr); 282 286 fail_free_cmdline: 283 - free_screen_info(sys_table, si); 284 - efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); 287 + free_screen_info(si); 288 + efi_free(cmdline_size, (unsigned long)cmdline_ptr); 285 289 fail: 286 290 return EFI_ERROR; 287 291 }
+32 -38
drivers/firmware/efi/libstub/arm32-stub.c
··· 7 7 8 8 #include "efistub.h" 9 9 10 - efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) 10 + efi_status_t check_platform_features(void) 11 11 { 12 12 int block; 13 13 ··· 18 18 /* LPAE kernels need compatible hardware */ 19 19 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); 20 20 if (block < 5) { 21 - pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n"); 21 + pr_efi_err("This LPAE kernel is not supported by your CPU\n"); 22 22 return EFI_UNSUPPORTED; 23 23 } 24 24 return EFI_SUCCESS; ··· 26 26 27 27 static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; 28 28 29 - struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) 29 + struct screen_info *alloc_screen_info(void) 30 30 { 31 31 struct screen_info *si; 32 32 efi_status_t status; ··· 37 37 * its contents while we hand over to the kernel proper from the 38 38 * decompressor. 39 39 */ 40 - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, 41 - sizeof(*si), (void **)&si); 40 + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, 41 + sizeof(*si), (void **)&si); 42 42 43 43 if (status != EFI_SUCCESS) 44 44 return NULL; 45 45 46 - status = efi_call_early(install_configuration_table, 47 - &screen_info_guid, si); 46 + status = efi_bs_call(install_configuration_table, 47 + &screen_info_guid, si); 48 48 if (status == EFI_SUCCESS) 49 49 return si; 50 50 51 - efi_call_early(free_pool, si); 51 + efi_bs_call(free_pool, si); 52 52 return NULL; 53 53 } 54 54 55 - void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si) 55 + void free_screen_info(struct screen_info *si) 56 56 { 57 57 if (!si) 58 58 return; 59 59 60 - efi_call_early(install_configuration_table, &screen_info_guid, NULL); 61 - efi_call_early(free_pool, si); 60 + efi_bs_call(install_configuration_table, &screen_info_guid, NULL); 61 + efi_bs_call(free_pool, si); 62 62 } 63 63 64 - static efi_status_t 
reserve_kernel_base(efi_system_table_t *sys_table_arg, 65 - unsigned long dram_base, 64 + static efi_status_t reserve_kernel_base(unsigned long dram_base, 66 65 unsigned long *reserve_addr, 67 66 unsigned long *reserve_size) 68 67 { ··· 91 92 */ 92 93 alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE; 93 94 nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE; 94 - status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, 95 - EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); 95 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, 96 + EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); 96 97 if (status == EFI_SUCCESS) { 97 98 if (alloc_addr == dram_base) { 98 99 *reserve_addr = alloc_addr; ··· 118 119 * released to the OS after ExitBootServices(), the decompressor can 119 120 * safely overwrite them. 120 121 */ 121 - status = efi_get_memory_map(sys_table_arg, &map); 122 + status = efi_get_memory_map(&map); 122 123 if (status != EFI_SUCCESS) { 123 - pr_efi_err(sys_table_arg, 124 - "reserve_kernel_base(): Unable to retrieve memory map.\n"); 124 + pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n"); 125 125 return status; 126 126 } 127 127 ··· 156 158 start = max(start, (u64)dram_base); 157 159 end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE); 158 160 159 - status = efi_call_early(allocate_pages, 160 - EFI_ALLOCATE_ADDRESS, 161 - EFI_LOADER_DATA, 162 - (end - start) / EFI_PAGE_SIZE, 163 - &start); 161 + status = efi_bs_call(allocate_pages, 162 + EFI_ALLOCATE_ADDRESS, 163 + EFI_LOADER_DATA, 164 + (end - start) / EFI_PAGE_SIZE, 165 + &start); 164 166 if (status != EFI_SUCCESS) { 165 - pr_efi_err(sys_table_arg, 166 - "reserve_kernel_base(): alloc failed.\n"); 167 + pr_efi_err("reserve_kernel_base(): alloc failed.\n"); 167 168 goto out; 168 169 } 169 170 break; ··· 185 188 186 189 status = EFI_SUCCESS; 187 190 out: 188 - efi_call_early(free_pool, memory_map); 191 + efi_bs_call(free_pool, memory_map); 189 192 return status; 190 193 } 191 
194 192 - efi_status_t handle_kernel_image(efi_system_table_t *sys_table, 193 - unsigned long *image_addr, 195 + efi_status_t handle_kernel_image(unsigned long *image_addr, 194 196 unsigned long *image_size, 195 197 unsigned long *reserve_addr, 196 198 unsigned long *reserve_size, ··· 217 221 */ 218 222 kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE; 219 223 220 - status = reserve_kernel_base(sys_table, kernel_base, reserve_addr, 221 - reserve_size); 224 + status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size); 222 225 if (status != EFI_SUCCESS) { 223 - pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); 226 + pr_efi_err("Unable to allocate memory for uncompressed kernel.\n"); 224 227 return status; 225 228 } 226 229 ··· 228 233 * memory window. 229 234 */ 230 235 *image_size = image->image_size; 231 - status = efi_relocate_kernel(sys_table, image_addr, *image_size, 232 - *image_size, 236 + status = efi_relocate_kernel(image_addr, *image_size, *image_size, 233 237 kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); 234 238 if (status != EFI_SUCCESS) { 235 - pr_efi_err(sys_table, "Failed to relocate kernel.\n"); 236 - efi_free(sys_table, *reserve_size, *reserve_addr); 239 + pr_efi_err("Failed to relocate kernel.\n"); 240 + efi_free(*reserve_size, *reserve_addr); 237 241 *reserve_size = 0; 238 242 return status; 239 243 } ··· 243 249 * address at which the zImage is loaded. 244 250 */ 245 251 if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) { 246 - pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n"); 247 - efi_free(sys_table, *reserve_size, *reserve_addr); 252 + pr_efi_err("Failed to relocate kernel, no low memory available.\n"); 253 + efi_free(*reserve_size, *reserve_addr); 248 254 *reserve_size = 0; 249 - efi_free(sys_table, *image_size, *image_addr); 255 + efi_free(*image_size, *image_addr); 250 256 *image_size = 0; 251 257 return EFI_LOAD_ERROR; 252 258 }
+15 -17
drivers/firmware/efi/libstub/arm64-stub.c
··· 21 21 22 22 #include "efistub.h" 23 23 24 - efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) 24 + efi_status_t check_platform_features(void) 25 25 { 26 26 u64 tg; 27 27 ··· 32 32 tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; 33 33 if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { 34 34 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) 35 - pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n"); 35 + pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n"); 36 36 else 37 - pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n"); 37 + pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n"); 38 38 return EFI_UNSUPPORTED; 39 39 } 40 40 return EFI_SUCCESS; 41 41 } 42 42 43 - efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, 44 - unsigned long *image_addr, 43 + efi_status_t handle_kernel_image(unsigned long *image_addr, 45 44 unsigned long *image_size, 46 45 unsigned long *reserve_addr, 47 46 unsigned long *reserve_size, ··· 55 56 56 57 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 57 58 if (!nokaslr()) { 58 - status = efi_get_random_bytes(sys_table_arg, 59 - sizeof(phys_seed), 59 + status = efi_get_random_bytes(sizeof(phys_seed), 60 60 (u8 *)&phys_seed); 61 61 if (status == EFI_NOT_FOUND) { 62 - pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); 62 + pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); 63 63 } else if (status != EFI_SUCCESS) { 64 - pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n"); 64 + pr_efi_err("efi_get_random_bytes() failed\n"); 65 65 return status; 66 66 } 67 67 } else { 68 - pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n"); 68 + pr_efi("KASLR disabled on kernel command line\n"); 69 69 } 70 70 } 71 71 ··· 106 108 * locate the kernel at a randomized offset in physical memory. 
107 109 */ 108 110 *reserve_size = kernel_memsize + offset; 109 - status = efi_random_alloc(sys_table_arg, *reserve_size, 111 + status = efi_random_alloc(*reserve_size, 110 112 MIN_KIMG_ALIGN, reserve_addr, 111 113 (u32)phys_seed); 112 114 ··· 129 131 *image_addr = *reserve_addr = preferred_offset; 130 132 *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN); 131 133 132 - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, 133 - EFI_LOADER_DATA, 134 - *reserve_size / EFI_PAGE_SIZE, 135 - (efi_physical_addr_t *)reserve_addr); 134 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, 135 + EFI_LOADER_DATA, 136 + *reserve_size / EFI_PAGE_SIZE, 137 + (efi_physical_addr_t *)reserve_addr); 136 138 } 137 139 138 140 if (status != EFI_SUCCESS) { 139 141 *reserve_size = kernel_memsize + TEXT_OFFSET; 140 - status = efi_low_alloc(sys_table_arg, *reserve_size, 142 + status = efi_low_alloc(*reserve_size, 141 143 MIN_KIMG_ALIGN, reserve_addr); 142 144 143 145 if (status != EFI_SUCCESS) { 144 - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); 146 + pr_efi_err("Failed to relocate kernel\n"); 145 147 *reserve_size = 0; 146 148 return status; 147 149 }
+142 -152
drivers/firmware/efi/libstub/efi-stub-helper.c
··· 27 27 */ 28 28 #define EFI_READ_CHUNK_SIZE (1024 * 1024) 29 29 30 - static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; 30 + static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE; 31 31 32 - static int __section(.data) __nokaslr; 33 - static int __section(.data) __quiet; 34 - static int __section(.data) __novamap; 35 - static bool __section(.data) efi_nosoftreserve; 32 + static bool __efistub_global efi_nokaslr; 33 + static bool __efistub_global efi_quiet; 34 + static bool __efistub_global efi_novamap; 35 + static bool __efistub_global efi_nosoftreserve; 36 + static bool __efistub_global efi_disable_pci_dma = 37 + IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); 36 38 37 - int __pure nokaslr(void) 39 + bool __pure nokaslr(void) 38 40 { 39 - return __nokaslr; 41 + return efi_nokaslr; 40 42 } 41 - int __pure is_quiet(void) 43 + bool __pure is_quiet(void) 42 44 { 43 - return __quiet; 45 + return efi_quiet; 44 46 } 45 - int __pure novamap(void) 47 + bool __pure novamap(void) 46 48 { 47 - return __novamap; 49 + return efi_novamap; 48 50 } 49 51 bool __pure __efi_soft_reserve_enabled(void) 50 52 { ··· 60 58 u64 size; 61 59 }; 62 60 63 - void efi_printk(efi_system_table_t *sys_table_arg, char *str) 61 + void efi_printk(char *str) 64 62 { 65 63 char *s8; 66 64 ··· 70 68 ch[0] = *s8; 71 69 if (*s8 == '\n') { 72 70 efi_char16_t nl[2] = { '\r', 0 }; 73 - efi_char16_printk(sys_table_arg, nl); 71 + efi_char16_printk(nl); 74 72 } 75 73 76 - efi_char16_printk(sys_table_arg, ch); 74 + efi_char16_printk(ch); 77 75 } 78 76 } 79 77 ··· 86 84 return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; 87 85 } 88 86 89 - efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, 90 - struct efi_boot_memmap *map) 87 + efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) 91 88 { 92 89 efi_memory_desc_t *m = NULL; 93 90 efi_status_t status; ··· 97 96 *map->map_size = *map->desc_size * 32; 98 97 *map->buff_size = *map->map_size; 99 98 again: 100 - status = 
efi_call_early(allocate_pool, EFI_LOADER_DATA, 101 - *map->map_size, (void **)&m); 99 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, 100 + *map->map_size, (void **)&m); 102 101 if (status != EFI_SUCCESS) 103 102 goto fail; 104 103 105 104 *map->desc_size = 0; 106 105 key = 0; 107 - status = efi_call_early(get_memory_map, map->map_size, m, 108 - &key, map->desc_size, &desc_version); 106 + status = efi_bs_call(get_memory_map, map->map_size, m, 107 + &key, map->desc_size, &desc_version); 109 108 if (status == EFI_BUFFER_TOO_SMALL || 110 109 !mmap_has_headroom(*map->buff_size, *map->map_size, 111 110 *map->desc_size)) { 112 - efi_call_early(free_pool, m); 111 + efi_bs_call(free_pool, m); 113 112 /* 114 113 * Make sure there is some entries of headroom so that the 115 114 * buffer can be reused for a new map after allocations are ··· 123 122 } 124 123 125 124 if (status != EFI_SUCCESS) 126 - efi_call_early(free_pool, m); 125 + efi_bs_call(free_pool, m); 127 126 128 127 if (map->key_ptr && status == EFI_SUCCESS) 129 128 *map->key_ptr = key; ··· 136 135 } 137 136 138 137 139 - unsigned long get_dram_base(efi_system_table_t *sys_table_arg) 138 + unsigned long get_dram_base(void) 140 139 { 141 140 efi_status_t status; 142 141 unsigned long map_size, buff_size; ··· 152 151 boot_map.key_ptr = NULL; 153 152 boot_map.buff_size = &buff_size; 154 153 155 - status = efi_get_memory_map(sys_table_arg, &boot_map); 154 + status = efi_get_memory_map(&boot_map); 156 155 if (status != EFI_SUCCESS) 157 156 return membase; 158 157 ··· 165 164 } 166 165 } 167 166 168 - efi_call_early(free_pool, map.map); 167 + efi_bs_call(free_pool, map.map); 169 168 170 169 return membase; 171 170 } ··· 173 172 /* 174 173 * Allocate at the highest possible address that is not above 'max'. 
175 174 */ 176 - efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, 177 - unsigned long size, unsigned long align, 175 + efi_status_t efi_high_alloc(unsigned long size, unsigned long align, 178 176 unsigned long *addr, unsigned long max) 179 177 { 180 178 unsigned long map_size, desc_size, buff_size; ··· 191 191 boot_map.key_ptr = NULL; 192 192 boot_map.buff_size = &buff_size; 193 193 194 - status = efi_get_memory_map(sys_table_arg, &boot_map); 194 + status = efi_get_memory_map(&boot_map); 195 195 if (status != EFI_SUCCESS) 196 196 goto fail; 197 197 ··· 251 251 if (!max_addr) 252 252 status = EFI_NOT_FOUND; 253 253 else { 254 - status = efi_call_early(allocate_pages, 255 - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, 256 - nr_pages, &max_addr); 254 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, 255 + EFI_LOADER_DATA, nr_pages, &max_addr); 257 256 if (status != EFI_SUCCESS) { 258 257 max = max_addr; 259 258 max_addr = 0; ··· 262 263 *addr = max_addr; 263 264 } 264 265 265 - efi_call_early(free_pool, map); 266 + efi_bs_call(free_pool, map); 266 267 fail: 267 268 return status; 268 269 } ··· 270 271 /* 271 272 * Allocate at the lowest possible address that is not below 'min'. 
272 273 */ 273 - efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, 274 - unsigned long size, unsigned long align, 274 + efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, 275 275 unsigned long *addr, unsigned long min) 276 276 { 277 277 unsigned long map_size, desc_size, buff_size; ··· 287 289 boot_map.key_ptr = NULL; 288 290 boot_map.buff_size = &buff_size; 289 291 290 - status = efi_get_memory_map(sys_table_arg, &boot_map); 292 + status = efi_get_memory_map(&boot_map); 291 293 if (status != EFI_SUCCESS) 292 294 goto fail; 293 295 ··· 329 331 if ((start + size) > end) 330 332 continue; 331 333 332 - status = efi_call_early(allocate_pages, 333 - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, 334 - nr_pages, &start); 334 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, 335 + EFI_LOADER_DATA, nr_pages, &start); 335 336 if (status == EFI_SUCCESS) { 336 337 *addr = start; 337 338 break; ··· 340 343 if (i == map_size / desc_size) 341 344 status = EFI_NOT_FOUND; 342 345 343 - efi_call_early(free_pool, map); 346 + efi_bs_call(free_pool, map); 344 347 fail: 345 348 return status; 346 349 } 347 350 348 - void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, 349 - unsigned long addr) 351 + void efi_free(unsigned long size, unsigned long addr) 350 352 { 351 353 unsigned long nr_pages; 352 354 ··· 353 357 return; 354 358 355 359 nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; 356 - efi_call_early(free_pages, addr, nr_pages); 360 + efi_bs_call(free_pages, addr, nr_pages); 357 361 } 358 362 359 - static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, 360 - efi_char16_t *filename_16, void **handle, 361 - u64 *file_sz) 363 + static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16, 364 + void **handle, u64 *file_sz) 362 365 { 363 366 efi_file_handle_t *h, *fh = __fh; 364 367 efi_file_info_t *info; ··· 365 370 efi_guid_t info_guid = EFI_FILE_INFO_ID; 366 371 unsigned 
long info_sz; 367 372 368 - status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, 369 - EFI_FILE_MODE_READ, (u64)0); 373 + status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0); 370 374 if (status != EFI_SUCCESS) { 371 - efi_printk(sys_table_arg, "Failed to open file: "); 372 - efi_char16_printk(sys_table_arg, filename_16); 373 - efi_printk(sys_table_arg, "\n"); 375 + efi_printk("Failed to open file: "); 376 + efi_char16_printk(filename_16); 377 + efi_printk("\n"); 374 378 return status; 375 379 } 376 380 377 381 *handle = h; 378 382 379 383 info_sz = 0; 380 - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, 381 - &info_sz, NULL); 384 + status = h->get_info(h, &info_guid, &info_sz, NULL); 382 385 if (status != EFI_BUFFER_TOO_SMALL) { 383 - efi_printk(sys_table_arg, "Failed to get file info size\n"); 386 + efi_printk("Failed to get file info size\n"); 384 387 return status; 385 388 } 386 389 387 390 grow: 388 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 389 - info_sz, (void **)&info); 391 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz, 392 + (void **)&info); 390 393 if (status != EFI_SUCCESS) { 391 - efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); 394 + efi_printk("Failed to alloc mem for file info\n"); 392 395 return status; 393 396 } 394 397 395 - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, 396 - &info_sz, info); 398 + status = h->get_info(h, &info_guid, &info_sz, info); 397 399 if (status == EFI_BUFFER_TOO_SMALL) { 398 - efi_call_early(free_pool, info); 400 + efi_bs_call(free_pool, info); 399 401 goto grow; 400 402 } 401 403 402 404 *file_sz = info->file_size; 403 - efi_call_early(free_pool, info); 405 + efi_bs_call(free_pool, info); 404 406 405 407 if (status != EFI_SUCCESS) 406 - efi_printk(sys_table_arg, "Failed to get initrd info\n"); 408 + efi_printk("Failed to get initrd info\n"); 407 409 408 410 return status; 409 411 } 410 412 411 - static 
efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr) 413 + static efi_status_t efi_file_read(efi_file_handle_t *handle, 414 + unsigned long *size, void *addr) 412 415 { 413 - return efi_call_proto(efi_file_handle, read, handle, size, addr); 416 + return handle->read(handle, size, addr); 414 417 } 415 418 416 - static efi_status_t efi_file_close(void *handle) 419 + static efi_status_t efi_file_close(efi_file_handle_t *handle) 417 420 { 418 - return efi_call_proto(efi_file_handle, close, handle); 421 + return handle->close(handle); 419 422 } 420 423 421 - static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, 422 - efi_loaded_image_t *image, 424 + static efi_status_t efi_open_volume(efi_loaded_image_t *image, 423 425 efi_file_handle_t **__fh) 424 426 { 425 427 efi_file_io_interface_t *io; 426 428 efi_file_handle_t *fh; 427 429 efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; 428 430 efi_status_t status; 429 - void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image, 430 - device_handle, 431 - image); 431 + efi_handle_t handle = image->device_handle; 432 432 433 - status = efi_call_early(handle_protocol, handle, 434 - &fs_proto, (void **)&io); 433 + status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io); 435 434 if (status != EFI_SUCCESS) { 436 - efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); 435 + efi_printk("Failed to handle fs_proto\n"); 437 436 return status; 438 437 } 439 438 440 - status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh); 439 + status = io->open_volume(io, &fh); 441 440 if (status != EFI_SUCCESS) 442 - efi_printk(sys_table_arg, "Failed to open volume\n"); 441 + efi_printk("Failed to open volume\n"); 443 442 else 444 443 *__fh = fh; 445 444 ··· 454 465 455 466 str = strstr(cmdline, "nokaslr"); 456 467 if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) 457 - __nokaslr = 1; 468 + efi_nokaslr = true; 458 469 459 470 str = strstr(cmdline, "quiet"); 460 
471 if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) 461 - __quiet = 1; 472 + efi_quiet = true; 462 473 463 474 /* 464 475 * If no EFI parameters were specified on the cmdline we've got ··· 478 489 while (*str && *str != ' ') { 479 490 if (!strncmp(str, "nochunk", 7)) { 480 491 str += strlen("nochunk"); 481 - __chunk_size = -1UL; 492 + efi_chunk_size = -1UL; 482 493 } 483 494 484 495 if (!strncmp(str, "novamap", 7)) { 485 496 str += strlen("novamap"); 486 - __novamap = 1; 497 + efi_novamap = true; 487 498 } 488 499 489 500 if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && 490 501 !strncmp(str, "nosoftreserve", 7)) { 491 502 str += strlen("nosoftreserve"); 492 - efi_nosoftreserve = 1; 503 + efi_nosoftreserve = true; 504 + } 505 + 506 + if (!strncmp(str, "disable_early_pci_dma", 21)) { 507 + str += strlen("disable_early_pci_dma"); 508 + efi_disable_pci_dma = true; 509 + } 510 + 511 + if (!strncmp(str, "no_disable_early_pci_dma", 24)) { 512 + str += strlen("no_disable_early_pci_dma"); 513 + efi_disable_pci_dma = false; 493 514 } 494 515 495 516 /* Group words together, delimited by "," */ ··· 519 520 * We only support loading a file from the same filesystem as 520 521 * the kernel image. 
521 522 */ 522 - efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, 523 - efi_loaded_image_t *image, 523 + efi_status_t handle_cmdline_files(efi_loaded_image_t *image, 524 524 char *cmd_line, char *option_string, 525 525 unsigned long max_addr, 526 526 unsigned long *load_addr, ··· 568 570 if (!nr_files) 569 571 return EFI_SUCCESS; 570 572 571 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 572 - nr_files * sizeof(*files), (void **)&files); 573 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, 574 + nr_files * sizeof(*files), (void **)&files); 573 575 if (status != EFI_SUCCESS) { 574 - pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n"); 576 + pr_efi_err("Failed to alloc mem for file handle list\n"); 575 577 goto fail; 576 578 } 577 579 ··· 610 612 611 613 /* Only open the volume once. */ 612 614 if (!i) { 613 - status = efi_open_volume(sys_table_arg, image, &fh); 615 + status = efi_open_volume(image, &fh); 614 616 if (status != EFI_SUCCESS) 615 617 goto free_files; 616 618 } 617 619 618 - status = efi_file_size(sys_table_arg, fh, filename_16, 619 - (void **)&file->handle, &file->size); 620 + status = efi_file_size(fh, filename_16, (void **)&file->handle, 621 + &file->size); 620 622 if (status != EFI_SUCCESS) 621 623 goto close_handles; 622 624 ··· 631 633 * so allocate enough memory for all the files. This is used 632 634 * for loading multiple files. 633 635 */ 634 - status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000, 635 - &file_addr, max_addr); 636 + status = efi_high_alloc(file_size_total, 0x1000, &file_addr, 637 + max_addr); 636 638 if (status != EFI_SUCCESS) { 637 - pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n"); 639 + pr_efi_err("Failed to alloc highmem for files\n"); 638 640 goto close_handles; 639 641 } 640 642 641 643 /* We've run out of free low memory. 
*/ 642 644 if (file_addr > max_addr) { 643 - pr_efi_err(sys_table_arg, "We've run out of free low memory\n"); 645 + pr_efi_err("We've run out of free low memory\n"); 644 646 status = EFI_INVALID_PARAMETER; 645 647 goto free_file_total; 646 648 } ··· 653 655 while (size) { 654 656 unsigned long chunksize; 655 657 656 - if (IS_ENABLED(CONFIG_X86) && size > __chunk_size) 657 - chunksize = __chunk_size; 658 + if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size) 659 + chunksize = efi_chunk_size; 658 660 else 659 661 chunksize = size; 660 662 ··· 662 664 &chunksize, 663 665 (void *)addr); 664 666 if (status != EFI_SUCCESS) { 665 - pr_efi_err(sys_table_arg, "Failed to read file\n"); 667 + pr_efi_err("Failed to read file\n"); 666 668 goto free_file_total; 667 669 } 668 670 addr += chunksize; ··· 674 676 675 677 } 676 678 677 - efi_call_early(free_pool, files); 679 + efi_bs_call(free_pool, files); 678 680 679 681 *load_addr = file_addr; 680 682 *load_size = file_size_total; ··· 682 684 return status; 683 685 684 686 free_file_total: 685 - efi_free(sys_table_arg, file_size_total, file_addr); 687 + efi_free(file_size_total, file_addr); 686 688 687 689 close_handles: 688 690 for (k = j; k < i; k++) 689 691 efi_file_close(files[k].handle); 690 692 free_files: 691 - efi_call_early(free_pool, files); 693 + efi_bs_call(free_pool, files); 692 694 fail: 693 695 *load_addr = 0; 694 696 *load_size = 0; ··· 705 707 * address is not available the lowest available address will 706 708 * be used. 707 709 */ 708 - efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, 709 - unsigned long *image_addr, 710 + efi_status_t efi_relocate_kernel(unsigned long *image_addr, 710 711 unsigned long image_size, 711 712 unsigned long alloc_size, 712 713 unsigned long preferred_addr, ··· 734 737 * as possible while respecting the required alignment. 
735 738 */ 736 739 nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; 737 - status = efi_call_early(allocate_pages, 738 - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, 739 - nr_pages, &efi_addr); 740 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, 741 + EFI_LOADER_DATA, nr_pages, &efi_addr); 740 742 new_addr = efi_addr; 741 743 /* 742 744 * If preferred address allocation failed allocate as low as 743 745 * possible. 744 746 */ 745 747 if (status != EFI_SUCCESS) { 746 - status = efi_low_alloc_above(sys_table_arg, alloc_size, 747 - alignment, &new_addr, min_addr); 748 + status = efi_low_alloc_above(alloc_size, alignment, &new_addr, 749 + min_addr); 748 750 } 749 751 if (status != EFI_SUCCESS) { 750 - pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); 752 + pr_efi_err("Failed to allocate usable memory for kernel.\n"); 751 753 return status; 752 754 } 753 755 ··· 820 824 * Size of memory allocated return in *cmd_line_len. 821 825 * Returns NULL on error. 822 826 */ 823 - char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, 824 - efi_loaded_image_t *image, 827 + char *efi_convert_cmdline(efi_loaded_image_t *image, 825 828 int *cmd_line_len) 826 829 { 827 830 const u16 *s2; ··· 849 854 850 855 options_bytes++; /* NUL termination */ 851 856 852 - status = efi_high_alloc(sys_table_arg, options_bytes, 0, 853 - &cmdline_addr, MAX_CMDLINE_ADDRESS); 857 + status = efi_high_alloc(options_bytes, 0, &cmdline_addr, 858 + MAX_CMDLINE_ADDRESS); 854 859 if (status != EFI_SUCCESS) 855 860 return NULL; 856 861 ··· 872 877 * specific structure may be passed to the function via priv. The client 873 878 * function may be called multiple times. 
874 879 */ 875 - efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, 876 - void *handle, 880 + efi_status_t efi_exit_boot_services(void *handle, 877 881 struct efi_boot_memmap *map, 878 882 void *priv, 879 883 efi_exit_boot_map_processing priv_func) 880 884 { 881 885 efi_status_t status; 882 886 883 - status = efi_get_memory_map(sys_table_arg, map); 887 + status = efi_get_memory_map(map); 884 888 885 889 if (status != EFI_SUCCESS) 886 890 goto fail; 887 891 888 - status = priv_func(sys_table_arg, map, priv); 892 + status = priv_func(map, priv); 889 893 if (status != EFI_SUCCESS) 890 894 goto free_map; 891 895 892 - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); 896 + if (efi_disable_pci_dma) 897 + efi_pci_disable_bridge_busmaster(); 898 + 899 + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); 893 900 894 901 if (status == EFI_INVALID_PARAMETER) { 895 902 /* ··· 908 911 * to get_memory_map() is expected to succeed here. 909 912 */ 910 913 *map->map_size = *map->buff_size; 911 - status = efi_call_early(get_memory_map, 912 - map->map_size, 913 - *map->map, 914 - map->key_ptr, 915 - map->desc_size, 916 - map->desc_ver); 914 + status = efi_bs_call(get_memory_map, 915 + map->map_size, 916 + *map->map, 917 + map->key_ptr, 918 + map->desc_size, 919 + map->desc_ver); 917 920 918 921 /* exit_boot_services() was called, thus cannot free */ 919 922 if (status != EFI_SUCCESS) 920 923 goto fail; 921 924 922 - status = priv_func(sys_table_arg, map, priv); 925 + status = priv_func(map, priv); 923 926 /* exit_boot_services() was called, thus cannot free */ 924 927 if (status != EFI_SUCCESS) 925 928 goto fail; 926 929 927 - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); 930 + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); 928 931 } 929 932 930 933 /* exit_boot_services() was called, thus cannot free */ ··· 934 937 return EFI_SUCCESS; 935 938 936 939 free_map: 937 - 
efi_call_early(free_pool, *map->map); 940 + efi_bs_call(free_pool, *map->map); 938 941 fail: 939 942 return status; 940 943 } 941 944 942 - #define GET_EFI_CONFIG_TABLE(bits) \ 943 - static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ 944 - efi_guid_t guid) \ 945 - { \ 946 - efi_system_table_##bits##_t *sys_table; \ 947 - efi_config_table_##bits##_t *tables; \ 948 - int i; \ 949 - \ 950 - sys_table = (typeof(sys_table))_sys_table; \ 951 - tables = (typeof(tables))(unsigned long)sys_table->tables; \ 952 - \ 953 - for (i = 0; i < sys_table->nr_tables; i++) { \ 954 - if (efi_guidcmp(tables[i].guid, guid) != 0) \ 955 - continue; \ 956 - \ 957 - return (void *)(unsigned long)tables[i].table; \ 958 - } \ 959 - \ 960 - return NULL; \ 961 - } 962 - GET_EFI_CONFIG_TABLE(32) 963 - GET_EFI_CONFIG_TABLE(64) 964 - 965 - void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) 945 + void *get_efi_config_table(efi_guid_t guid) 966 946 { 967 - if (efi_is_64bit()) 968 - return get_efi_config_table64(sys_table, guid); 969 - else 970 - return get_efi_config_table32(sys_table, guid); 947 + unsigned long tables = efi_table_attr(efi_system_table(), tables); 948 + int nr_tables = efi_table_attr(efi_system_table(), nr_tables); 949 + int i; 950 + 951 + for (i = 0; i < nr_tables; i++) { 952 + efi_config_table_t *t = (void *)tables; 953 + 954 + if (efi_guidcmp(t->guid, guid) == 0) 955 + return efi_table_attr(t, table); 956 + 957 + tables += efi_is_native() ? sizeof(efi_config_table_t) 958 + : sizeof(efi_config_table_32_t); 959 + } 960 + return NULL; 961 + } 962 + 963 + void efi_char16_printk(efi_char16_t *str) 964 + { 965 + efi_call_proto(efi_table_attr(efi_system_table(), con_out), 966 + output_string, str); 971 967 }
+31 -17
drivers/firmware/efi/libstub/efistub.h
··· 25 25 #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE 26 26 #endif 27 27 28 - extern int __pure nokaslr(void); 29 - extern int __pure is_quiet(void); 30 - extern int __pure novamap(void); 28 + #ifdef CONFIG_ARM 29 + #define __efistub_global __section(.data) 30 + #else 31 + #define __efistub_global 32 + #endif 31 33 32 - #define pr_efi(sys_table, msg) do { \ 33 - if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ 34 + extern bool __pure nokaslr(void); 35 + extern bool __pure is_quiet(void); 36 + extern bool __pure novamap(void); 37 + 38 + extern __pure efi_system_table_t *efi_system_table(void); 39 + 40 + #define pr_efi(msg) do { \ 41 + if (!is_quiet()) efi_printk("EFI stub: "msg); \ 34 42 } while (0) 35 43 36 - #define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) 44 + #define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg) 37 45 38 - void efi_char16_printk(efi_system_table_t *, efi_char16_t *); 46 + void efi_char16_printk(efi_char16_t *); 47 + void efi_char16_printk(efi_char16_t *); 39 48 40 - unsigned long get_dram_base(efi_system_table_t *sys_table_arg); 49 + unsigned long get_dram_base(void); 41 50 42 - efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, 43 - void *handle, 51 + efi_status_t allocate_new_fdt_and_exit_boot(void *handle, 44 52 unsigned long *new_fdt_addr, 45 53 unsigned long max_addr, 46 54 u64 initrd_addr, u64 initrd_size, ··· 56 48 unsigned long fdt_addr, 57 49 unsigned long fdt_size); 58 50 59 - void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size); 51 + void *get_fdt(unsigned long *fdt_size); 60 52 61 53 void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, 62 54 unsigned long desc_size, efi_memory_desc_t *runtime_map, 63 55 int *count); 64 56 65 - efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table, 66 - unsigned long size, u8 *out); 57 + efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); 67 58 68 - efi_status_t 
efi_random_alloc(efi_system_table_t *sys_table_arg, 69 - unsigned long size, unsigned long align, 59 + efi_status_t efi_random_alloc(unsigned long size, unsigned long align, 70 60 unsigned long *addr, unsigned long random_seed); 71 61 72 - efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); 62 + efi_status_t check_platform_features(void); 73 63 74 - void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); 64 + void *get_efi_config_table(efi_guid_t guid); 75 65 76 66 /* Helper macros for the usual case of using simple C variables: */ 77 67 #ifndef fdt_setprop_inplace_var ··· 81 75 #define fdt_setprop_var(fdt, node_offset, name, var) \ 82 76 fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) 83 77 #endif 78 + 79 + #define get_efi_var(name, vendor, ...) \ 80 + efi_rt_call(get_variable, (efi_char16_t *)(name), \ 81 + (efi_guid_t *)(vendor), __VA_ARGS__) 82 + 83 + #define set_efi_var(name, vendor, ...) \ 84 + efi_rt_call(set_variable, (efi_char16_t *)(name), \ 85 + (efi_guid_t *)(vendor), __VA_ARGS__) 84 86 85 87 #endif
+25 -28
drivers/firmware/efi/libstub/fdt.c
··· 16 16 #define EFI_DT_ADDR_CELLS_DEFAULT 2 17 17 #define EFI_DT_SIZE_CELLS_DEFAULT 2 18 18 19 - static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) 19 + static void fdt_update_cell_size(void *fdt) 20 20 { 21 21 int offset; 22 22 ··· 27 27 fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); 28 28 } 29 29 30 - static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, 31 - unsigned long orig_fdt_size, 30 + static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, 32 31 void *fdt, int new_fdt_size, char *cmdline_ptr, 33 32 u64 initrd_addr, u64 initrd_size) 34 33 { ··· 39 40 /* Do some checks on provided FDT, if it exists: */ 40 41 if (orig_fdt) { 41 42 if (fdt_check_header(orig_fdt)) { 42 - pr_efi_err(sys_table, "Device Tree header not valid!\n"); 43 + pr_efi_err("Device Tree header not valid!\n"); 43 44 return EFI_LOAD_ERROR; 44 45 } 45 46 /* ··· 47 48 * configuration table: 48 49 */ 49 50 if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { 50 - pr_efi_err(sys_table, "Truncated device tree! foo!\n"); 51 + pr_efi_err("Truncated device tree! foo!\n"); 51 52 return EFI_LOAD_ERROR; 52 53 } 53 54 } ··· 61 62 * Any failure from the following function is 62 63 * non-critical: 63 64 */ 64 - fdt_update_cell_size(sys_table, fdt); 65 + fdt_update_cell_size(fdt); 65 66 } 66 67 } 67 68 ··· 110 111 111 112 /* Add FDT entries for EFI runtime services in chosen node. 
*/ 112 113 node = fdt_subnode_offset(fdt, 0, "chosen"); 113 - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); 114 + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table()); 114 115 115 116 status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64); 116 117 if (status) ··· 139 140 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 140 141 efi_status_t efi_status; 141 142 142 - efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), 143 + efi_status = efi_get_random_bytes(sizeof(fdt_val64), 143 144 (u8 *)&fdt_val64); 144 145 if (efi_status == EFI_SUCCESS) { 145 146 status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64); ··· 209 210 void *new_fdt_addr; 210 211 }; 211 212 212 - static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 213 - struct efi_boot_memmap *map, 213 + static efi_status_t exit_boot_func(struct efi_boot_memmap *map, 214 214 void *priv) 215 215 { 216 216 struct exit_boot_struct *p = priv; ··· 242 244 * with the final memory map in it. 243 245 */ 244 246 245 - efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, 246 - void *handle, 247 + efi_status_t allocate_new_fdt_and_exit_boot(void *handle, 247 248 unsigned long *new_fdt_addr, 248 249 unsigned long max_addr, 249 250 u64 initrd_addr, u64 initrd_size, ··· 272 275 * subsequent allocations adding entries, since they could not affect 273 276 * the number of EFI_MEMORY_RUNTIME regions. 
274 277 */ 275 - status = efi_get_memory_map(sys_table, &map); 278 + status = efi_get_memory_map(&map); 276 279 if (status != EFI_SUCCESS) { 277 - pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); 280 + pr_efi_err("Unable to retrieve UEFI memory map.\n"); 278 281 return status; 279 282 } 280 283 281 - pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n"); 284 + pr_efi("Exiting boot services and installing virtual address map...\n"); 282 285 283 286 map.map = &memory_map; 284 - status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN, 287 + status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN, 285 288 new_fdt_addr, max_addr); 286 289 if (status != EFI_SUCCESS) { 287 - pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); 290 + pr_efi_err("Unable to allocate memory for new device tree.\n"); 288 291 goto fail; 289 292 } 290 293 ··· 292 295 * Now that we have done our final memory allocation (and free) 293 296 * we can get the memory map key needed for exit_boot_services(). 
294 297 */ 295 - status = efi_get_memory_map(sys_table, &map); 298 + status = efi_get_memory_map(&map); 296 299 if (status != EFI_SUCCESS) 297 300 goto fail_free_new_fdt; 298 301 299 - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, 302 + status = update_fdt((void *)fdt_addr, fdt_size, 300 303 (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, 301 304 initrd_addr, initrd_size); 302 305 303 306 if (status != EFI_SUCCESS) { 304 - pr_efi_err(sys_table, "Unable to construct new device tree.\n"); 307 + pr_efi_err("Unable to construct new device tree.\n"); 305 308 goto fail_free_new_fdt; 306 309 } 307 310 ··· 310 313 priv.runtime_entry_count = &runtime_entry_count; 311 314 priv.new_fdt_addr = (void *)*new_fdt_addr; 312 315 313 - status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); 316 + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); 314 317 315 318 if (status == EFI_SUCCESS) { 316 319 efi_set_virtual_address_map_t *svam; ··· 319 322 return EFI_SUCCESS; 320 323 321 324 /* Install the new virtual address map */ 322 - svam = sys_table->runtime->set_virtual_address_map; 325 + svam = efi_system_table()->runtime->set_virtual_address_map; 323 326 status = svam(runtime_entry_count * desc_size, desc_size, 324 327 desc_ver, runtime_map); 325 328 ··· 347 350 return EFI_SUCCESS; 348 351 } 349 352 350 - pr_efi_err(sys_table, "Exit boot services failed.\n"); 353 + pr_efi_err("Exit boot services failed.\n"); 351 354 352 355 fail_free_new_fdt: 353 - efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr); 356 + efi_free(MAX_FDT_SIZE, *new_fdt_addr); 354 357 355 358 fail: 356 - sys_table->boottime->free_pool(runtime_map); 359 + efi_system_table()->boottime->free_pool(runtime_map); 357 360 358 361 return EFI_LOAD_ERROR; 359 362 } 360 363 361 - void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size) 364 + void *get_fdt(unsigned long *fdt_size) 362 365 { 363 366 void *fdt; 364 367 365 - fdt = get_efi_config_table(sys_table, 
DEVICE_TREE_GUID); 368 + fdt = get_efi_config_table(DEVICE_TREE_GUID); 366 369 367 370 if (!fdt) 368 371 return NULL; 369 372 370 373 if (fdt_check_header(fdt) != 0) { 371 - pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n"); 374 + pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n"); 372 375 return NULL; 373 376 } 374 377 *fdt_size = fdt_totalsize(fdt);
+27 -136
drivers/firmware/efi/libstub/gop.c
··· 10 10 #include <asm/efi.h> 11 11 #include <asm/setup.h> 12 12 13 + #include "efistub.h" 14 + 13 15 static void find_bits(unsigned long mask, u8 *pos, u8 *size) 14 16 { 15 17 u8 first, len; ··· 37 35 38 36 static void 39 37 setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, 40 - struct efi_pixel_bitmask pixel_info, int pixel_format) 38 + efi_pixel_bitmask_t pixel_info, int pixel_format) 41 39 { 42 40 if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { 43 41 si->lfb_depth = 32; ··· 85 83 } 86 84 } 87 85 88 - static efi_status_t 89 - setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, 90 - efi_guid_t *proto, unsigned long size, void **gop_handle) 86 + static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, 87 + unsigned long size, void **handles) 91 88 { 92 - struct efi_graphics_output_protocol_32 *gop32, *first_gop; 93 - unsigned long nr_gops; 89 + efi_graphics_output_protocol_t *gop, *first_gop; 94 90 u16 width, height; 95 91 u32 pixels_per_scan_line; 96 92 u32 ext_lfb_base; 97 - u64 fb_base; 98 - struct efi_pixel_bitmask pixel_info; 93 + efi_physical_addr_t fb_base; 94 + efi_pixel_bitmask_t pixel_info; 99 95 int pixel_format; 100 96 efi_status_t status; 101 - u32 *handles = (u32 *)(unsigned long)gop_handle; 97 + efi_handle_t h; 102 98 int i; 103 99 104 100 first_gop = NULL; 105 - gop32 = NULL; 101 + gop = NULL; 106 102 107 - nr_gops = size / sizeof(u32); 108 - for (i = 0; i < nr_gops; i++) { 109 - struct efi_graphics_output_protocol_mode_32 *mode; 110 - struct efi_graphics_output_mode_info *info = NULL; 103 + for_each_efi_handle(h, handles, size, i) { 104 + efi_graphics_output_protocol_mode_t *mode; 105 + efi_graphics_output_mode_info_t *info = NULL; 111 106 efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; 112 107 bool conout_found = false; 113 108 void *dummy = NULL; 114 - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; 115 - u64 current_fb_base; 109 + efi_physical_addr_t current_fb_base; 
116 110 117 - status = efi_call_early(handle_protocol, h, 118 - proto, (void **)&gop32); 111 + status = efi_bs_call(handle_protocol, h, proto, (void **)&gop); 119 112 if (status != EFI_SUCCESS) 120 113 continue; 121 114 122 - status = efi_call_early(handle_protocol, h, 123 - &conout_proto, &dummy); 115 + status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy); 124 116 if (status == EFI_SUCCESS) 125 117 conout_found = true; 126 118 127 - mode = (void *)(unsigned long)gop32->mode; 128 - info = (void *)(unsigned long)mode->info; 129 - current_fb_base = mode->frame_buffer_base; 119 + mode = efi_table_attr(gop, mode); 120 + info = efi_table_attr(mode, info); 121 + current_fb_base = efi_table_attr(mode, frame_buffer_base); 130 122 131 123 if ((!first_gop || conout_found) && 132 124 info->pixel_format != PIXEL_BLT_ONLY) { ··· 142 146 * Once we've found a GOP supporting ConOut, 143 147 * don't bother looking any further. 144 148 */ 145 - first_gop = gop32; 146 - if (conout_found) 147 - break; 148 - } 149 - } 150 - 151 - /* Did we find any GOPs? 
*/ 152 - if (!first_gop) 153 - return EFI_NOT_FOUND; 154 - 155 - /* EFI framebuffer */ 156 - si->orig_video_isVGA = VIDEO_TYPE_EFI; 157 - 158 - si->lfb_width = width; 159 - si->lfb_height = height; 160 - si->lfb_base = fb_base; 161 - 162 - ext_lfb_base = (u64)(unsigned long)fb_base >> 32; 163 - if (ext_lfb_base) { 164 - si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; 165 - si->ext_lfb_base = ext_lfb_base; 166 - } 167 - 168 - si->pages = 1; 169 - 170 - setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); 171 - 172 - si->lfb_size = si->lfb_linelength * si->lfb_height; 173 - 174 - si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; 175 - 176 - return EFI_SUCCESS; 177 - } 178 - 179 - static efi_status_t 180 - setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, 181 - efi_guid_t *proto, unsigned long size, void **gop_handle) 182 - { 183 - struct efi_graphics_output_protocol_64 *gop64, *first_gop; 184 - unsigned long nr_gops; 185 - u16 width, height; 186 - u32 pixels_per_scan_line; 187 - u32 ext_lfb_base; 188 - u64 fb_base; 189 - struct efi_pixel_bitmask pixel_info; 190 - int pixel_format; 191 - efi_status_t status; 192 - u64 *handles = (u64 *)(unsigned long)gop_handle; 193 - int i; 194 - 195 - first_gop = NULL; 196 - gop64 = NULL; 197 - 198 - nr_gops = size / sizeof(u64); 199 - for (i = 0; i < nr_gops; i++) { 200 - struct efi_graphics_output_protocol_mode_64 *mode; 201 - struct efi_graphics_output_mode_info *info = NULL; 202 - efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; 203 - bool conout_found = false; 204 - void *dummy = NULL; 205 - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; 206 - u64 current_fb_base; 207 - 208 - status = efi_call_early(handle_protocol, h, 209 - proto, (void **)&gop64); 210 - if (status != EFI_SUCCESS) 211 - continue; 212 - 213 - status = efi_call_early(handle_protocol, h, 214 - &conout_proto, &dummy); 215 - if (status == EFI_SUCCESS) 216 - conout_found = true; 217 - 218 - mode = (void 
*)(unsigned long)gop64->mode; 219 - info = (void *)(unsigned long)mode->info; 220 - current_fb_base = mode->frame_buffer_base; 221 - 222 - if ((!first_gop || conout_found) && 223 - info->pixel_format != PIXEL_BLT_ONLY) { 224 - /* 225 - * Systems that use the UEFI Console Splitter may 226 - * provide multiple GOP devices, not all of which are 227 - * backed by real hardware. The workaround is to search 228 - * for a GOP implementing the ConOut protocol, and if 229 - * one isn't found, to just fall back to the first GOP. 230 - */ 231 - width = info->horizontal_resolution; 232 - height = info->vertical_resolution; 233 - pixel_format = info->pixel_format; 234 - pixel_info = info->pixel_information; 235 - pixels_per_scan_line = info->pixels_per_scan_line; 236 - fb_base = current_fb_base; 237 - 238 - /* 239 - * Once we've found a GOP supporting ConOut, 240 - * don't bother looking any further. 241 - */ 242 - first_gop = gop64; 149 + first_gop = gop; 243 150 if (conout_found) 244 151 break; 245 152 } ··· 179 280 /* 180 281 * See if we have Graphics Output Protocol 181 282 */ 182 - efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, 183 - struct screen_info *si, efi_guid_t *proto, 283 + efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, 184 284 unsigned long size) 185 285 { 186 286 efi_status_t status; 187 287 void **gop_handle = NULL; 188 288 189 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 190 - size, (void **)&gop_handle); 289 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, 290 + (void **)&gop_handle); 191 291 if (status != EFI_SUCCESS) 192 292 return status; 193 293 194 - status = efi_call_early(locate_handle, 195 - EFI_LOCATE_BY_PROTOCOL, 196 - proto, NULL, &size, gop_handle); 294 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, 295 + &size, gop_handle); 197 296 if (status != EFI_SUCCESS) 198 297 goto free_handle; 199 298 200 - if (efi_is_64bit()) { 201 - status = 
setup_gop64(sys_table_arg, si, proto, size, 202 - gop_handle); 203 - } else { 204 - status = setup_gop32(sys_table_arg, si, proto, size, 205 - gop_handle); 206 - } 299 + status = setup_gop(si, proto, size, gop_handle); 207 300 208 301 free_handle: 209 - efi_call_early(free_pool, gop_handle); 302 + efi_bs_call(free_pool, gop_handle); 210 303 return status; 211 304 }
+114
drivers/firmware/efi/libstub/pci.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * PCI-related functions used by the EFI stub on multiple 4 + * architectures. 5 + * 6 + * Copyright 2019 Google, LLC 7 + */ 8 + 9 + #include <linux/efi.h> 10 + #include <linux/pci.h> 11 + 12 + #include <asm/efi.h> 13 + 14 + #include "efistub.h" 15 + 16 + void efi_pci_disable_bridge_busmaster(void) 17 + { 18 + efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; 19 + unsigned long pci_handle_size = 0; 20 + efi_handle_t *pci_handle = NULL; 21 + efi_handle_t handle; 22 + efi_status_t status; 23 + u16 class, command; 24 + int i; 25 + 26 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, 27 + NULL, &pci_handle_size, NULL); 28 + 29 + if (status != EFI_BUFFER_TOO_SMALL) { 30 + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) 31 + pr_efi_err("Failed to locate PCI I/O handles'\n"); 32 + return; 33 + } 34 + 35 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size, 36 + (void **)&pci_handle); 37 + if (status != EFI_SUCCESS) { 38 + pr_efi_err("Failed to allocate memory for 'pci_handle'\n"); 39 + return; 40 + } 41 + 42 + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, 43 + NULL, &pci_handle_size, pci_handle); 44 + if (status != EFI_SUCCESS) { 45 + pr_efi_err("Failed to locate PCI I/O handles'\n"); 46 + goto free_handle; 47 + } 48 + 49 + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { 50 + efi_pci_io_protocol_t *pci; 51 + unsigned long segment_nr, bus_nr, device_nr, func_nr; 52 + 53 + status = efi_bs_call(handle_protocol, handle, &pci_proto, 54 + (void **)&pci); 55 + if (status != EFI_SUCCESS) 56 + continue; 57 + 58 + /* 59 + * Disregard devices living on bus 0 - these are not behind a 60 + * bridge so no point in disconnecting them from their drivers. 
61 + */ 62 + status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr, 63 + &device_nr, &func_nr); 64 + if (status != EFI_SUCCESS || bus_nr == 0) 65 + continue; 66 + 67 + /* 68 + * Don't disconnect VGA controllers so we don't risk losing 69 + * access to the framebuffer. Drivers for true PCIe graphics 70 + * controllers that are behind a PCIe root port do not use 71 + * DMA to implement the GOP framebuffer anyway [although they 72 + * may use it in their implentation of Gop->Blt()], and so 73 + * disabling DMA in the PCI bridge should not interfere with 74 + * normal operation of the device. 75 + */ 76 + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, 77 + PCI_CLASS_DEVICE, 1, &class); 78 + if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA) 79 + continue; 80 + 81 + /* Disconnect this handle from all its drivers */ 82 + efi_bs_call(disconnect_controller, handle, NULL, NULL); 83 + } 84 + 85 + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { 86 + efi_pci_io_protocol_t *pci; 87 + 88 + status = efi_bs_call(handle_protocol, handle, &pci_proto, 89 + (void **)&pci); 90 + if (status != EFI_SUCCESS || !pci) 91 + continue; 92 + 93 + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, 94 + PCI_CLASS_DEVICE, 1, &class); 95 + 96 + if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI) 97 + continue; 98 + 99 + /* Disable busmastering */ 100 + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, 101 + PCI_COMMAND, 1, &command); 102 + if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER)) 103 + continue; 104 + 105 + command &= ~PCI_COMMAND_MASTER; 106 + status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16, 107 + PCI_COMMAND, 1, &command); 108 + if (status != EFI_SUCCESS) 109 + pr_efi_err("Failed to disable PCI busmastering\n"); 110 + } 111 + 112 + free_handle: 113 + efi_bs_call(free_pool, pci_handle); 114 + }
+34 -41
drivers/firmware/efi/libstub/random.c
··· 9 9 10 10 #include "efistub.h" 11 11 12 - typedef struct efi_rng_protocol efi_rng_protocol_t; 12 + typedef union efi_rng_protocol efi_rng_protocol_t; 13 13 14 - typedef struct { 15 - u32 get_info; 16 - u32 get_rng; 17 - } efi_rng_protocol_32_t; 18 - 19 - typedef struct { 20 - u64 get_info; 21 - u64 get_rng; 22 - } efi_rng_protocol_64_t; 23 - 24 - struct efi_rng_protocol { 25 - efi_status_t (*get_info)(struct efi_rng_protocol *, 26 - unsigned long *, efi_guid_t *); 27 - efi_status_t (*get_rng)(struct efi_rng_protocol *, 28 - efi_guid_t *, unsigned long, u8 *out); 14 + union efi_rng_protocol { 15 + struct { 16 + efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *, 17 + unsigned long *, 18 + efi_guid_t *); 19 + efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *, 20 + efi_guid_t *, unsigned long, 21 + u8 *out); 22 + }; 23 + struct { 24 + u32 get_info; 25 + u32 get_rng; 26 + } mixed_mode; 29 27 }; 30 28 31 - efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, 32 - unsigned long size, u8 *out) 29 + efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) 33 30 { 34 31 efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; 35 32 efi_status_t status; 36 - struct efi_rng_protocol *rng = NULL; 33 + efi_rng_protocol_t *rng = NULL; 37 34 38 - status = efi_call_early(locate_protocol, &rng_proto, NULL, 39 - (void **)&rng); 35 + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); 40 36 if (status != EFI_SUCCESS) 41 37 return status; 42 38 43 - return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out); 39 + return efi_call_proto(rng, get_rng, NULL, size, out); 44 40 } 45 41 46 42 /* ··· 77 81 */ 78 82 #define MD_NUM_SLOTS(md) ((md)->virt_addr) 79 83 80 - efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, 81 - unsigned long size, 84 + efi_status_t efi_random_alloc(unsigned long size, 82 85 unsigned long align, 83 86 unsigned long *addr, 84 87 unsigned long random_seed) ··· 96 101 map.key_ptr = NULL; 97 102 
map.buff_size = &buff_size; 98 103 99 - status = efi_get_memory_map(sys_table_arg, &map); 104 + status = efi_get_memory_map(&map); 100 105 if (status != EFI_SUCCESS) 101 106 return status; 102 107 ··· 140 145 target = round_up(md->phys_addr, align) + target_slot * align; 141 146 pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; 142 147 143 - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, 144 - EFI_LOADER_DATA, pages, &target); 148 + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, 149 + EFI_LOADER_DATA, pages, &target); 145 150 if (status == EFI_SUCCESS) 146 151 *addr = target; 147 152 break; 148 153 } 149 154 150 - efi_call_early(free_pool, memory_map); 155 + efi_bs_call(free_pool, memory_map); 151 156 152 157 return status; 153 158 } 154 159 155 - efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) 160 + efi_status_t efi_random_get_seed(void) 156 161 { 157 162 efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; 158 163 efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; 159 164 efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; 160 - struct efi_rng_protocol *rng = NULL; 165 + efi_rng_protocol_t *rng = NULL; 161 166 struct linux_efi_random_seed *seed = NULL; 162 167 efi_status_t status; 163 168 164 - status = efi_call_early(locate_protocol, &rng_proto, NULL, 165 - (void **)&rng); 169 + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); 166 170 if (status != EFI_SUCCESS) 167 171 return status; 168 172 169 - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, 170 - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, 171 - (void **)&seed); 173 + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, 174 + sizeof(*seed) + EFI_RANDOM_SEED_SIZE, 175 + (void **)&seed); 172 176 if (status != EFI_SUCCESS) 173 177 return status; 174 178 175 - status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw, 179 + status = efi_call_proto(rng, get_rng, &rng_algo_raw, 176 180 
EFI_RANDOM_SEED_SIZE, seed->bits); 177 181 178 182 if (status == EFI_UNSUPPORTED) ··· 179 185 * Use whatever algorithm we have available if the raw algorithm 180 186 * is not implemented. 181 187 */ 182 - status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, 183 - EFI_RANDOM_SEED_SIZE, seed->bits); 188 + status = efi_call_proto(rng, get_rng, NULL, 189 + EFI_RANDOM_SEED_SIZE, seed->bits); 184 190 185 191 if (status != EFI_SUCCESS) 186 192 goto err_freepool; 187 193 188 194 seed->size = EFI_RANDOM_SEED_SIZE; 189 - status = efi_call_early(install_configuration_table, &rng_table_guid, 190 - seed); 195 + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); 191 196 if (status != EFI_SUCCESS) 192 197 goto err_freepool; 193 198 194 199 return EFI_SUCCESS; 195 200 196 201 err_freepool: 197 - efi_call_early(free_pool, seed); 202 + efi_bs_call(free_pool, seed); 198 203 return status; 199 204 }
+3 -8
drivers/firmware/efi/libstub/secureboot.c
··· 21 21 static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; 22 22 static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; 23 23 24 - #define get_efi_var(name, vendor, ...) \ 25 - efi_call_runtime(get_variable, \ 26 - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ 27 - __VA_ARGS__); 28 - 29 24 /* 30 25 * Determine whether we're in secure boot mode. 31 26 * 32 27 * Please keep the logic in sync with 33 28 * arch/x86/xen/efi.c:xen_efi_get_secureboot(). 34 29 */ 35 - enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) 30 + enum efi_secureboot_mode efi_get_secureboot(void) 36 31 { 37 32 u32 attr; 38 33 u8 secboot, setupmode, moksbstate; ··· 67 72 return efi_secureboot_mode_disabled; 68 73 69 74 secure_boot_enabled: 70 - pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); 75 + pr_efi("UEFI Secure Boot is enabled.\n"); 71 76 return efi_secureboot_mode_enabled; 72 77 73 78 out_efi_err: 74 - pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); 79 + pr_efi_err("Could not determine UEFI Secure Boot status.\n"); 75 80 return efi_secureboot_mode_unknown; 76 81 }
+17 -31
drivers/firmware/efi/libstub/tpm.c
··· 20 20 #define MEMORY_ONLY_RESET_CONTROL_GUID \ 21 21 EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29) 22 22 23 - #define get_efi_var(name, vendor, ...) \ 24 - efi_call_runtime(get_variable, \ 25 - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ 26 - __VA_ARGS__) 27 - 28 - #define set_efi_var(name, vendor, ...) \ 29 - efi_call_runtime(set_variable, \ 30 - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ 31 - __VA_ARGS__) 32 - 33 23 /* 34 24 * Enable reboot attack mitigation. This requests that the firmware clear the 35 25 * RAM on next reboot before proceeding with boot, ensuring that any secrets 36 26 * are cleared. If userland has ensured that all secrets have been removed 37 27 * from RAM before reboot it can simply reset this variable. 38 28 */ 39 - void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) 29 + void efi_enable_reset_attack_mitigation(void) 40 30 { 41 31 u8 val = 1; 42 32 efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID; ··· 47 57 48 58 #endif 49 59 50 - void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) 60 + void efi_retrieve_tpm2_eventlog(void) 51 61 { 52 62 efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; 53 63 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; ··· 59 69 size_t log_size, last_entry_size; 60 70 efi_bool_t truncated; 61 71 int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; 62 - void *tcg2_protocol = NULL; 72 + efi_tcg2_protocol_t *tcg2_protocol = NULL; 63 73 int final_events_size = 0; 64 74 65 - status = efi_call_early(locate_protocol, &tcg2_guid, NULL, 66 - &tcg2_protocol); 75 + status = efi_bs_call(locate_protocol, &tcg2_guid, NULL, 76 + (void **)&tcg2_protocol); 67 77 if (status != EFI_SUCCESS) 68 78 return; 69 79 70 - status = efi_call_proto(efi_tcg2_protocol, get_event_log, 71 - tcg2_protocol, version, &log_location, 72 - &log_last_entry, &truncated); 80 + status = efi_call_proto(tcg2_protocol, get_event_log, version, 81 + &log_location, 
&log_last_entry, &truncated); 73 82 74 83 if (status != EFI_SUCCESS || !log_location) { 75 84 version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; 76 - status = efi_call_proto(efi_tcg2_protocol, get_event_log, 77 - tcg2_protocol, version, &log_location, 78 - &log_last_entry, &truncated); 85 + status = efi_call_proto(tcg2_protocol, get_event_log, version, 86 + &log_location, &log_last_entry, 87 + &truncated); 79 88 if (status != EFI_SUCCESS || !log_location) 80 89 return; 81 90 ··· 115 126 } 116 127 117 128 /* Allocate space for the logs and copy them. */ 118 - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 119 - sizeof(*log_tbl) + log_size, 120 - (void **) &log_tbl); 129 + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, 130 + sizeof(*log_tbl) + log_size, (void **)&log_tbl); 121 131 122 132 if (status != EFI_SUCCESS) { 123 - efi_printk(sys_table_arg, 124 - "Unable to allocate memory for event log\n"); 133 + efi_printk("Unable to allocate memory for event log\n"); 125 134 return; 126 135 } 127 136 ··· 127 140 * Figure out whether any events have already been logged to the 128 141 * final events structure, and if so how much space they take up 129 142 */ 130 - final_events_table = get_efi_config_table(sys_table_arg, 131 - LINUX_EFI_TPM_FINAL_LOG_GUID); 143 + final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); 132 144 if (final_events_table && final_events_table->nr_events) { 133 145 struct tcg_pcr_event2_head *header; 134 146 int offset; ··· 155 169 log_tbl->version = version; 156 170 memcpy(log_tbl->log, (void *) first_entry_addr, log_size); 157 171 158 - status = efi_call_early(install_configuration_table, 159 - &linux_eventlog_guid, log_tbl); 172 + status = efi_bs_call(install_configuration_table, 173 + &linux_eventlog_guid, log_tbl); 160 174 if (status != EFI_SUCCESS) 161 175 goto err_free; 162 176 return; 163 177 164 178 err_free: 165 - efi_call_early(free_pool, log_tbl); 179 + efi_bs_call(free_pool, log_tbl); 166 180 }
+63 -30
drivers/firmware/efi/memmap.c
··· 29 29 return PFN_PHYS(page_to_pfn(p)); 30 30 } 31 31 32 + void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) 33 + { 34 + if (flags & EFI_MEMMAP_MEMBLOCK) { 35 + if (slab_is_available()) 36 + memblock_free_late(phys, size); 37 + else 38 + memblock_free(phys, size); 39 + } else if (flags & EFI_MEMMAP_SLAB) { 40 + struct page *p = pfn_to_page(PHYS_PFN(phys)); 41 + unsigned int order = get_order(size); 42 + 43 + free_pages((unsigned long) page_address(p), order); 44 + } 45 + } 46 + 47 + static void __init efi_memmap_free(void) 48 + { 49 + __efi_memmap_free(efi.memmap.phys_map, 50 + efi.memmap.desc_size * efi.memmap.nr_map, 51 + efi.memmap.flags); 52 + } 53 + 32 54 /** 33 55 * efi_memmap_alloc - Allocate memory for the EFI memory map 34 56 * @num_entries: Number of entries in the allocated map. 57 + * @data: efi memmap installation parameters 35 58 * 36 59 * Depending on whether mm_init() has already been invoked or not, 37 60 * either memblock or "normal" page allocation is used. ··· 62 39 * Returns the physical address of the allocated memory map on 63 40 * success, zero on failure. 
64 41 */ 65 - phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) 42 + int __init efi_memmap_alloc(unsigned int num_entries, 43 + struct efi_memory_map_data *data) 66 44 { 67 - unsigned long size = num_entries * efi.memmap.desc_size; 45 + /* Expect allocation parameters are zero initialized */ 46 + WARN_ON(data->phys_map || data->size); 68 47 69 - if (slab_is_available()) 70 - return __efi_memmap_alloc_late(size); 48 + data->size = num_entries * efi.memmap.desc_size; 49 + data->desc_version = efi.memmap.desc_version; 50 + data->desc_size = efi.memmap.desc_size; 51 + data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK); 52 + data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE; 71 53 72 - return __efi_memmap_alloc_early(size); 54 + if (slab_is_available()) { 55 + data->flags |= EFI_MEMMAP_SLAB; 56 + data->phys_map = __efi_memmap_alloc_late(data->size); 57 + } else { 58 + data->flags |= EFI_MEMMAP_MEMBLOCK; 59 + data->phys_map = __efi_memmap_alloc_early(data->size); 60 + } 61 + 62 + if (!data->phys_map) 63 + return -ENOMEM; 64 + return 0; 73 65 } 74 66 75 67 /** 76 68 * __efi_memmap_init - Common code for mapping the EFI memory map 77 69 * @data: EFI memory map data 78 - * @late: Use early or late mapping function? 79 70 * 80 71 * This function takes care of figuring out which function to use to 81 72 * map the EFI memory map in efi.memmap based on how far into the boot 82 73 * we are. 83 74 * 84 - * During bootup @late should be %false since we only have access to 85 - * the early_memremap*() functions as the vmalloc space isn't setup. 86 - * Once the kernel is fully booted we can fallback to the more robust 87 - * memremap*() API. 75 + * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we 76 + * only have access to the early_memremap*() functions as the vmalloc 77 + * space isn't setup. Once the kernel is fully booted we can fallback 78 + * to the more robust memremap*() API. 
88 79 * 89 80 * Returns zero on success, a negative error code on failure. 90 81 */ 91 - static int __init 92 - __efi_memmap_init(struct efi_memory_map_data *data, bool late) 82 + static int __init __efi_memmap_init(struct efi_memory_map_data *data) 93 83 { 94 84 struct efi_memory_map map; 95 85 phys_addr_t phys_map; ··· 112 76 113 77 phys_map = data->phys_map; 114 78 115 - if (late) 79 + if (data->flags & EFI_MEMMAP_LATE) 116 80 map.map = memremap(phys_map, data->size, MEMREMAP_WB); 117 81 else 118 82 map.map = early_memremap(phys_map, data->size); ··· 122 86 return -ENOMEM; 123 87 } 124 88 89 + /* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */ 90 + efi_memmap_free(); 91 + 125 92 map.phys_map = data->phys_map; 126 93 map.nr_map = data->size / data->desc_size; 127 94 map.map_end = map.map + data->size; 128 95 129 96 map.desc_version = data->desc_version; 130 97 map.desc_size = data->desc_size; 131 - map.late = late; 98 + map.flags = data->flags; 132 99 133 100 set_bit(EFI_MEMMAP, &efi.flags); 134 101 ··· 150 111 int __init efi_memmap_init_early(struct efi_memory_map_data *data) 151 112 { 152 113 /* Cannot go backwards */ 153 - WARN_ON(efi.memmap.late); 114 + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); 154 115 155 - return __efi_memmap_init(data, false); 116 + data->flags = 0; 117 + return __efi_memmap_init(data); 156 118 } 157 119 158 120 void __init efi_memmap_unmap(void) ··· 161 121 if (!efi_enabled(EFI_MEMMAP)) 162 122 return; 163 123 164 - if (!efi.memmap.late) { 124 + if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) { 165 125 unsigned long size; 166 126 167 127 size = efi.memmap.desc_size * efi.memmap.nr_map; ··· 202 162 struct efi_memory_map_data data = { 203 163 .phys_map = addr, 204 164 .size = size, 165 + .flags = EFI_MEMMAP_LATE, 205 166 }; 206 167 207 168 /* Did we forget to unmap the early EFI memmap? */ 208 169 WARN_ON(efi.memmap.map); 209 170 210 171 /* Were we already called? 
*/ 211 - WARN_ON(efi.memmap.late); 172 + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); 212 173 213 174 /* 214 175 * It makes no sense to allow callers to register different ··· 219 178 data.desc_version = efi.memmap.desc_version; 220 179 data.desc_size = efi.memmap.desc_size; 221 180 222 - return __efi_memmap_init(&data, true); 181 + return __efi_memmap_init(&data); 223 182 } 224 183 225 184 /** 226 185 * efi_memmap_install - Install a new EFI memory map in efi.memmap 227 - * @addr: Physical address of the memory map 228 - * @nr_map: Number of entries in the memory map 186 + * @ctx: map allocation parameters (address, size, flags) 229 187 * 230 188 * Unlike efi_memmap_init_*(), this function does not allow the caller 231 189 * to switch from early to late mappings. It simply uses the existing ··· 232 192 * 233 193 * Returns zero on success, a negative error code on failure. 234 194 */ 235 - int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map) 195 + int __init efi_memmap_install(struct efi_memory_map_data *data) 236 196 { 237 - struct efi_memory_map_data data; 238 - 239 197 efi_memmap_unmap(); 240 198 241 - data.phys_map = addr; 242 - data.size = efi.memmap.desc_size * nr_map; 243 - data.desc_version = efi.memmap.desc_version; 244 - data.desc_size = efi.memmap.desc_size; 245 - 246 - return __efi_memmap_init(&data, efi.memmap.late); 199 + return __efi_memmap_init(data); 247 200 } 248 201 249 202 /**
+1 -1
drivers/infiniband/hw/mlx5/main.c
··· 40 40 #include <linux/slab.h> 41 41 #include <linux/bitmap.h> 42 42 #if defined(CONFIG_X86) 43 - #include <asm/pat.h> 43 + #include <asm/memtype.h> 44 44 #endif 45 45 #include <linux/sched.h> 46 46 #include <linux/sched/mm.h>
+1 -1
drivers/media/pci/ivtv/ivtvfb.c
··· 37 37 #include <linux/ivtvfb.h> 38 38 39 39 #ifdef CONFIG_X86_64 40 - #include <asm/pat.h> 40 + #include <asm/memtype.h> 41 41 #endif 42 42 43 43 /* card parameters */
+331 -437
include/linux/efi.h
··· 48 48 typedef u64 efi_physical_addr_t; 49 49 typedef void *efi_handle_t; 50 50 51 + #if defined(CONFIG_X86_64) 52 + #define __efiapi __attribute__((ms_abi)) 53 + #elif defined(CONFIG_X86_32) 54 + #define __efiapi __attribute__((regparm(0))) 55 + #else 56 + #define __efiapi 57 + #endif 58 + 59 + #define efi_get_handle_at(array, idx) \ 60 + (efi_is_native() ? (array)[idx] \ 61 + : (efi_handle_t)(unsigned long)((u32 *)(array))[idx]) 62 + 63 + #define efi_get_handle_num(size) \ 64 + ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32))) 65 + 66 + #define for_each_efi_handle(handle, array, size, i) \ 67 + for (i = 0; \ 68 + i < efi_get_handle_num(size) && \ 69 + ((handle = efi_get_handle_at((array), i)) || true); \ 70 + i++) 71 + 51 72 /* 52 73 * The UEFI spec and EDK2 reference implementation both define EFI_GUID as 53 74 * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment ··· 272 251 u32 create_event_ex; 273 252 } __packed efi_boot_services_32_t; 274 253 275 - typedef struct { 276 - efi_table_hdr_t hdr; 277 - u64 raise_tpl; 278 - u64 restore_tpl; 279 - u64 allocate_pages; 280 - u64 free_pages; 281 - u64 get_memory_map; 282 - u64 allocate_pool; 283 - u64 free_pool; 284 - u64 create_event; 285 - u64 set_timer; 286 - u64 wait_for_event; 287 - u64 signal_event; 288 - u64 close_event; 289 - u64 check_event; 290 - u64 install_protocol_interface; 291 - u64 reinstall_protocol_interface; 292 - u64 uninstall_protocol_interface; 293 - u64 handle_protocol; 294 - u64 __reserved; 295 - u64 register_protocol_notify; 296 - u64 locate_handle; 297 - u64 locate_device_path; 298 - u64 install_configuration_table; 299 - u64 load_image; 300 - u64 start_image; 301 - u64 exit; 302 - u64 unload_image; 303 - u64 exit_boot_services; 304 - u64 get_next_monotonic_count; 305 - u64 stall; 306 - u64 set_watchdog_timer; 307 - u64 connect_controller; 308 - u64 disconnect_controller; 309 - u64 open_protocol; 310 - u64 close_protocol; 311 - u64 
open_protocol_information; 312 - u64 protocols_per_handle; 313 - u64 locate_handle_buffer; 314 - u64 locate_protocol; 315 - u64 install_multiple_protocol_interfaces; 316 - u64 uninstall_multiple_protocol_interfaces; 317 - u64 calculate_crc32; 318 - u64 copy_mem; 319 - u64 set_mem; 320 - u64 create_event_ex; 321 - } __packed efi_boot_services_64_t; 322 - 323 254 /* 324 255 * EFI Boot Services table 325 256 */ 326 - typedef struct { 327 - efi_table_hdr_t hdr; 328 - void *raise_tpl; 329 - void *restore_tpl; 330 - efi_status_t (*allocate_pages)(int, int, unsigned long, 331 - efi_physical_addr_t *); 332 - efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long); 333 - efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *, 334 - unsigned long *, u32 *); 335 - efi_status_t (*allocate_pool)(int, unsigned long, void **); 336 - efi_status_t (*free_pool)(void *); 337 - void *create_event; 338 - void *set_timer; 339 - void *wait_for_event; 340 - void *signal_event; 341 - void *close_event; 342 - void *check_event; 343 - void *install_protocol_interface; 344 - void *reinstall_protocol_interface; 345 - void *uninstall_protocol_interface; 346 - efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **); 347 - void *__reserved; 348 - void *register_protocol_notify; 349 - efi_status_t (*locate_handle)(int, efi_guid_t *, void *, 350 - unsigned long *, efi_handle_t *); 351 - void *locate_device_path; 352 - efi_status_t (*install_configuration_table)(efi_guid_t *, void *); 353 - void *load_image; 354 - void *start_image; 355 - void *exit; 356 - void *unload_image; 357 - efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long); 358 - void *get_next_monotonic_count; 359 - void *stall; 360 - void *set_watchdog_timer; 361 - void *connect_controller; 362 - void *disconnect_controller; 363 - void *open_protocol; 364 - void *close_protocol; 365 - void *open_protocol_information; 366 - void *protocols_per_handle; 367 - void *locate_handle_buffer; 368 - 
efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **); 369 - void *install_multiple_protocol_interfaces; 370 - void *uninstall_multiple_protocol_interfaces; 371 - void *calculate_crc32; 372 - void *copy_mem; 373 - void *set_mem; 374 - void *create_event_ex; 257 + typedef union { 258 + struct { 259 + efi_table_hdr_t hdr; 260 + void *raise_tpl; 261 + void *restore_tpl; 262 + efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long, 263 + efi_physical_addr_t *); 264 + efi_status_t (__efiapi *free_pages)(efi_physical_addr_t, 265 + unsigned long); 266 + efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *, 267 + unsigned long *, 268 + unsigned long *, u32 *); 269 + efi_status_t (__efiapi *allocate_pool)(int, unsigned long, 270 + void **); 271 + efi_status_t (__efiapi *free_pool)(void *); 272 + void *create_event; 273 + void *set_timer; 274 + void *wait_for_event; 275 + void *signal_event; 276 + void *close_event; 277 + void *check_event; 278 + void *install_protocol_interface; 279 + void *reinstall_protocol_interface; 280 + void *uninstall_protocol_interface; 281 + efi_status_t (__efiapi *handle_protocol)(efi_handle_t, 282 + efi_guid_t *, void **); 283 + void *__reserved; 284 + void *register_protocol_notify; 285 + efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *, 286 + void *, unsigned long *, 287 + efi_handle_t *); 288 + void *locate_device_path; 289 + efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *, 290 + void *); 291 + void *load_image; 292 + void *start_image; 293 + void *exit; 294 + void *unload_image; 295 + efi_status_t (__efiapi *exit_boot_services)(efi_handle_t, 296 + unsigned long); 297 + void *get_next_monotonic_count; 298 + void *stall; 299 + void *set_watchdog_timer; 300 + void *connect_controller; 301 + efi_status_t (__efiapi *disconnect_controller)(efi_handle_t, 302 + efi_handle_t, 303 + efi_handle_t); 304 + void *open_protocol; 305 + void *close_protocol; 306 + void *open_protocol_information; 307 + 
void *protocols_per_handle; 308 + void *locate_handle_buffer; 309 + efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *, 310 + void **); 311 + void *install_multiple_protocol_interfaces; 312 + void *uninstall_multiple_protocol_interfaces; 313 + void *calculate_crc32; 314 + void *copy_mem; 315 + void *set_mem; 316 + void *create_event_ex; 317 + }; 318 + efi_boot_services_32_t mixed_mode; 375 319 } efi_boot_services_t; 376 320 377 321 typedef enum { ··· 369 383 u32 write; 370 384 } efi_pci_io_protocol_access_32_t; 371 385 372 - typedef struct { 373 - u64 read; 374 - u64 write; 375 - } efi_pci_io_protocol_access_64_t; 386 + typedef union efi_pci_io_protocol efi_pci_io_protocol_t; 387 + 388 + typedef 389 + efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *, 390 + EFI_PCI_IO_PROTOCOL_WIDTH, 391 + u32 offset, 392 + unsigned long count, 393 + void *buffer); 376 394 377 395 typedef struct { 378 396 void *read; ··· 384 394 } efi_pci_io_protocol_access_t; 385 395 386 396 typedef struct { 387 - u32 poll_mem; 388 - u32 poll_io; 389 - efi_pci_io_protocol_access_32_t mem; 390 - efi_pci_io_protocol_access_32_t io; 391 - efi_pci_io_protocol_access_32_t pci; 392 - u32 copy_mem; 393 - u32 map; 394 - u32 unmap; 395 - u32 allocate_buffer; 396 - u32 free_buffer; 397 - u32 flush; 398 - u32 get_location; 399 - u32 attributes; 400 - u32 get_bar_attributes; 401 - u32 set_bar_attributes; 402 - u64 romsize; 403 - u32 romimage; 404 - } efi_pci_io_protocol_32_t; 397 + efi_pci_io_protocol_cfg_t read; 398 + efi_pci_io_protocol_cfg_t write; 399 + } efi_pci_io_protocol_config_access_t; 405 400 406 - typedef struct { 407 - u64 poll_mem; 408 - u64 poll_io; 409 - efi_pci_io_protocol_access_64_t mem; 410 - efi_pci_io_protocol_access_64_t io; 411 - efi_pci_io_protocol_access_64_t pci; 412 - u64 copy_mem; 413 - u64 map; 414 - u64 unmap; 415 - u64 allocate_buffer; 416 - u64 free_buffer; 417 - u64 flush; 418 - u64 get_location; 419 - u64 attributes; 420 - u64 
get_bar_attributes; 421 - u64 set_bar_attributes; 422 - u64 romsize; 423 - u64 romimage; 424 - } efi_pci_io_protocol_64_t; 425 - 426 - typedef struct { 427 - void *poll_mem; 428 - void *poll_io; 429 - efi_pci_io_protocol_access_t mem; 430 - efi_pci_io_protocol_access_t io; 431 - efi_pci_io_protocol_access_t pci; 432 - void *copy_mem; 433 - void *map; 434 - void *unmap; 435 - void *allocate_buffer; 436 - void *free_buffer; 437 - void *flush; 438 - void *get_location; 439 - void *attributes; 440 - void *get_bar_attributes; 441 - void *set_bar_attributes; 442 - uint64_t romsize; 443 - void *romimage; 444 - } efi_pci_io_protocol_t; 401 + union efi_pci_io_protocol { 402 + struct { 403 + void *poll_mem; 404 + void *poll_io; 405 + efi_pci_io_protocol_access_t mem; 406 + efi_pci_io_protocol_access_t io; 407 + efi_pci_io_protocol_config_access_t pci; 408 + void *copy_mem; 409 + void *map; 410 + void *unmap; 411 + void *allocate_buffer; 412 + void *free_buffer; 413 + void *flush; 414 + efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *, 415 + unsigned long *segment_nr, 416 + unsigned long *bus_nr, 417 + unsigned long *device_nr, 418 + unsigned long *func_nr); 419 + void *attributes; 420 + void *get_bar_attributes; 421 + void *set_bar_attributes; 422 + uint64_t romsize; 423 + void *romimage; 424 + }; 425 + struct { 426 + u32 poll_mem; 427 + u32 poll_io; 428 + efi_pci_io_protocol_access_32_t mem; 429 + efi_pci_io_protocol_access_32_t io; 430 + efi_pci_io_protocol_access_32_t pci; 431 + u32 copy_mem; 432 + u32 map; 433 + u32 unmap; 434 + u32 allocate_buffer; 435 + u32 free_buffer; 436 + u32 flush; 437 + u32 get_location; 438 + u32 attributes; 439 + u32 get_bar_attributes; 440 + u32 set_bar_attributes; 441 + u64 romsize; 442 + u32 romimage; 443 + } mixed_mode; 444 + }; 445 445 446 446 #define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 447 447 #define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 ··· 453 473 #define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 454 474 
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 455 475 456 - typedef struct { 457 - u32 version; 458 - u32 get; 459 - u32 set; 460 - u32 del; 461 - u32 get_all; 462 - } apple_properties_protocol_32_t; 476 + struct efi_dev_path; 463 477 464 - typedef struct { 465 - u64 version; 466 - u64 get; 467 - u64 set; 468 - u64 del; 469 - u64 get_all; 470 - } apple_properties_protocol_64_t; 478 + typedef union apple_properties_protocol apple_properties_protocol_t; 471 479 472 - typedef struct { 473 - u32 get_capability; 474 - u32 get_event_log; 475 - u32 hash_log_extend_event; 476 - u32 submit_command; 477 - u32 get_active_pcr_banks; 478 - u32 set_active_pcr_banks; 479 - u32 get_result_of_set_active_pcr_banks; 480 - } efi_tcg2_protocol_32_t; 481 - 482 - typedef struct { 483 - u64 get_capability; 484 - u64 get_event_log; 485 - u64 hash_log_extend_event; 486 - u64 submit_command; 487 - u64 get_active_pcr_banks; 488 - u64 set_active_pcr_banks; 489 - u64 get_result_of_set_active_pcr_banks; 490 - } efi_tcg2_protocol_64_t; 480 + union apple_properties_protocol { 481 + struct { 482 + unsigned long version; 483 + efi_status_t (__efiapi *get)(apple_properties_protocol_t *, 484 + struct efi_dev_path *, 485 + efi_char16_t *, void *, u32 *); 486 + efi_status_t (__efiapi *set)(apple_properties_protocol_t *, 487 + struct efi_dev_path *, 488 + efi_char16_t *, void *, u32); 489 + efi_status_t (__efiapi *del)(apple_properties_protocol_t *, 490 + struct efi_dev_path *, 491 + efi_char16_t *); 492 + efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *, 493 + void *buffer, u32 *); 494 + }; 495 + struct { 496 + u32 version; 497 + u32 get; 498 + u32 set; 499 + u32 del; 500 + u32 get_all; 501 + } mixed_mode; 502 + }; 491 503 492 504 typedef u32 efi_tcg2_event_log_format; 493 505 494 - typedef struct { 495 - void *get_capability; 496 - efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, 497 - efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); 498 - void 
*hash_log_extend_event; 499 - void *submit_command; 500 - void *get_active_pcr_banks; 501 - void *set_active_pcr_banks; 502 - void *get_result_of_set_active_pcr_banks; 503 - } efi_tcg2_protocol_t; 506 + typedef union efi_tcg2_protocol efi_tcg2_protocol_t; 507 + 508 + union efi_tcg2_protocol { 509 + struct { 510 + void *get_capability; 511 + efi_status_t (__efiapi *get_event_log)(efi_handle_t, 512 + efi_tcg2_event_log_format, 513 + efi_physical_addr_t *, 514 + efi_physical_addr_t *, 515 + efi_bool_t *); 516 + void *hash_log_extend_event; 517 + void *submit_command; 518 + void *get_active_pcr_banks; 519 + void *set_active_pcr_banks; 520 + void *get_result_of_set_active_pcr_banks; 521 + }; 522 + struct { 523 + u32 get_capability; 524 + u32 get_event_log; 525 + u32 hash_log_extend_event; 526 + u32 submit_command; 527 + u32 get_active_pcr_banks; 528 + u32 set_active_pcr_banks; 529 + u32 get_result_of_set_active_pcr_banks; 530 + } mixed_mode; 531 + }; 504 532 505 533 /* 506 534 * Types and defines for EFI ResetSystem ··· 540 552 u32 query_capsule_caps; 541 553 u32 query_variable_info; 542 554 } efi_runtime_services_32_t; 543 - 544 - typedef struct { 545 - efi_table_hdr_t hdr; 546 - u64 get_time; 547 - u64 set_time; 548 - u64 get_wakeup_time; 549 - u64 set_wakeup_time; 550 - u64 set_virtual_address_map; 551 - u64 convert_pointer; 552 - u64 get_variable; 553 - u64 get_next_variable; 554 - u64 set_variable; 555 - u64 get_next_high_mono_count; 556 - u64 reset_system; 557 - u64 update_capsule; 558 - u64 query_capsule_caps; 559 - u64 query_variable_info; 560 - } efi_runtime_services_64_t; 561 555 562 556 typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); 563 557 typedef efi_status_t efi_set_time_t (efi_time_t *tm); ··· 575 605 unsigned long size, 576 606 bool nonblocking); 577 607 578 - typedef struct { 579 - efi_table_hdr_t hdr; 580 - efi_get_time_t *get_time; 581 - efi_set_time_t *set_time; 582 - efi_get_wakeup_time_t *get_wakeup_time; 583 - 
efi_set_wakeup_time_t *set_wakeup_time; 584 - efi_set_virtual_address_map_t *set_virtual_address_map; 585 - void *convert_pointer; 586 - efi_get_variable_t *get_variable; 587 - efi_get_next_variable_t *get_next_variable; 588 - efi_set_variable_t *set_variable; 589 - efi_get_next_high_mono_count_t *get_next_high_mono_count; 590 - efi_reset_system_t *reset_system; 591 - efi_update_capsule_t *update_capsule; 592 - efi_query_capsule_caps_t *query_capsule_caps; 593 - efi_query_variable_info_t *query_variable_info; 608 + typedef union { 609 + struct { 610 + efi_table_hdr_t hdr; 611 + efi_get_time_t __efiapi *get_time; 612 + efi_set_time_t __efiapi *set_time; 613 + efi_get_wakeup_time_t __efiapi *get_wakeup_time; 614 + efi_set_wakeup_time_t __efiapi *set_wakeup_time; 615 + efi_set_virtual_address_map_t __efiapi *set_virtual_address_map; 616 + void *convert_pointer; 617 + efi_get_variable_t __efiapi *get_variable; 618 + efi_get_next_variable_t __efiapi *get_next_variable; 619 + efi_set_variable_t __efiapi *set_variable; 620 + efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count; 621 + efi_reset_system_t __efiapi *reset_system; 622 + efi_update_capsule_t __efiapi *update_capsule; 623 + efi_query_capsule_caps_t __efiapi *query_capsule_caps; 624 + efi_query_variable_info_t __efiapi *query_variable_info; 625 + }; 626 + efi_runtime_services_32_t mixed_mode; 594 627 } efi_runtime_services_t; 595 628 596 629 void efi_native_runtime_setup(void); ··· 679 706 u32 table; 680 707 } efi_config_table_32_t; 681 708 682 - typedef struct { 683 - efi_guid_t guid; 684 - unsigned long table; 709 + typedef union { 710 + struct { 711 + efi_guid_t guid; 712 + void *table; 713 + }; 714 + efi_config_table_32_t mixed_mode; 685 715 } efi_config_table_t; 686 716 687 717 typedef struct { ··· 736 760 u32 tables; 737 761 } efi_system_table_32_t; 738 762 739 - typedef struct { 740 - efi_table_hdr_t hdr; 741 - unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ 742 - u32 
fw_revision; 743 - unsigned long con_in_handle; 744 - unsigned long con_in; 745 - unsigned long con_out_handle; 746 - unsigned long con_out; 747 - unsigned long stderr_handle; 748 - unsigned long stderr; 749 - efi_runtime_services_t *runtime; 750 - efi_boot_services_t *boottime; 751 - unsigned long nr_tables; 752 - unsigned long tables; 763 + typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; 764 + 765 + typedef union { 766 + struct { 767 + efi_table_hdr_t hdr; 768 + unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ 769 + u32 fw_revision; 770 + unsigned long con_in_handle; 771 + unsigned long con_in; 772 + unsigned long con_out_handle; 773 + efi_simple_text_output_protocol_t *con_out; 774 + unsigned long stderr_handle; 775 + unsigned long stderr; 776 + efi_runtime_services_t *runtime; 777 + efi_boot_services_t *boottime; 778 + unsigned long nr_tables; 779 + unsigned long tables; 780 + }; 781 + efi_system_table_32_t mixed_mode; 753 782 } efi_system_table_t; 754 783 755 784 /* 756 785 * Architecture independent structure for describing a memory map for the 757 - * benefit of efi_memmap_init_early(), saving us the need to pass four 758 - * parameters. 786 + * benefit of efi_memmap_init_early(), and for passing context between 787 + * efi_memmap_alloc() and efi_memmap_install(). 
759 788 */ 760 789 struct efi_memory_map_data { 761 790 phys_addr_t phys_map; 762 791 unsigned long size; 763 792 unsigned long desc_version; 764 793 unsigned long desc_size; 794 + unsigned long flags; 765 795 }; 766 796 767 797 struct efi_memory_map { ··· 777 795 int nr_map; 778 796 unsigned long desc_version; 779 797 unsigned long desc_size; 780 - bool late; 798 + #define EFI_MEMMAP_LATE (1UL << 0) 799 + #define EFI_MEMMAP_MEMBLOCK (1UL << 1) 800 + #define EFI_MEMMAP_SLAB (1UL << 2) 801 + unsigned long flags; 781 802 }; 782 803 783 804 struct efi_mem_range { ··· 798 813 799 814 typedef struct { 800 815 u32 revision; 801 - u32 parent_handle; 802 - u32 system_table; 803 - u32 device_handle; 804 - u32 file_path; 805 - u32 reserved; 806 - u32 load_options_size; 807 - u32 load_options; 808 - u32 image_base; 809 - __aligned_u64 image_size; 810 - unsigned int image_code_type; 811 - unsigned int image_data_type; 812 - u32 unload; 813 - } efi_loaded_image_32_t; 814 - 815 - typedef struct { 816 - u32 revision; 817 - u64 parent_handle; 818 - u64 system_table; 819 - u64 device_handle; 820 - u64 file_path; 821 - u64 reserved; 822 - u32 load_options_size; 823 - u64 load_options; 824 - u64 image_base; 825 - __aligned_u64 image_size; 826 - unsigned int image_code_type; 827 - unsigned int image_data_type; 828 - u64 unload; 829 - } efi_loaded_image_64_t; 830 - 831 - typedef struct { 832 - u32 revision; 833 816 efi_handle_t parent_handle; 834 817 efi_system_table_t *system_table; 835 818 efi_handle_t device_handle; ··· 809 856 __aligned_u64 image_size; 810 857 unsigned int image_code_type; 811 858 unsigned int image_data_type; 812 - efi_status_t (*unload)(efi_handle_t image_handle); 859 + efi_status_t ( __efiapi *unload)(efi_handle_t image_handle); 813 860 } efi_loaded_image_t; 814 - 815 861 816 862 typedef struct { 817 863 u64 size; ··· 823 871 efi_char16_t filename[1]; 824 872 } efi_file_info_t; 825 873 826 - typedef struct { 827 - u64 revision; 828 - u32 open; 829 - u32 close; 
830 - u32 delete; 831 - u32 read; 832 - u32 write; 833 - u32 get_position; 834 - u32 set_position; 835 - u32 get_info; 836 - u32 set_info; 837 - u32 flush; 838 - } efi_file_handle_32_t; 874 + typedef struct efi_file_handle efi_file_handle_t; 839 875 840 - typedef struct { 876 + struct efi_file_handle { 841 877 u64 revision; 842 - u64 open; 843 - u64 close; 844 - u64 delete; 845 - u64 read; 846 - u64 write; 847 - u64 get_position; 848 - u64 set_position; 849 - u64 get_info; 850 - u64 set_info; 851 - u64 flush; 852 - } efi_file_handle_64_t; 853 - 854 - typedef struct _efi_file_handle { 855 - u64 revision; 856 - efi_status_t (*open)(struct _efi_file_handle *, 857 - struct _efi_file_handle **, 858 - efi_char16_t *, u64, u64); 859 - efi_status_t (*close)(struct _efi_file_handle *); 878 + efi_status_t (__efiapi *open)(efi_file_handle_t *, 879 + efi_file_handle_t **, 880 + efi_char16_t *, u64, u64); 881 + efi_status_t (__efiapi *close)(efi_file_handle_t *); 860 882 void *delete; 861 - efi_status_t (*read)(struct _efi_file_handle *, unsigned long *, 862 - void *); 883 + efi_status_t (__efiapi *read)(efi_file_handle_t *, 884 + unsigned long *, void *); 863 885 void *write; 864 886 void *get_position; 865 887 void *set_position; 866 - efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *, 867 - unsigned long *, void *); 888 + efi_status_t (__efiapi *get_info)(efi_file_handle_t *, 889 + efi_guid_t *, unsigned long *, 890 + void *); 868 891 void *set_info; 869 892 void *flush; 870 - } efi_file_handle_t; 893 + }; 871 894 872 - typedef struct { 873 - u64 revision; 874 - u32 open_volume; 875 - } efi_file_io_interface_32_t; 895 + typedef struct efi_file_io_interface efi_file_io_interface_t; 876 896 877 - typedef struct { 897 + struct efi_file_io_interface { 878 898 u64 revision; 879 - u64 open_volume; 880 - } efi_file_io_interface_64_t; 881 - 882 - typedef struct _efi_file_io_interface { 883 - u64 revision; 884 - int (*open_volume)(struct _efi_file_io_interface *, 885 - 
efi_file_handle_t **); 886 - } efi_file_io_interface_t; 899 + int (__efiapi *open_volume)(efi_file_io_interface_t *, 900 + efi_file_handle_t **); 901 + }; 887 902 888 903 #define EFI_FILE_MODE_READ 0x0000000000000001 889 904 #define EFI_FILE_MODE_WRITE 0x0000000000000002 ··· 934 1015 efi_query_capsule_caps_t *query_capsule_caps; 935 1016 efi_get_next_high_mono_count_t *get_next_high_mono_count; 936 1017 efi_reset_system_t *reset_system; 937 - efi_set_virtual_address_map_t *set_virtual_address_map; 938 1018 struct efi_memory_map memmap; 939 1019 unsigned long flags; 940 1020 } efi; ··· 974 1056 #endif 975 1057 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); 976 1058 977 - extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries); 1059 + extern int __init efi_memmap_alloc(unsigned int num_entries, 1060 + struct efi_memory_map_data *data); 1061 + extern void __efi_memmap_free(u64 phys, unsigned long size, 1062 + unsigned long flags); 978 1063 extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); 979 1064 extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); 980 1065 extern void __init efi_memmap_unmap(void); 981 - extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map); 1066 + extern int __init efi_memmap_install(struct efi_memory_map_data *data); 982 1067 extern int __init efi_memmap_split_count(efi_memory_desc_t *md, 983 1068 struct range *range); 984 1069 extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap, ··· 1312 1391 bool deleting; 1313 1392 }; 1314 1393 1315 - typedef struct { 1316 - u32 reset; 1317 - u32 output_string; 1318 - u32 test_string; 1319 - } efi_simple_text_output_protocol_32_t; 1320 - 1321 - typedef struct { 1322 - u64 reset; 1323 - u64 output_string; 1324 - u64 test_string; 1325 - } efi_simple_text_output_protocol_64_t; 1326 - 1327 - struct efi_simple_text_output_protocol { 1328 - void *reset; 1329 - efi_status_t (*output_string)(void *, void 
*); 1330 - void *test_string; 1394 + union efi_simple_text_output_protocol { 1395 + struct { 1396 + void *reset; 1397 + efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *, 1398 + efi_char16_t *); 1399 + void *test_string; 1400 + }; 1401 + struct { 1402 + u32 reset; 1403 + u32 output_string; 1404 + u32 test_string; 1405 + } mixed_mode; 1331 1406 }; 1332 1407 1333 1408 #define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 ··· 1332 1415 #define PIXEL_BLT_ONLY 3 1333 1416 #define PIXEL_FORMAT_MAX 4 1334 1417 1335 - struct efi_pixel_bitmask { 1418 + typedef struct { 1336 1419 u32 red_mask; 1337 1420 u32 green_mask; 1338 1421 u32 blue_mask; 1339 1422 u32 reserved_mask; 1340 - }; 1423 + } efi_pixel_bitmask_t; 1341 1424 1342 - struct efi_graphics_output_mode_info { 1425 + typedef struct { 1343 1426 u32 version; 1344 1427 u32 horizontal_resolution; 1345 1428 u32 vertical_resolution; 1346 1429 int pixel_format; 1347 - struct efi_pixel_bitmask pixel_information; 1430 + efi_pixel_bitmask_t pixel_information; 1348 1431 u32 pixels_per_scan_line; 1349 - } __packed; 1432 + } efi_graphics_output_mode_info_t; 1350 1433 1351 - struct efi_graphics_output_protocol_mode_32 { 1352 - u32 max_mode; 1353 - u32 mode; 1354 - u32 info; 1355 - u32 size_of_info; 1356 - u64 frame_buffer_base; 1357 - u32 frame_buffer_size; 1358 - } __packed; 1434 + typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t; 1359 1435 1360 - struct efi_graphics_output_protocol_mode_64 { 1361 - u32 max_mode; 1362 - u32 mode; 1363 - u64 info; 1364 - u64 size_of_info; 1365 - u64 frame_buffer_base; 1366 - u64 frame_buffer_size; 1367 - } __packed; 1368 - 1369 - struct efi_graphics_output_protocol_mode { 1370 - u32 max_mode; 1371 - u32 mode; 1372 - unsigned long info; 1373 - unsigned long size_of_info; 1374 - u64 frame_buffer_base; 1375 - unsigned long frame_buffer_size; 1376 - } __packed; 1377 - 1378 - struct efi_graphics_output_protocol_32 { 1379 - u32 query_mode; 1380 - u32 
set_mode; 1381 - u32 blt; 1382 - u32 mode; 1436 + union efi_graphics_output_protocol_mode { 1437 + struct { 1438 + u32 max_mode; 1439 + u32 mode; 1440 + efi_graphics_output_mode_info_t *info; 1441 + unsigned long size_of_info; 1442 + efi_physical_addr_t frame_buffer_base; 1443 + unsigned long frame_buffer_size; 1444 + }; 1445 + struct { 1446 + u32 max_mode; 1447 + u32 mode; 1448 + u32 info; 1449 + u32 size_of_info; 1450 + u64 frame_buffer_base; 1451 + u32 frame_buffer_size; 1452 + } mixed_mode; 1383 1453 }; 1384 1454 1385 - struct efi_graphics_output_protocol_64 { 1386 - u64 query_mode; 1387 - u64 set_mode; 1388 - u64 blt; 1389 - u64 mode; 1390 - }; 1455 + typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t; 1391 1456 1392 - struct efi_graphics_output_protocol { 1393 - unsigned long query_mode; 1394 - unsigned long set_mode; 1395 - unsigned long blt; 1396 - struct efi_graphics_output_protocol_mode *mode; 1457 + union efi_graphics_output_protocol { 1458 + struct { 1459 + void *query_mode; 1460 + void *set_mode; 1461 + void *blt; 1462 + efi_graphics_output_protocol_mode_t *mode; 1463 + }; 1464 + struct { 1465 + u32 query_mode; 1466 + u32 set_mode; 1467 + u32 blt; 1468 + u32 mode; 1469 + } mixed_mode; 1397 1470 }; 1398 - 1399 - typedef efi_status_t (*efi_graphics_output_protocol_query_mode)( 1400 - struct efi_graphics_output_protocol *, u32, unsigned long *, 1401 - struct efi_graphics_output_mode_info **); 1402 1471 1403 1472 extern struct list_head efivar_sysfs_list; 1404 1473 ··· 1485 1582 1486 1583 /* prototypes shared between arch specific and generic stub code */ 1487 1584 1488 - void efi_printk(efi_system_table_t *sys_table_arg, char *str); 1585 + void efi_printk(char *str); 1489 1586 1490 - void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, 1491 - unsigned long addr); 1587 + void efi_free(unsigned long size, unsigned long addr); 1492 1588 1493 - char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, 1494 - 
efi_loaded_image_t *image, int *cmd_line_len); 1589 + char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len); 1495 1590 1496 - efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, 1497 - struct efi_boot_memmap *map); 1591 + efi_status_t efi_get_memory_map(struct efi_boot_memmap *map); 1498 1592 1499 - efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, 1500 - unsigned long size, unsigned long align, 1593 + efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, 1501 1594 unsigned long *addr, unsigned long min); 1502 1595 1503 1596 static inline 1504 - efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, 1505 - unsigned long size, unsigned long align, 1597 + efi_status_t efi_low_alloc(unsigned long size, unsigned long align, 1506 1598 unsigned long *addr) 1507 1599 { 1508 1600 /* ··· 1505 1607 * checks pointers against NULL. Skip the first 8 1506 1608 * bytes so we start at a nice even number. 1507 1609 */ 1508 - return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8); 1610 + return efi_low_alloc_above(size, align, addr, 0x8); 1509 1611 } 1510 1612 1511 - efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, 1512 - unsigned long size, unsigned long align, 1613 + efi_status_t efi_high_alloc(unsigned long size, unsigned long align, 1513 1614 unsigned long *addr, unsigned long max); 1514 1615 1515 - efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, 1516 - unsigned long *image_addr, 1616 + efi_status_t efi_relocate_kernel(unsigned long *image_addr, 1517 1617 unsigned long image_size, 1518 1618 unsigned long alloc_size, 1519 1619 unsigned long preferred_addr, 1520 1620 unsigned long alignment, 1521 1621 unsigned long min_addr); 1522 1622 1523 - efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, 1524 - efi_loaded_image_t *image, 1623 + efi_status_t handle_cmdline_files(efi_loaded_image_t *image, 1525 1624 char *cmd_line, char *option_string, 
1526 1625 unsigned long max_addr, 1527 1626 unsigned long *load_addr, ··· 1526 1631 1527 1632 efi_status_t efi_parse_options(char const *cmdline); 1528 1633 1529 - efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, 1530 - struct screen_info *si, efi_guid_t *proto, 1634 + efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, 1531 1635 unsigned long size); 1532 1636 1533 1637 #ifdef CONFIG_EFI ··· 1544 1650 efi_secureboot_mode_disabled, 1545 1651 efi_secureboot_mode_enabled, 1546 1652 }; 1547 - enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table); 1653 + enum efi_secureboot_mode efi_get_secureboot(void); 1548 1654 1549 1655 #ifdef CONFIG_RESET_ATTACK_MITIGATION 1550 - void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg); 1656 + void efi_enable_reset_attack_mitigation(void); 1551 1657 #else 1552 1658 static inline void 1553 - efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } 1659 + efi_enable_reset_attack_mitigation(void) { } 1554 1660 #endif 1555 1661 1556 - efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg); 1662 + efi_status_t efi_random_get_seed(void); 1557 1663 1558 - void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); 1664 + void efi_retrieve_tpm2_eventlog(void); 1559 1665 1560 1666 /* 1561 1667 * Arch code can implement the following three template macros, avoiding ··· 1607 1713 }) 1608 1714 1609 1715 typedef efi_status_t (*efi_exit_boot_map_processing)( 1610 - efi_system_table_t *sys_table_arg, 1611 1716 struct efi_boot_memmap *map, 1612 1717 void *priv); 1613 1718 1614 - efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, 1615 - void *handle, 1719 + efi_status_t efi_exit_boot_services(void *handle, 1616 1720 struct efi_boot_memmap *map, 1617 1721 void *priv, 1618 1722 efi_exit_boot_map_processing priv_func); ··· 1700 1808 1701 1809 #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ 1702 
1810 / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) 1811 + 1812 + void efi_pci_disable_bridge_busmaster(void); 1703 1813 1704 1814 #endif /* _LINUX_EFI_H */
+5 -10
include/linux/mm.h
··· 625 625 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there 626 626 * is no special casing required. 627 627 */ 628 - static inline bool is_vmalloc_addr(const void *x) 629 - { 630 - #ifdef CONFIG_MMU 631 - unsigned long addr = (unsigned long)x; 632 - 633 - return addr >= VMALLOC_START && addr < VMALLOC_END; 634 - #else 635 - return false; 636 - #endif 637 - } 638 628 639 629 #ifndef is_ioremap_addr 640 630 #define is_ioremap_addr(x) is_vmalloc_addr(x) 641 631 #endif 642 632 643 633 #ifdef CONFIG_MMU 634 + extern bool is_vmalloc_addr(const void *x); 644 635 extern int is_vmalloc_or_module_addr(const void *x); 645 636 #else 637 + static inline bool is_vmalloc_addr(const void *x) 638 + { 639 + return false; 640 + } 646 641 static inline int is_vmalloc_or_module_addr(const void *x) 647 642 { 648 643 return 0;
+2
include/linux/vmalloc.h
··· 10 10 #include <linux/rbtree.h> 11 11 #include <linux/overflow.h> 12 12 13 + #include <asm/vmalloc.h> 14 + 13 15 struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 14 16 struct notifier_block; /* in notifier.h */ 15 17
+1 -1
mm/highmem.c
··· 29 29 #include <linux/highmem.h> 30 30 #include <linux/kgdb.h> 31 31 #include <asm/tlbflush.h> 32 - 32 + #include <linux/vmalloc.h> 33 33 34 34 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) 35 35 DEFINE_PER_CPU(int, __kmap_atomic_idx);
+8
mm/vmalloc.c
··· 41 41 42 42 #include "internal.h" 43 43 44 + bool is_vmalloc_addr(const void *x) 45 + { 46 + unsigned long addr = (unsigned long)x; 47 + 48 + return addr >= VMALLOC_START && addr < VMALLOC_END; 49 + } 50 + EXPORT_SYMBOL(is_vmalloc_addr); 51 + 44 52 struct vfree_deferred { 45 53 struct llist_head list; 46 54 struct work_struct wq;