Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kobject.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/efi.h>
23#include <linux/of.h>
24#include <linux/io.h>
25#include <linux/kexec.h>
26#include <linux/platform_device.h>
27#include <linux/random.h>
28#include <linux/reboot.h>
29#include <linux/slab.h>
30#include <linux/acpi.h>
31#include <linux/ucs2_string.h>
32#include <linux/memblock.h>
33#include <linux/security.h>
34
35#include <asm/early_ioremap.h>
36
/*
 * Global EFI state, exported for EFI consumers (efivars, esrt, TPM log
 * handling, ...). Every configuration table address starts out as
 * EFI_INVALID_TABLE_ADDR ("not present") and is filled in by
 * efi_config_parse_tables(); the runtime services mask starts fully
 * populated and is narrowed later (RT properties table, init failures).
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
48
/* Physical address of the LINUX_EFI_RANDOM_SEED config table, if present. */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
/* Physical addresses of the MEMRESERVE and RT_PROPERTIES config tables. */
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

/*
 * Dedicated mm for the EFI runtime region mappings, used by
 * architectures that invoke EFI runtime services with their own
 * page tables installed.
 */
struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
62
/* Ordered workqueue that serializes all EFI runtime service calls. */
struct workqueue_struct *efi_rts_wq;

/* Set by "noefi" or "efi=noruntime" to keep runtime services disabled. */
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

/* Report whether EFI runtime services were disabled on the command line. */
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}
77
/*
 * True unless "efi=nosoftreserve" was given: EFI "specific purpose"
 * (EFI_MEMORY_SP) regions are then honored as soft-reserved.
 */
bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}
82
83static int __init parse_efi_cmdline(char *str)
84{
85 if (!str) {
86 pr_warn("need at least one option\n");
87 return -EINVAL;
88 }
89
90 if (parse_option_str(str, "debug"))
91 set_bit(EFI_DBG, &efi.flags);
92
93 if (parse_option_str(str, "noruntime"))
94 disable_runtime = true;
95
96 if (parse_option_str(str, "nosoftreserve"))
97 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
98
99 return 0;
100}
101early_param("efi", parse_efi_cmdline);
102
/* Parent kobject for /sys/firmware/efi. */
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	/* Only tables that were actually discovered are listed. */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	/* IA64 and x86 append additional arch-specific entries. */
	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) {
		extern char *efi_systab_show_arch(char *str);

		str = efi_systab_show_arch(str);
	}

	return str - buf;
}

/* 0400: the listed addresses are kernel pointers, keep them root-only. */
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
143
/* /sys/firmware/efi/fw_platform_size: firmware word size, 64 or 32. */
static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

/* Provided elsewhere; weak so builds without them still link. */
extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

/* Attributes exposed under /sys/firmware/efi. */
static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

/* Default visibility: show everything; archs may override this hook. */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};
175
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

/*
 * Register the generic efivars backend, wiring the variable ops
 * straight through to the firmware's runtime services.
 */
static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.set_variable = efi.set_variable;
	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}
194
#if IS_ENABLED(CONFIG_ACPI)
#define EFIVAR_SSDT_NAME_MAX	16
/* Name of the EFI variable to load an SSDT from ("efivar_ssdt=" param). */
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	/* Loading ACPI tables is forbidden when the kernel is locked down. */
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	/* __initdata is zeroed, so the copied name stays NUL-terminated. */
	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 0;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
212
213static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
214 unsigned long name_size, void *data)
215{
216 struct efivar_entry *entry;
217 struct list_head *list = data;
218 char utf8_name[EFIVAR_SSDT_NAME_MAX];
219 int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
220
221 ucs2_as_utf8(utf8_name, name, limit - 1);
222 if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
223 return 0;
224
225 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
226 if (!entry)
227 return 0;
228
229 memcpy(entry->var.VariableName, name, name_size);
230 memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
231
232 efivar_entry_add(entry, list);
233
234 return 0;
235}
236
/*
 * Load ACPI SSDT override tables from the EFI variable(s) named by the
 * "efivar_ssdt=" parameter. Enumerates matching variables via
 * efivar_ssdt_iter(), reads each one and hands its contents to ACPICA.
 * Returns 0 when nothing was requested, otherwise the status of the
 * last processed variable.
 */
static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	/* Nothing to do unless "efivar_ssdt=" was given. */
	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		/*
		 * NB: on success ACPICA keeps referencing @data, so it is
		 * deliberately not freed; only the list entry is.
		 */
		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif
294
#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

/*
 * Expose the preserved EFI boot services code/data regions under
 * /sys/kernel/debug/efi/ as read-only blobs, one file per region
 * (boot_services_code0, boot_services_data0, ...). Only the first
 * EFI_DEBUGFS_MAX_BLOBS regions are exported.
 */
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			/* fixed typo: "More then" -> "More than" */
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		/* Skip (rather than fail on) regions we cannot map. */
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif
347
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	/* Nothing to register on a non-EFI boot. */
	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			/* Not fatal: boot continues without runtime services. */
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
		efivar_ssdt_load();
		error = generic_ops_register();
		if (error)
			goto err_put;
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

	return 0;

	/* Error unwinding: undo the steps above in reverse order. */
err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	return error;
}

subsys_initcall(efisubsys_init);
430
431/*
432 * Find the efi memory descriptor for a given physical address. Given a
433 * physical address, determine if it exists within an EFI Memory Map entry,
434 * and if so, populate the supplied memory descriptor with the appropriate
435 * data.
436 */
437int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
438{
439 efi_memory_desc_t *md;
440
441 if (!efi_enabled(EFI_MEMMAP)) {
442 pr_err_once("EFI_MEMMAP is not enabled.\n");
443 return -EINVAL;
444 }
445
446 if (!out_md) {
447 pr_err_once("out_md is null.\n");
448 return -EINVAL;
449 }
450
451 for_each_efi_memory_desc(md) {
452 u64 size;
453 u64 end;
454
455 size = md->num_pages << EFI_PAGE_SHIFT;
456 end = md->phys_addr + size;
457 if (phys_addr >= md->phys_addr && phys_addr < end) {
458 memcpy(out_md, md, sizeof(*out_md));
459 return 0;
460 }
461 }
462 return -ENOENT;
463}
464
465/*
466 * Calculate the highest address of an efi memory descriptor.
467 */
468u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
469{
470 u64 size = md->num_pages << EFI_PAGE_SHIFT;
471 u64 end = md->phys_addr + size;
472 return end;
473}
474
/* Arch hook: default is a no-op; x86 overrides it (see comment below). */
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
503
/*
 * Configuration tables recognized on all architectures. For each entry,
 * efi_config_parse_tables() stores the table's physical address through
 * the ptr member when the matching GUID is found; a NULL name suppresses
 * the boot-time log line. The list is NULL_GUID-terminated.
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
	{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
	{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
	{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed},
	{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
	{LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
	{EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
#endif
	{NULL_GUID, NULL, NULL},
};
521
522static __init int match_config_table(const efi_guid_t *guid,
523 unsigned long table,
524 const efi_config_table_type_t *table_types)
525{
526 int i;
527
528 if (table_types) {
529 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
530 if (!efi_guidcmp(*guid, table_types[i].guid)) {
531 *(table_types[i].ptr) = table;
532 if (table_types[i].name)
533 pr_cont(" %s=0x%lx ",
534 table_types[i].name, table);
535 return 1;
536 }
537 }
538 }
539
540 return 0;
541}
542
/*
 * Parse the firmware-provided array of configuration tables, recording
 * the addresses of every recognized table (common_tables plus the
 * arch-specific @arch_tables), then act on the Linux-specific ones:
 * seed the entropy pool from the RNG table, apply memory attributes,
 * initialize the TPM event log, reserve the MEMRESERVE list, and mask
 * the supported runtime services from the RT properties table.
 *
 * Returns 0 on success or a negative errno on fatal errors (table above
 * 4 GiB on 32-bit x86, unmappable memreserve entry).
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	/* On x86 the table layout depends on the firmware's word size. */
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	/* Start the single log line that match_config_table() appends to. */
	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables))
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* Map the header first to learn the payload size ... */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		/* ... then remap header+payload and feed the entropy pool. */
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	/* Walk the bootloader-provided linked list of memory reservations. */
	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	/* Narrow the runtime services mask per the RT properties table. */
	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}
654
655int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
656 int min_major_version)
657{
658 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
659 pr_err("System table signature incorrect!\n");
660 return -EINVAL;
661 }
662
663 if ((systab_hdr->revision >> 16) < min_major_version)
664 pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
665 systab_hdr->revision >> 16,
666 systab_hdr->revision & 0xffff,
667 min_major_version);
668
669 return 0;
670}
671
#ifndef CONFIG_IA64
/* Temporarily map the firmware vendor string; NULL on failure. */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
/* IA64: the vendor string is already accessible via the linear mapping. */
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif
692
/*
 * Log the firmware revision and vendor, e.g. "EFI v2.70 by <vendor>".
 * The UCS-2 vendor string is narrowed by taking the low byte of each
 * character (non-ASCII vendors will be mangled) and truncated to fit
 * the local buffer; "unknown" is reported when mapping fails.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);
}
714
/* Human-readable names indexed by EFI memory descriptor type. */
static __initdata char memory_type_name[][20] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional Memory",
	"Unusable Memory",
	"ACPI Reclaim Memory",
	"ACPI Memory NVS",
	"Memory Mapped I/O",
	"MMIO Port Space",
	"PAL Code",
	"Persistent Memory",
};
732
/*
 * Format an EFI memory descriptor's type and attributes into @buf
 * (at most @size bytes), e.g. "[Conventional Memory|   |  |  |...]".
 * Known attribute combinations are decoded into per-bit columns;
 * unknown bits cause the raw attribute value to be printed instead.
 * Returns @buf (possibly truncated output).
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	/* snprintf returns the would-be length; >= size means truncation. */
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}
779
780/*
781 * IA64 has a funky EFI memory map that doesn't work the same way as
782 * other architectures.
783 */
784#ifndef CONFIG_IA64
785/*
786 * efi_mem_attributes - lookup memmap attributes for physical address
787 * @phys_addr: the physical address to lookup
788 *
789 * Search in the EFI memory map for the region covering
790 * @phys_addr. Returns the EFI memory attributes if the region
791 * was found in the memory map, 0 otherwise.
792 */
793u64 efi_mem_attributes(unsigned long phys_addr)
794{
795 efi_memory_desc_t *md;
796
797 if (!efi_enabled(EFI_MEMMAP))
798 return 0;
799
800 for_each_efi_memory_desc(md) {
801 if ((md->phys_addr <= phys_addr) &&
802 (phys_addr < (md->phys_addr +
803 (md->num_pages << EFI_PAGE_SHIFT))))
804 return md->attribute;
805 }
806 return 0;
807}
808
809/*
810 * efi_mem_type - lookup memmap type for physical address
811 * @phys_addr: the physical address to lookup
812 *
813 * Search in the EFI memory map for the region covering @phys_addr.
814 * Returns the EFI memory type if the region was found in the memory
815 * map, -EINVAL otherwise.
816 */
817int efi_mem_type(unsigned long phys_addr)
818{
819 const efi_memory_desc_t *md;
820
821 if (!efi_enabled(EFI_MEMMAP))
822 return -ENOTSUPP;
823
824 for_each_efi_memory_desc(md) {
825 if ((md->phys_addr <= phys_addr) &&
826 (phys_addr < (md->phys_addr +
827 (md->num_pages << EFI_PAGE_SHIFT))))
828 return md->type;
829 }
830 return -EINVAL;
831}
832#endif
833
834int efi_status_to_err(efi_status_t status)
835{
836 int err;
837
838 switch (status) {
839 case EFI_SUCCESS:
840 err = 0;
841 break;
842 case EFI_INVALID_PARAMETER:
843 err = -EINVAL;
844 break;
845 case EFI_OUT_OF_RESOURCES:
846 err = -ENOSPC;
847 break;
848 case EFI_DEVICE_ERROR:
849 err = -EIO;
850 break;
851 case EFI_WRITE_PROTECTED:
852 err = -EROFS;
853 break;
854 case EFI_SECURITY_VIOLATION:
855 err = -EACCES;
856 break;
857 case EFI_NOT_FOUND:
858 err = -ENOENT;
859 break;
860 case EFI_ABORTED:
861 err = -EINTR;
862 break;
863 default:
864 err = -EINVAL;
865 }
866
867 return err;
868}
869
/* Serializes insertion of new entries into the memreserve list head. */
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/*
 * Permanent mapping of the memreserve table head; (void *)ULONG_MAX
 * marks "table absent or unmappable" (see efi_memreserve_root_init()).
 */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

/* Map the head of the firmware-provided memreserve linked list. */
static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}
885
/*
 * Register [addr, addr + size) as a "reserved" child of the iomem tree
 * so the region shows up in /proc/iomem. GFP_ATOMIC because callers may
 * not be able to sleep. Returns 0 on success or a negative errno.
 */
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	return parent ? request_resource(parent, res) : 0;
}
903
904int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
905{
906 struct linux_efi_memreserve *rsv;
907 unsigned long prsv;
908 int rc, index;
909
910 if (efi_memreserve_root == (void *)ULONG_MAX)
911 return -ENODEV;
912
913 if (!efi_memreserve_root) {
914 rc = efi_memreserve_map_root();
915 if (rc)
916 return rc;
917 }
918
919 /* first try to find a slot in an existing linked list entry */
920 for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
921 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
922 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
923 if (index < rsv->size) {
924 rsv->entry[index].base = addr;
925 rsv->entry[index].size = size;
926
927 memunmap(rsv);
928 return efi_mem_reserve_iomem(addr, size);
929 }
930 memunmap(rsv);
931 }
932
933 /* no slot found - allocate a new linked list entry */
934 rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
935 if (!rsv)
936 return -ENOMEM;
937
938 rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
939 if (rc) {
940 free_page((unsigned long)rsv);
941 return rc;
942 }
943
944 /*
945 * The memremap() call above assumes that a linux_efi_memreserve entry
946 * never crosses a page boundary, so let's ensure that this remains true
947 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
948 * using SZ_4K explicitly in the size calculation below.
949 */
950 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
951 atomic_set(&rsv->count, 1);
952 rsv->entry[0].base = addr;
953 rsv->entry[0].size = size;
954
955 spin_lock(&efi_mem_reserve_persistent_lock);
956 rsv->next = efi_memreserve_root->next;
957 efi_memreserve_root->next = __pa(rsv);
958 spin_unlock(&efi_mem_reserve_persistent_lock);
959
960 return efi_mem_reserve_iomem(addr, size);
961}
962
/*
 * Map the memreserve table head early; on failure set the ULONG_MAX
 * sentinel so efi_mem_reserve_persistent() bails out with -ENODEV.
 */
static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);
972
#ifdef CONFIG_KEXEC
/*
 * Reboot notifier: just before kexec, refresh the EFI random seed table
 * with fresh entropy so the next kernel does not reuse this boot's seed.
 */
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	/* Only act on a kexec reboot, not a regular shutdown/reboot. */
	if (!kexec_in_progress)
		return NOTIFY_DONE;

	/* Map the header first to learn (and clamp) the payload size. */
	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

/* Only register the notifier when the firmware provided a seed table. */
static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif