
Merge tag 'efi-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi into x86/efi

Pull EFI virtual mapping changes from Matt Fleming:

* New static EFI runtime services virtual mapping layout which is
groundwork for kexec support on EFI. (Borislav Petkov)

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+755 -70
+6
Documentation/kernel-parameters.txt
···
 	edd=		[EDD]
 			Format: {"off" | "on" | "skip[mbr]"}
 
+	efi=		[EFI]
+			Format: { "old_map" }
+			old_map [X86-64]: switch to the old ioremap-based EFI
+			runtime services mapping. 32-bit still uses this one by
+			default.
+
 	efi_no_storage_paranoia	[EFI; X86]
 			Using this parameter you can use more than 50% of
 			your efi variable storage. Use this parameter only if
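The string handling behind this parameter lives in parse_efi_cmdline() in the arch/x86/platform/efi/efi.c hunk further down. A userspace re-creation of just that logic, for illustration (the kernel version sets the EFI_OLD_MEMMAP bit in x86_efi_facility instead of a local flag):

	#include <stdio.h>
	#include <string.h>

	static int parse_efi_cmdline(const char *str, int *old_map)
	{
		if (*str == '=')	/* early_param() may hand us "=old_map" */
			str++;

		if (!strncmp(str, "old_map", 7))
			*old_map = 1;

		return 0;
	}

	int main(void)
	{
		int old_map = 0;

		parse_efi_cmdline("=old_map", &old_map);
		printf("old_map: %d\n", old_map);	/* prints 1 */
		return 0;
	}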
+7
Documentation/x86/x86_64/mm.txt
···
 Current X86-64 implementations only support 40 bits of address space,
 but we support up to 46 bits. This expands into MBZ space in the page tables.
 
+->trampoline_pgd:
+
+We map EFI runtime services in the aforementioned PGD in the virtual
+range of 64Gb (arbitrarily set, can be raised if needed)
+
+0xffffffef00000000 - 0xffffffff00000000
+
 -Andi Kleen, Jul 2004
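A quick sanity check of that window (a standalone computation, not from the patch): 0xffffffff00000000 − 0xffffffef00000000 = 0x1000000000 bytes = 2^36 = 64 GiB, matching the -4G to -68G limits that efi_64.c establishes below.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t start = 0xffffffef00000000ULL;	/* EFI_VA_END: -68G */
		uint64_t end   = 0xffffffff00000000ULL;	/* efi_va start: -4G */

		/* prints "64 GiB" */
		printf("%llu GiB\n", (unsigned long long)((end - start) >> 30));
		return 0;
	}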
+47 -17
arch/x86/include/asm/efi.h
···
 #ifndef _ASM_X86_EFI_H
 #define _ASM_X86_EFI_H
 
+/*
+ * We map the EFI regions needed for runtime services non-contiguously,
+ * with preserved alignment on virtual addresses starting from -4G down
+ * for a total max space of 64G. This way, we provide for stable runtime
+ * services addresses across kernels so that a kexec'd kernel can still
+ * use them.
+ *
+ * This is the main reason why we're doing stable VA mappings for RT
+ * services.
+ *
+ * This flag is used in conjunction with a chicken bit called
+ * "efi=old_map" which can be used as a fallback to the old runtime
+ * services mapping method in case there's some b0rkage with a
+ * particular EFI implementation (haha, it is hard to hold up the
+ * sarcasm here...).
+ */
+#define EFI_OLD_MEMMAP		EFI_ARCH_1
+
 #ifdef CONFIG_X86_32
 
 #define EFI_LOADER_SIGNATURE	"EL32"
···
 	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),		\
 		  (u64)(a4), (u64)(a5), (u64)(a6))
 
+#define _efi_call_virtX(x, f, ...)					\
+({									\
+	efi_status_t __s;						\
+									\
+	efi_sync_low_kernel_mappings();					\
+	preempt_disable();						\
+	__s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \
+	preempt_enable();						\
+	__s;								\
+})
+
 #define efi_call_virt0(f)				\
-	efi_call0((efi.systab->runtime->f))
-#define efi_call_virt1(f, a1)					\
-	efi_call1((efi.systab->runtime->f), (u64)(a1))
-#define efi_call_virt2(f, a1, a2)				\
-	efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3)				\
-	efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2),	\
-		  (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4)			\
-	efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2),	\
-		  (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5)			\
-	efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2),	\
-		  (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)		\
-	efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2),	\
-		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+	_efi_call_virtX(0, f)
+#define efi_call_virt1(f, a1)					\
+	_efi_call_virtX(1, f, (u64)(a1))
+#define efi_call_virt2(f, a1, a2)				\
+	_efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
+#define efi_call_virt3(f, a1, a2, a3)				\
+	_efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
+#define efi_call_virt4(f, a1, a2, a3, a4)			\
+	_efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
+#define efi_call_virt5(f, a1, a2, a3, a4, a5)			\
+	_efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
+#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)		\
+	_efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 				 u32 type, u64 attribute);
···
 
 extern int add_efi_memmap;
 extern unsigned long x86_efi_facility;
+extern struct efi_scratch efi_scratch;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
+extern void __init efi_map_region(efi_memory_desc_t *md);
+extern void efi_sync_low_kernel_mappings(void);
+extern void efi_setup_page_tables(void);
+extern void __init old_map_region(efi_memory_desc_t *md);
 
 #ifdef CONFIG_EFI
 
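To make the new wrapper concrete, here is a hand-expanded sketch (not part of the patch) of what efi_call_virt2(get_time, tm, tc) now boils down to; get_time is a real member of the runtime services table:

	efi_status_t status = ({
		efi_status_t __s;

		/* copy kernel PGDs into the trampoline PGD so pointer
		 * arguments resolve in the EFI address space */
		efi_sync_low_kernel_mappings();
		/* no migration while we may run on the EFI pagetable */
		preempt_disable();
		__s = efi_call2((void *)efi.systab->runtime->get_time,
				(u64)tm, (u64)tc);
		preempt_enable();
		__s;
	});

The preempt_disable()/preempt_enable() pair is what the efi.c comment below means by "a pagetable switch in a preemption-safe manner": the CR3 switch done in efi_stub_64.S must not be interleaved with a task migration.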
+2 -1
arch/x86/include/asm/pgtable_types.h
···
  */
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
 extern phys_addr_t slow_virt_to_phys(void *__address);
-
+extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+				   unsigned numpages, unsigned long page_flags);
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_DEFS_H */
+444 -17
arch/x86/mm/pageattr.c
···
  */
 struct cpa_data {
 	unsigned long	*vaddr;
+	pgd_t		*pgd;
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
 	int		numpages;
···
 	return prot;
 }
 
-/*
- * Lookup the page table entry for a virtual address. Return a pointer
- * to the entry and the level of the mapping.
- *
- * Note: We return pud and pmd either when the entry is marked large
- * or when the present bit is not set. Otherwise we would return a
- * pointer to a nonexisting mapping.
- */
-pte_t *lookup_address(unsigned long address, unsigned int *level)
+static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+				      unsigned int *level)
 {
-	pgd_t *pgd = pgd_offset_k(address);
 	pud_t *pud;
 	pmd_t *pmd;
···
 
 	return pte_offset_kernel(pmd, address);
 }
+
+/*
+ * Lookup the page table entry for a virtual address. Return a pointer
+ * to the entry and the level of the mapping.
+ *
+ * Note: We return pud and pmd either when the entry is marked large
+ * or when the present bit is not set. Otherwise we would return a
+ * pointer to a nonexisting mapping.
+ */
+pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+	return __lookup_address_in_pgd(pgd_offset_k(address), address, level);
+}
 EXPORT_SYMBOL_GPL(lookup_address);
+
+static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+				  unsigned int *level)
+{
+	if (cpa->pgd)
+		return __lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+					       address, level);
+
+	return lookup_address(address, level);
+}
 
 /*
  * This is necessary because __pa() does not work on some
···
 	 * Check for races, another CPU might have split this page
 	 * up already:
 	 */
-	tmp = lookup_address(address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level);
 	if (tmp != kpte)
 		goto out_unlock;
···
 }
 
 static int
-__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
+__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+		   struct page *base)
 {
 	pte_t *pbase = (pte_t *)page_address(base);
 	unsigned long pfn, pfninc = 1;
···
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
 	 */
-	tmp = lookup_address(address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level);
 	if (tmp != kpte) {
 		spin_unlock(&pgd_lock);
 		return 1;
···
 	return 0;
 }
 
-static int split_large_page(pte_t *kpte, unsigned long address)
+static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
+			    unsigned long address)
 {
 	struct page *base;
···
 	if (!base)
 		return -ENOMEM;
 
-	if (__split_large_page(kpte, address, base))
+	if (__split_large_page(cpa, kpte, address, base))
 		__free_page(base);
 
+	return 0;
+}
+
+static bool try_to_free_pte_page(pte_t *pte)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		if (!pte_none(pte[i]))
+			return false;
+
+	free_page((unsigned long)pte);
+	return true;
+}
+
+static bool try_to_free_pmd_page(pmd_t *pmd)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_none(pmd[i]))
+			return false;
+
+	free_page((unsigned long)pmd);
+	return true;
+}
+
+static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+{
+	pte_t *pte = pte_offset_kernel(pmd, start);
+
+	while (start < end) {
+		set_pte(pte, __pte(0));
+
+		start += PAGE_SIZE;
+		pte++;
+	}
+
+	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+		pmd_clear(pmd);
+		return true;
+	}
+	return false;
+}
+
+static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+			      unsigned long start, unsigned long end)
+{
+	if (unmap_pte_range(pmd, start, end))
+		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+			pud_clear(pud);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+{
+	pmd_t *pmd = pmd_offset(pud, start);
+
+	/*
+	 * Not on a 2MB page boundary?
+	 */
+	if (start & (PMD_SIZE - 1)) {
+		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+		unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+		__unmap_pmd_range(pud, pmd, start, pre_end);
+
+		start = pre_end;
+		pmd++;
+	}
+
+	/*
+	 * Try to unmap in 2M chunks.
+	 */
+	while (end - start >= PMD_SIZE) {
+		if (pmd_large(*pmd))
+			pmd_clear(pmd);
+		else
+			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+
+		start += PMD_SIZE;
+		pmd++;
+	}
+
+	/*
+	 * 4K leftovers?
+	 */
+	if (start < end)
+		return __unmap_pmd_range(pud, pmd, start, end);
+
+	/*
+	 * Try again to free the PMD page if haven't succeeded above.
+	 */
+	if (!pud_none(*pud))
+		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+			pud_clear(pud);
+}
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, start);
+
+	/*
+	 * Not on a GB page boundary?
+	 */
+	if (start & (PUD_SIZE - 1)) {
+		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+		unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+		unmap_pmd_range(pud, start, pre_end);
+
+		start = pre_end;
+		pud++;
+	}
+
+	/*
+	 * Try to unmap in 1G chunks?
+	 */
+	while (end - start >= PUD_SIZE) {
+
+		if (pud_large(*pud))
+			pud_clear(pud);
+		else
+			unmap_pmd_range(pud, start, start + PUD_SIZE);
+
+		start += PUD_SIZE;
+		pud++;
+	}
+
+	/*
+	 * 2M leftovers?
+	 */
+	if (start < end)
+		unmap_pmd_range(pud, start, end);
+
+	/*
+	 * No need to try to free the PUD page because we'll free it in
+	 * populate_pgd's error path
+	 */
+}
+
+static int alloc_pte_page(pmd_t *pmd)
+{
+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	if (!pte)
+		return -1;
+
+	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+	return 0;
+}
+
+static int alloc_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	if (!pmd)
+		return -1;
+
+	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+	return 0;
+}
+
+static void populate_pte(struct cpa_data *cpa,
+			 unsigned long start, unsigned long end,
+			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, start);
+
+	while (num_pages-- && start < end) {
+
+		/* deal with the NX bit */
+		if (!(pgprot_val(pgprot) & _PAGE_NX))
+			cpa->pfn &= ~_PAGE_NX;
+
+		set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+
+		start	 += PAGE_SIZE;
+		cpa->pfn += PAGE_SIZE;
+		pte++;
+	}
+}
+
+static int populate_pmd(struct cpa_data *cpa,
+			unsigned long start, unsigned long end,
+			unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+	unsigned int cur_pages = 0;
+	pmd_t *pmd;
+
+	/*
+	 * Not on a 2M boundary?
+	 */
+	if (start & (PMD_SIZE - 1)) {
+		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+		pre_end   = min_t(unsigned long, pre_end, next_page);
+		cur_pages = (pre_end - start) >> PAGE_SHIFT;
+		cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+		/*
+		 * Need a PTE page?
+		 */
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+		start = pre_end;
+	}
+
+	/*
+	 * We mapped them all?
+	 */
+	if (num_pages == cur_pages)
+		return cur_pages;
+
+	while (end - start >= PMD_SIZE) {
+
+		/*
+		 * We cannot use a 1G page so allocate a PMD page if needed.
+		 */
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		pmd = pmd_offset(pud, start);
+
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+		start	  += PMD_SIZE;
+		cpa->pfn  += PMD_SIZE;
+		cur_pages += PMD_SIZE >> PAGE_SHIFT;
+	}
+
+	/*
+	 * Map trailing 4K pages.
+	 */
+	if (start < end) {
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, end, num_pages - cur_pages,
+			     pmd, pgprot);
+	}
+	return num_pages;
+}
+
+static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+			pgprot_t pgprot)
+{
+	pud_t *pud;
+	unsigned long end;
+	int cur_pages = 0;
+
+	end = start + (cpa->numpages << PAGE_SHIFT);
+
+	/*
+	 * Not on a Gb page boundary? => map everything up to it with
+	 * smaller pages.
+	 */
+	if (start & (PUD_SIZE - 1)) {
+		unsigned long pre_end;
+		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+
+		pre_end   = min_t(unsigned long, end, next_page);
+		cur_pages = (pre_end - start) >> PAGE_SHIFT;
+		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
+
+		pud = pud_offset(pgd, start);
+
+		/*
+		 * Need a PMD page?
+		 */
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
+					 pud, pgprot);
+		if (cur_pages < 0)
+			return cur_pages;
+
+		start = pre_end;
+	}
+
+	/* We mapped them all? */
+	if (cpa->numpages == cur_pages)
+		return cur_pages;
+
+	pud = pud_offset(pgd, start);
+
+	/*
+	 * Map everything starting from the Gb boundary, possibly with 1G pages
+	 */
+	while (end - start >= PUD_SIZE) {
+		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+		start	  += PUD_SIZE;
+		cpa->pfn  += PUD_SIZE;
+		cur_pages += PUD_SIZE >> PAGE_SHIFT;
+		pud++;
+	}
+
+	/* Map trailing leftover */
+	if (start < end) {
+		int tmp;
+
+		pud = pud_offset(pgd, start);
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
+				   pud, pgprot);
+		if (tmp < 0)
+			return cur_pages;
+
+		cur_pages += tmp;
+	}
+	return cur_pages;
+}
+
+/*
+ * Restrictions for kernel page table do not necessarily apply when mapping in
+ * an alternate PGD.
+ */
+static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
+{
+	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
+	bool allocd_pgd = false;
+	pgd_t *pgd_entry;
+	pud_t *pud = NULL;	/* shut up gcc */
+	int ret;
+
+	pgd_entry = cpa->pgd + pgd_index(addr);
+
+	/*
+	 * Allocate a PUD page and hand it down for mapping.
+	 */
+	if (pgd_none(*pgd_entry)) {
+		pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+		if (!pud)
+			return -1;
+
+		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+		allocd_pgd = true;
+	}
+
+	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
+	pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);
+
+	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+	if (ret < 0) {
+		unmap_pud_range(pgd_entry, addr,
+				addr + (cpa->numpages << PAGE_SHIFT));
+
+		if (allocd_pgd) {
+			/*
+			 * If I allocated this PUD page, I can just as well
+			 * free it in this error path.
+			 */
+			pgd_clear(pgd_entry);
+			free_page((unsigned long)pud);
+		}
+		return ret;
+	}
+	cpa->numpages = ret;
 	return 0;
 }
 
 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
 			       int primary)
 {
+	if (cpa->pgd)
+		return populate_pgd(cpa, vaddr);
+
 	/*
 	 * Ignore all non primary paths.
 	 */
···
 	else
 		address = *cpa->vaddr;
 repeat:
-	kpte = lookup_address(address, &level);
+	kpte = _lookup_address_cpa(cpa, address, &level);
 	if (!kpte)
 		return __cpa_process_fault(cpa, address, primary);
···
 	/*
 	 * We have to split the large page:
 	 */
-	err = split_large_page(kpte, address);
+	err = split_large_page(cpa, kpte, address);
 	if (!err) {
 		/*
 		 * Do a global flush tlb after splitting the large page
···
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
 	unsigned long baddr = 0;
+
+	memset(&cpa, 0, sizeof(cpa));
 
 	/*
 	 * Check, if we are requested to change a not supported
···
 {
 	unsigned long tempaddr = (unsigned long) page_address(page);
 	struct cpa_data cpa = { .vaddr = &tempaddr,
+				.pgd = NULL,
 				.numpages = numpages,
 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
 				.mask_clr = __pgprot(0),
···
 {
 	unsigned long tempaddr = (unsigned long) page_address(page);
 	struct cpa_data cpa = { .vaddr = &tempaddr,
+				.pgd = NULL,
 				.numpages = numpages,
 				.mask_set = __pgprot(0),
 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
···
 #endif /* CONFIG_HIBERNATION */
 
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+			    unsigned numpages, unsigned long page_flags)
+{
+	int retval = -EINVAL;
+
+	struct cpa_data cpa = {
+		.vaddr = &address,
+		.pfn = pfn,
+		.pgd = pgd,
+		.numpages = numpages,
+		.mask_set = __pgprot(0),
+		.mask_clr = __pgprot(0),
+		.flags = 0,
+	};
+
+	if (!(__supported_pte_mask & _PAGE_NX))
+		goto out;
+
+	if (!(page_flags & _PAGE_NX))
+		cpa.mask_clr = __pgprot(_PAGE_NX);
+
+	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
+
+	retval = __change_page_attr_set_clr(&cpa, 0);
+	__flush_tlb_all();
+
+out:
+	return retval;
+}
 
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
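One non-obvious detail worth calling out: populate_pte() and populate_pmd() advance cpa->pfn by PAGE_SIZE and PMD_SIZE respectively, so despite its name the pfn argument of kernel_map_pages_in_pgd() is treated as a physical address, and the efi_64.c hunk below accordingly passes md->phys_addr directly. A condensed sketch of such a caller, modeled on __map_region() from that hunk (kernel context assumed; the function name is invented):

	static void __init sketch_map_one_region(pgd_t *pgd,
						 efi_memory_desc_t *md, u64 va)
	{
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;	/* map uncached */

		/* 2nd argument is md->phys_addr, not a true pfn (see above) */
		if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va,
					    md->num_pages, pf))
			pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
				md->phys_addr, va);
	}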
+77 -34
arch/x86/platform/efi/efi.c
···
  *	Bibo Mao <bibo.mao@intel.com>
  *	Chandramouli Narayanan <mouli@linux.intel.com>
  *	Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 SuSE Labs
+ *	Borislav Petkov <bp@suse.de> - runtime services VA mapping
  *
  * Copied from efi_32.c to eliminate the duplicated code between EFI
  * 32/64 support code. --ying 2007-10-26
···
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
 
-#define EFI_DEBUG	1
+#define EFI_DEBUG
 
 #define EFI_MIN_RESERVE 5120
···
 	return 0;
 }
 
-#if EFI_DEBUG
 static void __init print_efi_memmap(void)
 {
+#ifdef EFI_DEBUG
 	efi_memory_desc_t *md;
 	void *p;
 	int i;
···
 			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
 	}
-}
 #endif  /* EFI_DEBUG */
+}
 
 void __init efi_reserve_boot_services(void)
 {
···
 		x86_platform.set_wallclock = efi_set_rtc_mmss;
 	}
 #endif
-
-#if EFI_DEBUG
 	print_efi_memmap();
-#endif
 }
 
 void __init efi_late_init(void)
···
 	set_memory_uc(addr, npages);
 }
 
+void __init old_map_region(efi_memory_desc_t *md)
+{
+	u64 start_pfn, end_pfn, end;
+	unsigned long size;
+	void *va;
+
+	start_pfn = PFN_DOWN(md->phys_addr);
+	size	  = md->num_pages << PAGE_SHIFT;
+	end	  = md->phys_addr + size;
+	end_pfn   = PFN_UP(end);
+
+	if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+		va = __va(md->phys_addr);
+
+		if (!(md->attribute & EFI_MEMORY_WB))
+			efi_memory_uc((u64)(unsigned long)va, size);
+	} else
+		va = efi_ioremap(md->phys_addr, size,
+				 md->type, md->attribute);
+
+	md->virt_addr = (u64) (unsigned long) va;
+	if (!va)
+		pr_err("ioremap of 0x%llX failed!\n",
+		       (unsigned long long)md->phys_addr);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
- * Essentially, look through the EFI memmap and map every region that
- * has the runtime attribute bit set in its memory descriptor and update
- * that memory descriptor with the virtual address obtained from ioremap().
- * This enables the runtime services to be called without having to
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ *
+ * The old method which used to update that memory descriptor with the
+ * virtual address obtained from ioremap() is still supported when the
+ * kernel is booted with efi=old_map on its command line. Same old
+ * method enabled the runtime services to be called without having to
  * thunk back into physical mode for every invocation.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. For function arguments passing we do copy the PGDs of the
+ * kernel page table into ->trampoline_pgd prior to each call.
  */
 void __init efi_enter_virtual_mode(void)
 {
 	efi_memory_desc_t *md, *prev_md = NULL;
-	efi_status_t status;
+	void *p, *new_memmap = NULL;
 	unsigned long size;
-	u64 end, systab, start_pfn, end_pfn;
-	void *p, *va, *new_memmap = NULL;
+	efi_status_t status;
+	u64 end, systab;
 	int count = 0;
 
 	efi.systab = NULL;
···
 	 * We don't do virtual mode, since we don't do runtime services, on
 	 * non-native EFI
 	 */
-
 	if (!efi_is_native()) {
 		efi_unmap_memmap();
 		return;
···
 			continue;
 		}
 		prev_md = md;
+
 	}
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
···
 			continue;
 		}
 
+		efi_map_region(md);
+
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
-
-		start_pfn = PFN_DOWN(md->phys_addr);
-		end_pfn = PFN_UP(end);
-		if (pfn_range_is_mapped(start_pfn, end_pfn)) {
-			va = __va(md->phys_addr);
-
-			if (!(md->attribute & EFI_MEMORY_WB))
-				efi_memory_uc((u64)(unsigned long)va, size);
-		} else
-			va = efi_ioremap(md->phys_addr, size,
-					 md->type, md->attribute);
-
-		md->virt_addr = (u64) (unsigned long) va;
-
-		if (!va) {
-			pr_err("ioremap of 0x%llX failed!\n",
-			       (unsigned long long)md->phys_addr);
-			continue;
-		}
 
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
+
 			efi.systab = (efi_system_table_t *) (unsigned long) systab;
 		}
+
 		new_memmap = krealloc(new_memmap,
 				      (count + 1) * memmap.desc_size,
 				      GFP_KERNEL);
+		if (!new_memmap)
+			goto err_out;
+
 		memcpy(new_memmap + (count * memmap.desc_size), md,
 		       memmap.desc_size);
 		count++;
 	}
 
 	BUG_ON(!efi.systab);
+
+	efi_setup_page_tables();
+	efi_sync_low_kernel_mappings();
 
 	status = phys_efi_set_virtual_address_map(
 			memmap.desc_size * count,
···
 	efi.query_variable_info = virt_efi_query_variable_info;
 	efi.update_capsule = virt_efi_update_capsule;
 	efi.query_capsule_caps = virt_efi_query_capsule_caps;
-	if (__supported_pte_mask & _PAGE_NX)
+
+	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
 		runtime_code_page_mkexec();
 
 	kfree(new_memmap);
···
 			 EFI_VARIABLE_BOOTSERVICE_ACCESS |
 			 EFI_VARIABLE_RUNTIME_ACCESS,
 			 0, NULL);
+
+	return;
+
+err_out:
+	pr_err("Error reallocating memory, EFI runtime non-functional!\n");
 }
 
 /*
···
 	return EFI_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(efi_query_variable_store);
+
+static int __init parse_efi_cmdline(char *str)
+{
+	if (*str == '=')
+		str++;
+
+	if (!strncmp(str, "old_map", 7))
+		set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+
+	return 0;
+}
+early_param("efi", parse_efi_cmdline);
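The memmap loop grows new_memmap with krealloc(), and the patch adds the previously missing allocation-failure check. One subtlety, shown in this userspace model (not from the patch): assigning the krealloc() result straight back to new_memmap leaves the old buffer unreachable if the allocation fails; tolerable here since err_out declares the EFI runtime non-functional anyway, but the usual pattern keeps the old pointer:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		size_t desc_size = 48;		/* invented descriptor size */
		unsigned char md[48] = { 0 };	/* stand-in descriptor */
		unsigned char *new_memmap = NULL;
		int count, i;

		for (count = 0, i = 0; i < 4; i++) {
			void *tmp = realloc(new_memmap,
					    (count + 1) * desc_size);
			if (!tmp) {		/* kernel hunk: goto err_out */
				free(new_memmap); /* old buffer still freeable */
				return 1;
			}
			new_memmap = tmp;
			memcpy(new_memmap + count * desc_size, md, desc_size);
			count++;
		}

		printf("%d descriptors copied\n", count);
		free(new_memmap);
		return 0;
	}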
+8 -1
arch/x86/platform/efi/efi_32.c
···
  * claim EFI runtime service handler exclusively and to duplicate a memory in
  * low memory space say 0 - 3G.
  */
-
 static unsigned long efi_rt_eflags;
+
+void efi_sync_low_kernel_mappings(void) {}
+void efi_setup_page_tables(void) {}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+	old_map_region(md);
+}
 
 void efi_call_phys_prelog(void)
 {
+109
arch/x86/platform/efi/efi_64.c
···
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/realmode.h>
 
 static pgd_t *save_pgd __initdata;
 static unsigned long efi_flags __initdata;
+
+/*
+ * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
+ */
+static u64 efi_va	= -4 * (1UL << 30);
+#define EFI_VA_END	(-68 * (1UL << 30))
+
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+	u64 r15;
+	u64 prev_cr3;
+	pgd_t *efi_pgt;
+	bool use_pgd;
+};
 
 static void __init early_code_mapping_set_exec(int executable)
 {
···
 	int pgd;
 	int n_pgds;
 
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
 	early_code_mapping_set_exec(1);
 	local_irq_save(efi_flags);
 
···
 	 */
 	int pgd;
 	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
 	for (pgd = 0; pgd < n_pgds; pgd++)
 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
 	kfree(save_pgd);
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
+}
+
+/*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+void efi_sync_low_kernel_mappings(void)
+{
+	unsigned num_pgds;
+	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
+	num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+
+	memcpy(pgd + pgd_index(PAGE_OFFSET),
+	       init_mm.pgd + pgd_index(PAGE_OFFSET),
+	       sizeof(pgd_t) * num_pgds);
+}
+
+void efi_setup_page_tables(void)
+{
+	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		efi_scratch.use_pgd = true;
+}
+
+static void __init __map_region(efi_memory_desc_t *md, u64 va)
+{
+	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+	unsigned long pf = 0, size;
+	u64 end;
+
+	if (!(md->attribute & EFI_MEMORY_WB))
+		pf |= _PAGE_PCD;
+
+	size = md->num_pages << PAGE_SHIFT;
+	end  = va + size;
+
+	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+			md->phys_addr, va);
+}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+	unsigned long size = md->num_pages << PAGE_SHIFT;
+	u64 pa = md->phys_addr;
+
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		return old_map_region(md);
+
+	/*
+	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
+	 * firmware which doesn't update all internal pointers after switching
+	 * to virtual mode and would otherwise crap on us.
+	 */
+	__map_region(md, md->phys_addr);
+
+	efi_va -= size;
+
+	/* Is PA 2M-aligned? */
+	if (!(pa & (PMD_SIZE - 1))) {
+		efi_va &= PMD_MASK;
+	} else {
+		u64 pa_offset = pa & (PMD_SIZE - 1);
+		u64 prev_va = efi_va;
+
+		/* get us the same offset within this 2M page */
+		efi_va = (efi_va & PMD_MASK) + pa_offset;
+
+		if (efi_va > prev_va)
+			efi_va -= PMD_SIZE;
+	}
+
+	if (efi_va < EFI_VA_END) {
+		pr_warn(FW_WARN "VA address range overflow!\n");
+		return;
+	}
+
+	/* Do the VA map */
+	__map_region(md, efi_va);
+	md->virt_addr = efi_va;
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
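The offset-matching dance in efi_map_region() is easiest to see with numbers. A standalone model with invented inputs (PMD_SIZE/PMD_MASK are redefined locally; the physical address and region size are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define PMD_SIZE (2ULL << 20)		/* 2 MiB */
	#define PMD_MASK (~(PMD_SIZE - 1))

	int main(void)
	{
		uint64_t efi_va = -4ULL * (1ULL << 30);	/* 0xffffffff00000000 */
		uint64_t pa   = 0x7f123000;		/* not 2M-aligned */
		uint64_t size = 3ULL << 20;		/* a 3 MiB region */

		efi_va -= size;

		if (!(pa & (PMD_SIZE - 1))) {
			efi_va &= PMD_MASK;
		} else {
			uint64_t pa_offset = pa & (PMD_SIZE - 1);
			uint64_t prev_va = efi_va;

			/* keep the same offset within the 2M page ... */
			efi_va = (efi_va & PMD_MASK) + pa_offset;

			/* ... but never move above the previous cursor */
			if (efi_va > prev_va)
				efi_va -= PMD_SIZE;
		}

		printf("PA 0x%llx -> VA 0x%llx (offset 0x%llx preserved)\n",
		       (unsigned long long)pa, (unsigned long long)efi_va,
		       (unsigned long long)(efi_va & (PMD_SIZE - 1)));
		return 0;
	}

With these inputs the model yields VA 0xfffffffeffb23000: the 0x123000 offset within the 2 MiB page matches the physical address, and the extra PMD_SIZE step down keeps allocations strictly top-down so the new region cannot collide with the previously handed-out VA.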
+54
arch/x86/platform/efi/efi_stub_64.S
···
 	mov %rsi, %cr0;			\
 	mov (%rsp), %rsp
 
+	/* stolen from gcc */
+	.macro FLUSH_TLB_ALL
+	movq %r15, efi_scratch(%rip)
+	movq %r14, efi_scratch+8(%rip)
+	movq %cr4, %r15
+	movq %r15, %r14
+	andb $0x7f, %r14b
+	movq %r14, %cr4
+	movq %r15, %cr4
+	movq efi_scratch+8(%rip), %r14
+	movq efi_scratch(%rip), %r15
+	.endm
+
+	.macro SWITCH_PGT
+	cmpb $0, efi_scratch+24(%rip)
+	je 1f
+	movq %r15, efi_scratch(%rip)		# r15
+	# save previous CR3
+	movq %cr3, %r15
+	movq %r15, efi_scratch+8(%rip)		# prev_cr3
+	movq efi_scratch+16(%rip), %r15		# EFI pgt
+	movq %r15, %cr3
+1:
+	.endm
+
+	.macro RESTORE_PGT
+	cmpb $0, efi_scratch+24(%rip)
+	je 2f
+	movq efi_scratch+8(%rip), %r15
+	movq %r15, %cr3
+	movq efi_scratch(%rip), %r15
+	FLUSH_TLB_ALL
+2:
+	.endm
+
 ENTRY(efi_call0)
 	SAVE_XMM
 	subq $32, %rsp
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
···
 	SAVE_XMM
 	subq $32, %rsp
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
···
 	SAVE_XMM
 	subq $32, %rsp
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
···
 	subq $32, %rsp
 	mov %rcx, %r8
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
···
 	mov %r8, %r9
 	mov %rcx, %r8
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
···
 	mov %r8, %r9
 	mov %rcx, %r8
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
···
 	mov %r8, %r9
 	mov %rcx, %r8
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
 ENDPROC(efi_call6)
+
+	.data
+ENTRY(efi_scratch)
+	.fill 3,8,0
+	.byte 0
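In C terms, the two macros do roughly this (an illustrative model, not kernel code; CR3 is modeled as a plain variable, and the struct mirrors efi_scratch from efi_64.c, for which the `.fill 3,8,0` + `.byte 0` in .data reserves space):

	/* efi_scratch layout: r15 @ +0, prev_cr3 @ +8, efi_pgt @ +16,
	 * use_pgd @ +24 -- matching the offsets used in the macros above. */
	struct efi_scratch_model {
		unsigned long r15;	/* caller's %r15, parked here */
		unsigned long prev_cr3;	/* kernel CR3 to return to */
		unsigned long efi_pgt;	/* physical addr of trampoline_pgd */
		_Bool use_pgd;		/* set unless efi=old_map */
	};

	static void switch_pgt(struct efi_scratch_model *s, unsigned long *cr3)
	{
		if (!s->use_pgd)
			return;
		s->prev_cr3 = *cr3;	/* save previous CR3 */
		*cr3 = s->efi_pgt;	/* run on the EFI page table */
	}

	static void restore_pgt(struct efi_scratch_model *s, unsigned long *cr3)
	{
		if (!s->use_pgd)
			return;
		*cr3 = s->prev_cr3;
		/* FLUSH_TLB_ALL: toggle CR4.PGE off and on to flush
		 * global TLB entries as well */
	}

FLUSH_TLB_ALL clears and restores CR4.PGE (bit 7, hence the `andb $0x7f`) because reloading CR3 alone does not flush TLB entries for global pages.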
+1
include/linux/efi.h
···
 #define EFI_RUNTIME_SERVICES	3	/* Can we use runtime services? */
 #define EFI_MEMMAP		4	/* Can we use EFI memory map? */
 #define EFI_64BIT		5	/* Is the firmware 64-bit? */
+#define EFI_ARCH_1		6	/* First arch-specific bit */
 
 #ifdef CONFIG_EFI
 # ifdef CONFIG_X86