Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/mce: relocate set{clear}_mce_nospec() functions

Relocate the twin mce functions to arch/x86/mm/pat/set_memory.c
file where they belong.

While at it, fixup a function name in a comment.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
[sfr: gate {set,clear}_mce_nospec() by CONFIG_X86_64]
Link: https://lore.kernel.org/r/165272527328.90175.8336008202048685278.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Authored by Jane Chu; committed by Dan Williams.
b3fdf939 7917f9cd

+52 -58
-52
arch/x86/include/asm/set_memory.h
··· 86 86 87 87 extern int kernel_set_to_readonly; 88 88 89 - #ifdef CONFIG_X86_64 90 - /* 91 - * Prevent speculative access to the page by either unmapping 92 - * it (if we do not require access to any part of the page) or 93 - * marking it uncacheable (if we want to try to retrieve data 94 - * from non-poisoned lines in the page). 95 - */ 96 - static inline int set_mce_nospec(unsigned long pfn, bool unmap) 97 - { 98 - unsigned long decoy_addr; 99 - int rc; 100 - 101 - /* SGX pages are not in the 1:1 map */ 102 - if (arch_is_platform_page(pfn << PAGE_SHIFT)) 103 - return 0; 104 - /* 105 - * We would like to just call: 106 - * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); 107 - * but doing that would radically increase the odds of a 108 - * speculative access to the poison page because we'd have 109 - * the virtual address of the kernel 1:1 mapping sitting 110 - * around in registers. 111 - * Instead we get tricky. We create a non-canonical address 112 - * that looks just like the one we want, but has bit 63 flipped. 113 - * This relies on set_memory_XX() properly sanitizing any __pa() 114 - * results with __PHYSICAL_MASK or PTE_PFN_MASK. 115 - */ 116 - decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); 117 - 118 - if (unmap) 119 - rc = set_memory_np(decoy_addr, 1); 120 - else 121 - rc = set_memory_uc(decoy_addr, 1); 122 - if (rc) 123 - pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); 124 - return rc; 125 - } 126 - #define set_mce_nospec set_mce_nospec 127 - 128 - /* Restore full speculative operation to the pfn. */ 129 - static inline int clear_mce_nospec(unsigned long pfn) 130 - { 131 - return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1); 132 - } 133 - #define clear_mce_nospec clear_mce_nospec 134 - #else 135 - /* 136 - * Few people would run a 32-bit kernel on a machine that supports 137 - * recoverable errors because they have too much memory to boot 32-bit. 138 - */ 139 - #endif 140 - 141 89 #endif /* _ASM_X86_SET_MEMORY_H */
+48 -2
arch/x86/mm/pat/set_memory.c
··· 19 19 #include <linux/vmstat.h> 20 20 #include <linux/kernel.h> 21 21 #include <linux/cc_platform.h> 22 + #include <linux/set_memory.h> 22 23 23 24 #include <asm/e820/api.h> 24 25 #include <asm/processor.h> ··· 30 29 #include <asm/pgalloc.h> 31 30 #include <asm/proto.h> 32 31 #include <asm/memtype.h> 33 - #include <asm/set_memory.h> 34 32 #include <asm/hyperv-tlfs.h> 35 33 #include <asm/mshyperv.h> 36 34 ··· 1816 1816 } 1817 1817 1818 1818 /* 1819 - * _set_memory_prot is an internal helper for callers that have been passed 1819 + * __set_memory_prot is an internal helper for callers that have been passed 1820 1820 * a pgprot_t value from upper layers and a reservation has already been taken. 1821 1821 * If you want to set the pgprot to a specific page protocol, use the 1822 1822 * set_memory_xx() functions. ··· 1924 1924 return 0; 1925 1925 } 1926 1926 EXPORT_SYMBOL(set_memory_wb); 1927 + 1928 + /* 1929 + * Prevent speculative access to the page by either unmapping 1930 + * it (if we do not require access to any part of the page) or 1931 + * marking it uncacheable (if we want to try to retrieve data 1932 + * from non-poisoned lines in the page). 1933 + */ 1934 + #ifdef CONFIG_X86_64 1935 + int set_mce_nospec(unsigned long pfn, bool unmap) 1936 + { 1937 + unsigned long decoy_addr; 1938 + int rc; 1939 + 1940 + /* SGX pages are not in the 1:1 map */ 1941 + if (arch_is_platform_page(pfn << PAGE_SHIFT)) 1942 + return 0; 1943 + /* 1944 + * We would like to just call: 1945 + * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); 1946 + * but doing that would radically increase the odds of a 1947 + * speculative access to the poison page because we'd have 1948 + * the virtual address of the kernel 1:1 mapping sitting 1949 + * around in registers. 1950 + * Instead we get tricky. We create a non-canonical address 1951 + * that looks just like the one we want, but has bit 63 flipped. 
1952 + * This relies on set_memory_XX() properly sanitizing any __pa() 1953 + * results with __PHYSICAL_MASK or PTE_PFN_MASK. 1954 + */ 1955 + decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); 1956 + 1957 + if (unmap) 1958 + rc = set_memory_np(decoy_addr, 1); 1959 + else 1960 + rc = set_memory_uc(decoy_addr, 1); 1961 + if (rc) 1962 + pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); 1963 + return rc; 1964 + } 1965 + 1966 + /* Restore full speculative operation to the pfn. */ 1967 + int clear_mce_nospec(unsigned long pfn) 1968 + { 1969 + return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1); 1970 + } 1971 + EXPORT_SYMBOL_GPL(clear_mce_nospec); 1972 + #endif /* CONFIG_X86_64 */ 1927 1973 1928 1974 int set_memory_x(unsigned long addr, int numpages) 1929 1975 {
+4 -4
include/linux/set_memory.h
··· 42 42 #endif 43 43 #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ 44 44 45 - #ifndef set_mce_nospec 45 + #ifdef CONFIG_X86_64 46 + int set_mce_nospec(unsigned long pfn, bool unmap); 47 + int clear_mce_nospec(unsigned long pfn); 48 + #else 46 49 static inline int set_mce_nospec(unsigned long pfn, bool unmap) 47 50 { 48 51 return 0; 49 52 } 50 - #endif 51 - 52 - #ifndef clear_mce_nospec 53 53 static inline int clear_mce_nospec(unsigned long pfn) 54 54 { 55 55 return 0;