Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/x86/include/asm/mmu_context.h at v4.13 (300 lines, 8.4 kB)
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions. This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates. On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}
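
/*
 * Illustrative sketch, not part of the original header: the release
 * side that the lockless_dereference() above pairs with.  At this
 * kernel version the writer lives in arch/x86/kernel/ldt.c
 * (install_ldt()), roughly:
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * The release store ensures that any CPU observing new_ldt also
 * observes its initialized entries/nr_entries fields; the IPI then
 * makes every CPU running this mm reload the new LDT before it
 * returns to user code, which is the guarantee the comment above
 * relies on.
 */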

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
	#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
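
/*
 * Worked example, added for illustration: VM_PKEY_BIT0..VM_PKEY_BIT3
 * are four consecutive vm_flags bits starting at VM_PKEY_SHIFT, so
 * vma_pkey() extracts them as a 4-bit integer.  A VMA mapped with
 * pkey 5 (binary 0101) has VM_PKEY_BIT0 and VM_PKEY_BIT2 set, and
 *
 *	(vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT == 5
 */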

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */
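
/*
 * Usage sketch, added for illustration and not part of the original
 * header: the pattern the __get_current_cr3_fast() comment describes.
 * At this kernel version, KVM's VMX code snapshots CR3 cheaply before
 * VM entry and only rewrites the host-CR3 VMCS field when it changed.
 * Simplified, with hypothetical field names:
 *
 *	unsigned long cr3 = __get_current_cr3_fast();
 *
 *	if (unlikely(cr3 != loaded_vmcs->host_cr3)) {
 *		vmcs_writel(HOST_CR3, cr3);
 *		loaded_vmcs->host_cr3 = cr3;
 *	}
 */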