Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at c9a28fa7b9ac19b676deefa0a171ce7df8755c08 71 lines 1.7 kB view raw
/*
 * x86-64 MMU context handling: address-space (mm) creation/teardown hooks
 * and the context-switch path that installs a new page-table root (CR3)
 * and per-mm LDT, with SMP lazy-TLB bookkeeping via the per-CPU PDA.
 */
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>
#endif

/*
 * possibly do the LDT unload here?
 */
/* Allocate/free per-mm context state (e.g. the LDT); defined out of line. */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

/*
 * Called when the kernel keeps running on a borrowed user mm (e.g. when
 * switching to a kernel thread).  On SMP, downgrade this CPU's TLB state
 * from OK to LAZY so that remote flushers may skip IPIs to us and instead
 * let leave_mm() detach us later.  UP needs no bookkeeping.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

/*
 * Switch the CPU from address space 'prev' to 'next'.
 *
 * Real switch (prev != next): leave prev's cpu_vm_mask first so prev's
 * flush IPIs stop targeting us, mark our TLB state OK, join next's mask,
 * then load CR3 (which flushes the TLB) and reload the LDT only if it
 * actually differs.  The ordering versus the flush-IPI sender matters;
 * do not reorder these stores.
 *
 * Lazy re-attach (prev == next, SMP only): we may have been detached by
 * leave_mm() while lazy.  cpu_test_and_set() atomically rejoins the mask;
 * if we were indeed out of it, flush IPIs were withheld, so CR3 and the
 * LDT must be reloaded to drop any stale (possibly freed) translations.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt)) 
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		/* Lazy TLB invariant: the PDA must still track this mm. */
		if (read_pda(active_mm) != next)
			BUG();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled 
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

/*
 * Drop the user-mode segment bases on exec: reset the GS index and zero
 * FS so nothing from the old mm's TLS leaks into the new image.
 */
#define deactivate_mm(tsk,mm)	do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0));  \
} while(0)

/* Activating a fresh mm is just a context switch with no outgoing task. */
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)


#endif