Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: merge mmu_context.h

Impact: cleanup

tj: * changed cpu to unsigned as was done on mmu_context_64.h as cpu
id is officially unsigned int
* added missing ';' to 32bit version of deactivate_mm()

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

Authored by Brian Gerst and committed by Tejun Heo.
6826c8ff 0dd76d73

+59 -111
+59 -4
arch/x86/include/asm/mmu_context.h
··· 21 21 int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 22 22 void destroy_context(struct mm_struct *mm); 23 23 24 - #ifdef CONFIG_X86_32 25 - # include "mmu_context_32.h" 26 - #else 27 - # include "mmu_context_64.h" 24 + 25 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 26 + { 27 + #ifdef CONFIG_SMP 28 + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 29 + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 28 30 #endif 31 + } 32 + 33 + static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 34 + struct task_struct *tsk) 35 + { 36 + unsigned cpu = smp_processor_id(); 37 + 38 + if (likely(prev != next)) { 39 + /* stop flush ipis for the previous mm */ 40 + cpu_clear(cpu, prev->cpu_vm_mask); 41 + #ifdef CONFIG_SMP 42 + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 43 + percpu_write(cpu_tlbstate.active_mm, next); 44 + #endif 45 + cpu_set(cpu, next->cpu_vm_mask); 46 + 47 + /* Re-load page tables */ 48 + load_cr3(next->pgd); 49 + 50 + /* 51 + * load the LDT, if the LDT is different: 52 + */ 53 + if (unlikely(prev->context.ldt != next->context.ldt)) 54 + load_LDT_nolock(&next->context); 55 + } 56 + #ifdef CONFIG_SMP 57 + else { 58 + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 59 + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 60 + 61 + if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { 62 + /* We were in lazy tlb mode and leave_mm disabled 63 + * tlb flush IPI delivery. We must reload CR3 64 + * to make sure to use no freed page tables. 
65 + */ 66 + load_cr3(next->pgd); 67 + load_LDT_nolock(&next->context); 68 + } 69 + } 70 + #endif 71 + } 29 72 30 73 #define activate_mm(prev, next) \ 31 74 do { \ ··· 76 33 switch_mm((prev), (next), NULL); \ 77 34 } while (0); 78 35 36 + #ifdef CONFIG_X86_32 37 + #define deactivate_mm(tsk, mm) \ 38 + do { \ 39 + loadsegment(gs, 0); \ 40 + } while (0) 41 + #else 42 + #define deactivate_mm(tsk, mm) \ 43 + do { \ 44 + load_gs_index(0); \ 45 + loadsegment(fs, 0); \ 46 + } while (0) 47 + #endif 79 48 80 49 #endif /* _ASM_X86_MMU_CONTEXT_H */
-55
arch/x86/include/asm/mmu_context_32.h
··· 1 - #ifndef _ASM_X86_MMU_CONTEXT_32_H 2 - #define _ASM_X86_MMU_CONTEXT_32_H 3 - 4 - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 5 - { 6 - #ifdef CONFIG_SMP 7 - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 8 - percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 9 - #endif 10 - } 11 - 12 - static inline void switch_mm(struct mm_struct *prev, 13 - struct mm_struct *next, 14 - struct task_struct *tsk) 15 - { 16 - int cpu = smp_processor_id(); 17 - 18 - if (likely(prev != next)) { 19 - /* stop flush ipis for the previous mm */ 20 - cpu_clear(cpu, prev->cpu_vm_mask); 21 - #ifdef CONFIG_SMP 22 - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 23 - percpu_write(cpu_tlbstate.active_mm, next); 24 - #endif 25 - cpu_set(cpu, next->cpu_vm_mask); 26 - 27 - /* Re-load page tables */ 28 - load_cr3(next->pgd); 29 - 30 - /* 31 - * load the LDT, if the LDT is different: 32 - */ 33 - if (unlikely(prev->context.ldt != next->context.ldt)) 34 - load_LDT_nolock(&next->context); 35 - } 36 - #ifdef CONFIG_SMP 37 - else { 38 - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 39 - BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 40 - 41 - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { 42 - /* We were in lazy tlb mode and leave_mm disabled 43 - * tlb flush IPI delivery. We must reload %cr3. 44 - */ 45 - load_cr3(next->pgd); 46 - load_LDT_nolock(&next->context); 47 - } 48 - } 49 - #endif 50 - } 51 - 52 - #define deactivate_mm(tsk, mm) \ 53 - asm("movl %0,%%gs": :"r" (0)); 54 - 55 - #endif /* _ASM_X86_MMU_CONTEXT_32_H */
-52
arch/x86/include/asm/mmu_context_64.h
··· 1 - #ifndef _ASM_X86_MMU_CONTEXT_64_H 2 - #define _ASM_X86_MMU_CONTEXT_64_H 3 - 4 - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 5 - { 6 - #ifdef CONFIG_SMP 7 - if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) 8 - percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); 9 - #endif 10 - } 11 - 12 - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 13 - struct task_struct *tsk) 14 - { 15 - unsigned cpu = smp_processor_id(); 16 - if (likely(prev != next)) { 17 - /* stop flush ipis for the previous mm */ 18 - cpu_clear(cpu, prev->cpu_vm_mask); 19 - #ifdef CONFIG_SMP 20 - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 21 - percpu_write(cpu_tlbstate.active_mm, next); 22 - #endif 23 - cpu_set(cpu, next->cpu_vm_mask); 24 - load_cr3(next->pgd); 25 - 26 - if (unlikely(next->context.ldt != prev->context.ldt)) 27 - load_LDT_nolock(&next->context); 28 - } 29 - #ifdef CONFIG_SMP 30 - else { 31 - percpu_write(cpu_tlbstate.state, TLBSTATE_OK); 32 - BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); 33 - 34 - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { 35 - /* We were in lazy tlb mode and leave_mm disabled 36 - * tlb flush IPI delivery. We must reload CR3 37 - * to make sure to use no freed page tables. 38 - */ 39 - load_cr3(next->pgd); 40 - load_LDT_nolock(&next->context); 41 - } 42 - } 43 - #endif 44 - } 45 - 46 - #define deactivate_mm(tsk, mm) \ 47 - do { \ 48 - load_gs_index(0); \ 49 - asm volatile("movl %0,%%fs"::"r"(0)); \ 50 - } while (0) 51 - 52 - #endif /* _ASM_X86_MMU_CONTEXT_64_H */