Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/uaccess: simplify control register updates

Always switch to the kernel ASCE in switch_mm. Load the secondary
space ASCE in finish_arch_post_lock_switch after checking that
any pending page table operations have completed. The primary
ASCE is loaded in entry.S and entry64.S. With this the update_primary_asce
call can be removed from the switch_to macro and from the start
of switch_mm function. Remove the load_primary argument from
update_user_asce/clear_user_asce, rename update_user_asce to
set_user_asce and rename update_primary_asce to load_kernel_asce.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+33 -43
+2 -2
arch/s390/include/asm/futex.h
··· 29 29 int cmparg = (encoded_op << 20) >> 20; 30 30 int oldval = 0, newval, ret; 31 31 32 - update_primary_asce(current); 32 + load_kernel_asce(); 33 33 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 34 34 oparg = 1 << oparg; 35 35 ··· 79 79 { 80 80 int ret; 81 81 82 - update_primary_asce(current); 82 + load_kernel_asce(); 83 83 asm volatile( 84 84 " sacf 256\n" 85 85 "0: cs %1,%4,0(%5)\n"
+17 -26
arch/s390/include/asm/mmu_context.h
··· 30 30 31 31 #define destroy_context(mm) do { } while (0) 32 32 33 - static inline void update_user_asce(struct mm_struct *mm, int load_primary) 33 + static inline void set_user_asce(struct mm_struct *mm) 34 34 { 35 35 pgd_t *pgd = mm->pgd; 36 36 37 37 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 38 - if (load_primary) 39 - __ctl_load(S390_lowcore.user_asce, 1, 1); 40 38 set_fs(current->thread.mm_segment); 39 + set_thread_flag(TIF_ASCE); 41 40 } 42 41 43 - static inline void clear_user_asce(struct mm_struct *mm, int load_primary) 42 + static inline void clear_user_asce(void) 44 43 { 45 44 S390_lowcore.user_asce = S390_lowcore.kernel_asce; 46 45 47 - if (load_primary) 48 - __ctl_load(S390_lowcore.user_asce, 1, 1); 46 + __ctl_load(S390_lowcore.user_asce, 1, 1); 49 47 __ctl_load(S390_lowcore.user_asce, 7, 7); 50 48 } 51 49 52 - static inline void update_primary_asce(struct task_struct *tsk) 50 + static inline void load_kernel_asce(void) 53 51 { 54 52 unsigned long asce; 55 53 56 54 __ctl_store(asce, 1, 1); 57 55 if (asce != S390_lowcore.kernel_asce) 58 56 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 59 - set_tsk_thread_flag(tsk, TIF_ASCE); 57 + set_thread_flag(TIF_ASCE); 60 58 } 61 59 62 60 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ··· 62 64 { 63 65 int cpu = smp_processor_id(); 64 66 65 - update_primary_asce(tsk); 66 67 if (prev == next) 67 68 return; 68 69 if (MACHINE_HAS_TLB_LC) 69 70 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 70 - if (atomic_inc_return(&next->context.attach_count) >> 16) { 71 - /* Delay update_user_asce until all TLB flushes are done. */ 72 - set_tsk_thread_flag(tsk, TIF_TLB_WAIT); 73 - /* Clear old ASCE by loading the kernel ASCE. 
*/ 74 - clear_user_asce(next, 0); 75 - } else { 76 - cpumask_set_cpu(cpu, mm_cpumask(next)); 77 - update_user_asce(next, 0); 78 - if (next->context.flush_mm) 79 - /* Flush pending TLBs */ 80 - __tlb_flush_mm(next); 81 - } 71 + /* Clear old ASCE by loading the kernel ASCE. */ 72 + __ctl_load(S390_lowcore.kernel_asce, 1, 1); 73 + __ctl_load(S390_lowcore.kernel_asce, 7, 7); 74 + /* Delay loading of the new ASCE to control registers CR1 & CR7 */ 75 + set_thread_flag(TIF_ASCE); 76 + atomic_inc(&next->context.attach_count); 82 77 atomic_dec(&prev->context.attach_count); 83 - WARN_ON(atomic_read(&prev->context.attach_count) < 0); 84 78 if (MACHINE_HAS_TLB_LC) 85 79 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 86 80 } ··· 83 93 struct task_struct *tsk = current; 84 94 struct mm_struct *mm = tsk->mm; 85 95 86 - if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT)) 96 + if (!mm) 87 97 return; 88 98 preempt_disable(); 89 - clear_tsk_thread_flag(tsk, TIF_TLB_WAIT); 90 99 while (atomic_read(&mm->context.attach_count) >> 16) 91 100 cpu_relax(); 92 101 93 102 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 94 - update_user_asce(mm, 0); 103 + set_user_asce(mm); 95 104 if (mm->context.flush_mm) 96 105 __tlb_flush_mm(mm); 97 106 preempt_enable(); ··· 102 113 static inline void activate_mm(struct mm_struct *prev, 103 114 struct mm_struct *next) 104 115 { 105 - switch_mm(prev, next, current); 116 + switch_mm(prev, next, current); 117 + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); 118 + set_user_asce(next); 106 119 } 107 120 108 121 static inline void arch_dup_mmap(struct mm_struct *oldmm,
-1
arch/s390/include/asm/switch_to.h
··· 132 132 update_cr_regs(next); \ 133 133 } \ 134 134 prev = __switch_to(prev,next); \ 135 - update_primary_asce(current); \ 136 135 } while (0) 137 136 138 137 #define finish_arch_switch(prev) do { \
+1 -3
arch/s390/include/asm/thread_info.h
··· 81 81 #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 82 82 #define TIF_SIGPENDING 2 /* signal pending */ 83 83 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 84 - #define TIF_TLB_WAIT 4 /* wait for TLB flush completion */ 85 - #define TIF_ASCE 5 /* primary asce needs fixup / uaccess */ 84 + #define TIF_ASCE 5 /* user asce needs fixup / uaccess */ 86 85 #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ 87 86 #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 88 87 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ ··· 98 99 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 99 100 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 100 101 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 101 - #define _TIF_TLB_WAIT (1<<TIF_TLB_WAIT) 102 102 #define _TIF_ASCE (1<<TIF_ASCE) 103 103 #define _TIF_PER_TRAP (1<<TIF_PER_TRAP) 104 104 #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
+1 -1
arch/s390/kernel/entry.S
··· 43 43 _TIF_MCCK_PENDING | _TIF_ASCE) 44 44 _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 45 45 _TIF_SYSCALL_TRACEPOINT) 46 - _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) 46 + _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE) 47 47 48 48 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 49 49 STACK_SIZE = 1 << STACK_SHIFT
+1 -1
arch/s390/kernel/entry64.S
··· 48 48 _TIF_MCCK_PENDING | _TIF_ASCE) 49 49 _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 50 50 _TIF_SYSCALL_TRACEPOINT) 51 - _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) 51 + _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_ASCE) 52 52 53 53 #define BASED(name) name-system_call(%r13) 54 54
+5 -5
arch/s390/lib/uaccess.c
··· 76 76 { 77 77 unsigned long tmp1, tmp2; 78 78 79 - update_primary_asce(current); 79 + load_kernel_asce(); 80 80 tmp1 = -256UL; 81 81 asm volatile( 82 82 " sacf 0\n" ··· 159 159 { 160 160 unsigned long tmp1, tmp2; 161 161 162 - update_primary_asce(current); 162 + load_kernel_asce(); 163 163 tmp1 = -256UL; 164 164 asm volatile( 165 165 " sacf 0\n" ··· 225 225 { 226 226 unsigned long tmp1; 227 227 228 - update_primary_asce(current); 228 + load_kernel_asce(); 229 229 asm volatile( 230 230 " sacf 256\n" 231 231 " "AHI" %0,-1\n" ··· 292 292 { 293 293 unsigned long tmp1, tmp2; 294 294 295 - update_primary_asce(current); 295 + load_kernel_asce(); 296 296 asm volatile( 297 297 " sacf 256\n" 298 298 " "AHI" %0,-1\n" ··· 358 358 { 359 359 if (unlikely(!size)) 360 360 return 0; 361 - update_primary_asce(current); 361 + load_kernel_asce(); 362 362 return strnlen_user_srst(src, size); 363 363 } 364 364 EXPORT_SYMBOL(__strnlen_user);
+6 -4
arch/s390/mm/pgtable.c
··· 53 53 { 54 54 struct mm_struct *mm = arg; 55 55 56 - if (current->active_mm == mm) 57 - update_user_asce(mm, 1); 56 + if (current->active_mm == mm) { 57 + clear_user_asce(); 58 + set_user_asce(mm); 59 + } 58 60 __tlb_flush_local(); 59 61 } 60 62 ··· 110 108 pgd_t *pgd; 111 109 112 110 if (current->active_mm == mm) { 113 - clear_user_asce(mm, 1); 111 + clear_user_asce(); 114 112 __tlb_flush_mm(mm); 115 113 } 116 114 while (mm->context.asce_limit > limit) { ··· 136 134 crst_table_free(mm, (unsigned long *) pgd); 137 135 } 138 136 if (current->active_mm == mm) 139 - update_user_asce(mm, 1); 137 + set_user_asce(mm); 140 138 } 141 139 #endif 142 140