Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: Replace S390_lowcore by get_lowcore()

Replace all S390_lowcore usages in arch/s390/ by get_lowcore().

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

Authored by Sven Schnelle and committed by Vasily Gorbik.
208da1d5 7e8f89e5

+236 -237
+1 -1
arch/s390/include/asm/current.h
··· 14 14 15 15 struct task_struct; 16 16 17 - #define current ((struct task_struct *const)S390_lowcore.current_task) 17 + #define current ((struct task_struct *const)get_lowcore()->current_task) 18 18 19 19 #endif /* !(_S390_CURRENT_H) */
+2 -2
arch/s390/include/asm/facility.h
··· 92 92 93 93 asm volatile( 94 94 " stfl 0(0)\n" 95 - : "=m" (S390_lowcore.stfl_fac_list)); 96 - stfl_fac_list = S390_lowcore.stfl_fac_list; 95 + : "=m" (get_lowcore()->stfl_fac_list)); 96 + stfl_fac_list = get_lowcore()->stfl_fac_list; 97 97 memcpy(stfle_fac_list, &stfl_fac_list, 4); 98 98 nr = 4; /* bytes stored by stfl */ 99 99 if (stfl_fac_list & 0x01000000) {
+3 -3
arch/s390/include/asm/hardirq.h
··· 13 13 14 14 #include <asm/lowcore.h> 15 15 16 - #define local_softirq_pending() (S390_lowcore.softirq_pending) 17 - #define set_softirq_pending(x) (S390_lowcore.softirq_pending = (x)) 18 - #define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x)) 16 + #define local_softirq_pending() (get_lowcore()->softirq_pending) 17 + #define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x)) 18 + #define or_softirq_pending(x) (get_lowcore()->softirq_pending |= (x)) 19 19 20 20 #define __ARCH_IRQ_STAT 21 21 #define __ARCH_IRQ_EXIT_IRQS_DISABLED
+4 -4
arch/s390/include/asm/mmu_context.h
··· 76 76 int cpu = smp_processor_id(); 77 77 78 78 if (next == &init_mm) 79 - S390_lowcore.user_asce = s390_invalid_asce; 79 + get_lowcore()->user_asce = s390_invalid_asce; 80 80 else 81 - S390_lowcore.user_asce.val = next->context.asce; 81 + get_lowcore()->user_asce.val = next->context.asce; 82 82 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 83 83 /* Clear previous user-ASCE from CR7 */ 84 84 local_ctl_load(7, &s390_invalid_asce); ··· 111 111 __tlb_flush_mm_lazy(mm); 112 112 preempt_enable(); 113 113 } 114 - local_ctl_load(7, &S390_lowcore.user_asce); 114 + local_ctl_load(7, &get_lowcore()->user_asce); 115 115 } 116 116 117 117 #define activate_mm activate_mm ··· 120 120 { 121 121 switch_mm(prev, next, current); 122 122 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); 123 - local_ctl_load(7, &S390_lowcore.user_asce); 123 + local_ctl_load(7, &get_lowcore()->user_asce); 124 124 } 125 125 126 126 #include <asm-generic/mmu_context.h>
+4 -4
arch/s390/include/asm/pai.h
··· 55 55 return; 56 56 if (!static_branch_unlikely(&pai_key)) 57 57 return; 58 - if (!S390_lowcore.ccd) 58 + if (!get_lowcore()->ccd) 59 59 return; 60 60 if (!user_mode(regs)) 61 61 return; 62 - WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); 62 + WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd | PAI_CRYPTO_KERNEL_OFFSET); 63 63 } 64 64 65 65 static __always_inline void pai_kernel_exit(struct pt_regs *regs) ··· 68 68 return; 69 69 if (!static_branch_unlikely(&pai_key)) 70 70 return; 71 - if (!S390_lowcore.ccd) 71 + if (!get_lowcore()->ccd) 72 72 return; 73 73 if (!user_mode(regs)) 74 74 return; 75 - WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); 75 + WRITE_ONCE(get_lowcore()->ccd, get_lowcore()->ccd & ~PAI_CRYPTO_KERNEL_OFFSET); 76 76 } 77 77 78 78 #define PAI_SAVE_AREA(x) ((x)->hw.event_base)
+1 -1
arch/s390/include/asm/percpu.h
··· 9 9 * s390 uses its own implementation for per cpu data, the offset of 10 10 * the cpu local data area is cached in the cpu's lowcore memory. 11 11 */ 12 - #define __my_cpu_offset S390_lowcore.percpu_offset 12 + #define __my_cpu_offset get_lowcore()->percpu_offset 13 13 14 14 /* 15 15 * For 64 bit module code, the module may be more than 4G above the
+15 -15
arch/s390/include/asm/preempt.h
··· 14 14 15 15 static __always_inline int preempt_count(void) 16 16 { 17 - return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED; 17 + return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED; 18 18 } 19 19 20 20 static __always_inline void preempt_count_set(int pc) ··· 22 22 int old, new; 23 23 24 24 do { 25 - old = READ_ONCE(S390_lowcore.preempt_count); 25 + old = READ_ONCE(get_lowcore()->preempt_count); 26 26 new = (old & PREEMPT_NEED_RESCHED) | 27 27 (pc & ~PREEMPT_NEED_RESCHED); 28 - } while (__atomic_cmpxchg(&S390_lowcore.preempt_count, 28 + } while (__atomic_cmpxchg(&get_lowcore()->preempt_count, 29 29 old, new) != old); 30 30 } 31 31 32 32 static __always_inline void set_preempt_need_resched(void) 33 33 { 34 - __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); 34 + __atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count); 35 35 } 36 36 37 37 static __always_inline void clear_preempt_need_resched(void) 38 38 { 39 - __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count); 39 + __atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count); 40 40 } 41 41 42 42 static __always_inline bool test_preempt_need_resched(void) 43 43 { 44 - return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED); 44 + return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED); 45 45 } 46 46 47 47 static __always_inline void __preempt_count_add(int val) ··· 52 52 */ 53 53 if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { 54 54 if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { 55 - __atomic_add_const(val, &S390_lowcore.preempt_count); 55 + __atomic_add_const(val, &get_lowcore()->preempt_count); 56 56 return; 57 57 } 58 58 } 59 - __atomic_add(val, &S390_lowcore.preempt_count); 59 + __atomic_add(val, &get_lowcore()->preempt_count); 60 60 } 61 61 62 62 static __always_inline void __preempt_count_sub(int val) ··· 66 66 67 67 static __always_inline bool __preempt_count_dec_and_test(void) 
68 68 { 69 - return __atomic_add(-1, &S390_lowcore.preempt_count) == 1; 69 + return __atomic_add(-1, &get_lowcore()->preempt_count) == 1; 70 70 } 71 71 72 72 static __always_inline bool should_resched(int preempt_offset) 73 73 { 74 - return unlikely(READ_ONCE(S390_lowcore.preempt_count) == 74 + return unlikely(READ_ONCE(get_lowcore()->preempt_count) == 75 75 preempt_offset); 76 76 } 77 77 ··· 81 81 82 82 static __always_inline int preempt_count(void) 83 83 { 84 - return READ_ONCE(S390_lowcore.preempt_count); 84 + return READ_ONCE(get_lowcore()->preempt_count); 85 85 } 86 86 87 87 static __always_inline void preempt_count_set(int pc) 88 88 { 89 - S390_lowcore.preempt_count = pc; 89 + get_lowcore()->preempt_count = pc; 90 90 } 91 91 92 92 static __always_inline void set_preempt_need_resched(void) ··· 104 104 105 105 static __always_inline void __preempt_count_add(int val) 106 106 { 107 - S390_lowcore.preempt_count += val; 107 + get_lowcore()->preempt_count += val; 108 108 } 109 109 110 110 static __always_inline void __preempt_count_sub(int val) 111 111 { 112 - S390_lowcore.preempt_count -= val; 112 + get_lowcore()->preempt_count -= val; 113 113 } 114 114 115 115 static __always_inline bool __preempt_count_dec_and_test(void) 116 116 { 117 - return !--S390_lowcore.preempt_count && tif_need_resched(); 117 + return !--get_lowcore()->preempt_count && tif_need_resched(); 118 118 } 119 119 120 120 static __always_inline bool should_resched(int preempt_offset)
+4 -4
arch/s390/include/asm/processor.h
··· 46 46 47 47 static __always_inline void set_cpu_flag(int flag) 48 48 { 49 - S390_lowcore.cpu_flags |= (1UL << flag); 49 + get_lowcore()->cpu_flags |= (1UL << flag); 50 50 } 51 51 52 52 static __always_inline void clear_cpu_flag(int flag) 53 53 { 54 - S390_lowcore.cpu_flags &= ~(1UL << flag); 54 + get_lowcore()->cpu_flags &= ~(1UL << flag); 55 55 } 56 56 57 57 static __always_inline bool test_cpu_flag(int flag) 58 58 { 59 - return S390_lowcore.cpu_flags & (1UL << flag); 59 + return get_lowcore()->cpu_flags & (1UL << flag); 60 60 } 61 61 62 62 static __always_inline bool test_and_set_cpu_flag(int flag) ··· 269 269 270 270 static __always_inline bool on_thread_stack(void) 271 271 { 272 - unsigned long ksp = S390_lowcore.kernel_stack; 272 + unsigned long ksp = get_lowcore()->kernel_stack; 273 273 274 274 return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1)); 275 275 }
+17 -17
arch/s390/include/asm/setup.h
··· 77 77 /* The Write Back bit position in the physaddr is given by the SLPC PCI */ 78 78 extern unsigned long mio_wb_bit_mask; 79 79 80 - #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 81 - #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 82 - #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) 80 + #define MACHINE_IS_VM (get_lowcore()->machine_flags & MACHINE_FLAG_VM) 81 + #define MACHINE_IS_KVM (get_lowcore()->machine_flags & MACHINE_FLAG_KVM) 82 + #define MACHINE_IS_LPAR (get_lowcore()->machine_flags & MACHINE_FLAG_LPAR) 83 83 84 - #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 85 - #define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) 86 - #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) 87 - #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 88 - #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 89 - #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 90 - #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 91 - #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 92 - #define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST) 93 - #define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX) 94 - #define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS) 95 - #define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC) 96 - #define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO) 97 - #define MACHINE_HAS_RDP (S390_lowcore.machine_flags & MACHINE_FLAG_RDP) 84 + #define MACHINE_HAS_DIAG9C (get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C) 85 + #define MACHINE_HAS_ESOP (get_lowcore()->machine_flags & MACHINE_FLAG_ESOP) 86 + #define MACHINE_HAS_IDTE (get_lowcore()->machine_flags & MACHINE_FLAG_IDTE) 87 + #define 
MACHINE_HAS_EDAT1 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1) 88 + #define MACHINE_HAS_EDAT2 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2) 89 + #define MACHINE_HAS_TOPOLOGY (get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY) 90 + #define MACHINE_HAS_TE (get_lowcore()->machine_flags & MACHINE_FLAG_TE) 91 + #define MACHINE_HAS_TLB_LC (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC) 92 + #define MACHINE_HAS_TLB_GUEST (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST) 93 + #define MACHINE_HAS_NX (get_lowcore()->machine_flags & MACHINE_FLAG_NX) 94 + #define MACHINE_HAS_GS (get_lowcore()->machine_flags & MACHINE_FLAG_GS) 95 + #define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC) 96 + #define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO) 97 + #define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP) 98 98 99 99 /* 100 100 * Console mode. Override with conmode=
+1 -1
arch/s390/include/asm/smp.h
··· 11 11 #include <asm/lowcore.h> 12 12 #include <asm/processor.h> 13 13 14 - #define raw_smp_processor_id() (S390_lowcore.cpu_nr) 14 + #define raw_smp_processor_id() (get_lowcore()->cpu_nr) 15 15 16 16 extern struct mutex smp_cpu_state_mutex; 17 17 extern unsigned int smp_cpu_mt_shift;
+1 -1
arch/s390/include/asm/softirq_stack.h
··· 8 8 #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK 9 9 static inline void do_softirq_own_stack(void) 10 10 { 11 - call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq); 11 + call_on_stack(0, get_lowcore()->async_stack, void, __do_softirq); 12 12 } 13 13 #endif 14 14 #endif /* __ASM_S390_SOFTIRQ_STACK_H */
+1 -1
arch/s390/include/asm/spinlock.h
··· 16 16 #include <asm/processor.h> 17 17 #include <asm/alternative.h> 18 18 19 - #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval) 19 + #define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval) 20 20 21 21 extern int spin_retry; 22 22
+5 -5
arch/s390/include/asm/timex.h
··· 161 161 { 162 162 unsigned long old; 163 163 164 - old = S390_lowcore.clock_comparator; 165 - S390_lowcore.clock_comparator = clock_comparator_max; 166 - set_clock_comparator(S390_lowcore.clock_comparator); 164 + old = get_lowcore()->clock_comparator; 165 + get_lowcore()->clock_comparator = clock_comparator_max; 166 + set_clock_comparator(get_lowcore()->clock_comparator); 167 167 return old; 168 168 } 169 169 170 170 static inline void local_tick_enable(unsigned long comp) 171 171 { 172 - S390_lowcore.clock_comparator = comp; 173 - set_clock_comparator(S390_lowcore.clock_comparator); 172 + get_lowcore()->clock_comparator = comp; 173 + set_clock_comparator(get_lowcore()->clock_comparator); 174 174 } 175 175 176 176 #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+6 -6
arch/s390/include/asm/vtime.h
··· 4 4 5 5 static inline void update_timer_sys(void) 6 6 { 7 - S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; 8 - S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer; 9 - S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; 7 + get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer; 8 + get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->sys_enter_timer; 9 + get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer; 10 10 } 11 11 12 12 static inline void update_timer_mcck(void) 13 13 { 14 - S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; 15 - S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer; 16 - S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer; 14 + get_lowcore()->system_timer += get_lowcore()->last_update_timer - get_lowcore()->exit_timer; 15 + get_lowcore()->user_timer += get_lowcore()->exit_timer - get_lowcore()->mcck_enter_timer; 16 + get_lowcore()->last_update_timer = get_lowcore()->mcck_enter_timer; 17 17 } 18 18 19 19 #endif /* _S390_VTIME_H */
+4 -4
arch/s390/kernel/dumpstack.c
··· 61 61 62 62 static bool in_irq_stack(unsigned long sp, struct stack_info *info) 63 63 { 64 - unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET; 64 + unsigned long stack = get_lowcore()->async_stack - STACK_INIT_OFFSET; 65 65 66 66 return in_stack(sp, info, STACK_TYPE_IRQ, stack); 67 67 } 68 68 69 69 static bool in_nodat_stack(unsigned long sp, struct stack_info *info) 70 70 { 71 - unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET; 71 + unsigned long stack = get_lowcore()->nodat_stack - STACK_INIT_OFFSET; 72 72 73 73 return in_stack(sp, info, STACK_TYPE_NODAT, stack); 74 74 } 75 75 76 76 static bool in_mcck_stack(unsigned long sp, struct stack_info *info) 77 77 { 78 - unsigned long stack = S390_lowcore.mcck_stack - STACK_INIT_OFFSET; 78 + unsigned long stack = get_lowcore()->mcck_stack - STACK_INIT_OFFSET; 79 79 80 80 return in_stack(sp, info, STACK_TYPE_MCCK, stack); 81 81 } 82 82 83 83 static bool in_restart_stack(unsigned long sp, struct stack_info *info) 84 84 { 85 - unsigned long stack = S390_lowcore.restart_stack - STACK_INIT_OFFSET; 85 + unsigned long stack = get_lowcore()->restart_stack - STACK_INIT_OFFSET; 86 86 87 87 return in_stack(sp, info, STACK_TYPE_RESTART, stack); 88 88 }
+18 -18
arch/s390/kernel/early.c
··· 72 72 73 73 memset(&tod_clock_base, 0, sizeof(tod_clock_base)); 74 74 tod_clock_base.tod = TOD_UNIX_EPOCH; 75 - S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; 75 + get_lowcore()->last_update_clock = TOD_UNIX_EPOCH; 76 76 } 77 77 78 78 /* ··· 99 99 100 100 /* Check current-configuration-level */ 101 101 if (stsi(NULL, 0, 0, 0) <= 2) { 102 - S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; 102 + get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR; 103 103 return; 104 104 } 105 105 /* Get virtual-machine cpu information. */ ··· 108 108 109 109 /* Detect known hypervisors */ 110 110 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 111 - S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 111 + get_lowcore()->machine_flags |= MACHINE_FLAG_KVM; 112 112 else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) 113 - S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 113 + get_lowcore()->machine_flags |= MACHINE_FLAG_VM; 114 114 } 115 115 116 116 /* Remove leading, trailing and double whitespace. */ ··· 166 166 167 167 if (!test_facility(11)) 168 168 return; 169 - S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY; 169 + get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY; 170 170 for (max_mnest = 6; max_mnest > 1; max_mnest--) { 171 171 if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0) 172 172 break; ··· 186 186 187 187 psw.addr = (unsigned long)early_pgm_check_handler; 188 188 psw.mask = PSW_KERNEL_BITS; 189 - S390_lowcore.program_new_psw = psw; 190 - S390_lowcore.preempt_count = INIT_PREEMPT_COUNT; 189 + get_lowcore()->program_new_psw = psw; 190 + get_lowcore()->preempt_count = INIT_PREEMPT_COUNT; 191 191 } 192 192 193 193 static noinline __init void setup_facility_list(void) ··· 211 211 EX_TABLE(0b,1b) 212 212 : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); 213 213 if (!rc) 214 - S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C; 214 + get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C; 215 215 } 216 216 217 217 static __init void 
detect_machine_facilities(void) 218 218 { 219 219 if (test_facility(8)) { 220 - S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; 220 + get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1; 221 221 system_ctl_set_bit(0, CR0_EDAT_BIT); 222 222 } 223 223 if (test_facility(78)) 224 - S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2; 224 + get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2; 225 225 if (test_facility(3)) 226 - S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; 226 + get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE; 227 227 if (test_facility(50) && test_facility(73)) { 228 - S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 228 + get_lowcore()->machine_flags |= MACHINE_FLAG_TE; 229 229 system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT); 230 230 } 231 231 if (test_facility(51)) 232 - S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 232 + get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC; 233 233 if (test_facility(129)) 234 234 system_ctl_set_bit(0, CR0_VECTOR_BIT); 235 235 if (test_facility(130)) 236 - S390_lowcore.machine_flags |= MACHINE_FLAG_NX; 236 + get_lowcore()->machine_flags |= MACHINE_FLAG_NX; 237 237 if (test_facility(133)) 238 - S390_lowcore.machine_flags |= MACHINE_FLAG_GS; 238 + get_lowcore()->machine_flags |= MACHINE_FLAG_GS; 239 239 if (test_facility(139) && (tod_clock_base.tod >> 63)) { 240 240 /* Enabled signed clock comparator comparisons */ 241 - S390_lowcore.machine_flags |= MACHINE_FLAG_SCC; 241 + get_lowcore()->machine_flags |= MACHINE_FLAG_SCC; 242 242 clock_comparator_max = -1ULL >> 1; 243 243 system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT); 244 244 } 245 245 if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) { 246 - S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO; 246 + get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO; 247 247 /* the control bit is set during PCI initialization */ 248 248 } 249 249 if (test_facility(194)) 250 - S390_lowcore.machine_flags |= MACHINE_FLAG_RDP; 250 + get_lowcore()->machine_flags |= 
MACHINE_FLAG_RDP; 251 251 } 252 252 253 253 static inline void save_vector_registers(void)
+5 -5
arch/s390/kernel/idle.c
··· 34 34 this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]); 35 35 } 36 36 37 - idle_time = S390_lowcore.int_clock - idle->clock_idle_enter; 37 + idle_time = get_lowcore()->int_clock - idle->clock_idle_enter; 38 38 39 - S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock; 40 - S390_lowcore.last_update_clock = S390_lowcore.int_clock; 39 + get_lowcore()->steal_timer += idle->clock_idle_enter - get_lowcore()->last_update_clock; 40 + get_lowcore()->last_update_clock = get_lowcore()->int_clock; 41 41 42 - S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter; 43 - S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; 42 + get_lowcore()->system_timer += get_lowcore()->last_update_timer - idle->timer_idle_enter; 43 + get_lowcore()->last_update_timer = get_lowcore()->sys_enter_timer; 44 44 45 45 /* Account time spent with enabled wait psw loaded as idle time. */ 46 46 WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
+9 -9
arch/s390/kernel/irq.c
··· 100 100 101 101 static void do_IRQ(struct pt_regs *regs, int irq) 102 102 { 103 - if (tod_after_eq(S390_lowcore.int_clock, 104 - S390_lowcore.clock_comparator)) 103 + if (tod_after_eq(get_lowcore()->int_clock, 104 + get_lowcore()->clock_comparator)) 105 105 /* Serve timer interrupts first. */ 106 106 clock_comparator_work(); 107 107 generic_handle_irq(irq); ··· 111 111 { 112 112 unsigned long frame = current_frame_address(); 113 113 114 - return ((S390_lowcore.async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0; 114 + return ((get_lowcore()->async_stack ^ frame) & ~(THREAD_SIZE - 1)) == 0; 115 115 } 116 116 117 117 static void do_irq_async(struct pt_regs *regs, int irq) ··· 119 119 if (on_async_stack()) { 120 120 do_IRQ(regs, irq); 121 121 } else { 122 - call_on_stack(2, S390_lowcore.async_stack, void, do_IRQ, 122 + call_on_stack(2, get_lowcore()->async_stack, void, do_IRQ, 123 123 struct pt_regs *, regs, int, irq); 124 124 } 125 125 } ··· 153 153 154 154 set_cpu_flag(CIF_NOHZ_DELAY); 155 155 do { 156 - regs->tpi_info = S390_lowcore.tpi_info; 157 - if (S390_lowcore.tpi_info.adapter_IO) 156 + regs->tpi_info = get_lowcore()->tpi_info; 157 + if (get_lowcore()->tpi_info.adapter_IO) 158 158 do_irq_async(regs, THIN_INTERRUPT); 159 159 else 160 160 do_irq_async(regs, IO_INTERRUPT); ··· 183 183 current->thread.last_break = regs->last_break; 184 184 } 185 185 186 - regs->int_code = S390_lowcore.ext_int_code_addr; 187 - regs->int_parm = S390_lowcore.ext_params; 188 - regs->int_parm_long = S390_lowcore.ext_params2; 186 + regs->int_code = get_lowcore()->ext_int_code_addr; 187 + regs->int_parm = get_lowcore()->ext_params; 188 + regs->int_parm_long = get_lowcore()->ext_params2; 189 189 190 190 from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT); 191 191 if (from_idle)
+2 -2
arch/s390/kernel/machine_kexec.c
··· 52 52 purgatory = (purgatory_t)image->start; 53 53 54 54 /* store_status() saved the prefix register to lowcore */ 55 - prefix = (unsigned long) S390_lowcore.prefixreg_save_area; 55 + prefix = (unsigned long)get_lowcore()->prefixreg_save_area; 56 56 57 57 /* Now do the reset */ 58 58 s390_reset_system(); ··· 91 91 continue; 92 92 } 93 93 /* Store status of the boot CPU */ 94 - mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK); 94 + mcesa = __va(get_lowcore()->mcesad & MCESA_ORIGIN_MASK); 95 95 if (cpu_has_vx()) 96 96 save_vx_regs((__vector128 *) mcesa->vector_save_area); 97 97 if (MACHINE_HAS_GS) {
+14 -15
arch/s390/kernel/nmi.c
··· 125 125 smp_emergency_stop(); 126 126 diag_amode31_ops.diag308_reset(); 127 127 ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x"); 128 - u64_to_hex(ptr, S390_lowcore.mcck_interruption_code); 128 + u64_to_hex(ptr, get_lowcore()->mcck_interruption_code); 129 129 130 130 /* 131 131 * Disable low address protection and make machine check new PSW a ··· 135 135 cr0_new = cr0; 136 136 cr0_new.lap = 0; 137 137 local_ctl_load(0, &cr0_new.reg); 138 - psw_save = S390_lowcore.mcck_new_psw; 139 - psw_bits(S390_lowcore.mcck_new_psw).io = 0; 140 - psw_bits(S390_lowcore.mcck_new_psw).ext = 0; 141 - psw_bits(S390_lowcore.mcck_new_psw).wait = 1; 138 + psw_save = get_lowcore()->mcck_new_psw; 139 + psw_bits(get_lowcore()->mcck_new_psw).io = 0; 140 + psw_bits(get_lowcore()->mcck_new_psw).ext = 0; 141 + psw_bits(get_lowcore()->mcck_new_psw).wait = 1; 142 142 sclp_emergency_printk(message); 143 143 144 144 /* 145 145 * Restore machine check new PSW and control register 0 to original 146 146 * values. This makes possible system dump analysis easier. 147 147 */ 148 - S390_lowcore.mcck_new_psw = psw_save; 148 + get_lowcore()->mcck_new_psw = psw_save; 149 149 local_ctl_load(0, &cr0.reg); 150 150 disabled_wait(); 151 151 while (1); ··· 226 226 /* 227 227 * Set the clock comparator register to the next expected value. 228 228 */ 229 - set_clock_comparator(S390_lowcore.clock_comparator); 229 + set_clock_comparator(get_lowcore()->clock_comparator); 230 230 if (!mci.gr || !mci.fp || !mci.fc) 231 231 return false; 232 232 /* ··· 252 252 * check handling must take care of this. The host values are saved by 253 253 * KVM and are not affected. 
254 254 */ 255 - cr2.reg = S390_lowcore.cregs_save_area[2]; 255 + cr2.reg = get_lowcore()->cregs_save_area[2]; 256 256 if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST)) 257 257 return false; 258 258 if (!mci.ms || !mci.pm || !mci.ia) ··· 278 278 279 279 sie_page = container_of(sie_block, struct sie_page, sie_block); 280 280 mcck_backup = &sie_page->mcck_info; 281 - mcck_backup->mcic = S390_lowcore.mcck_interruption_code & 281 + mcck_backup->mcic = get_lowcore()->mcck_interruption_code & 282 282 ~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE); 283 - mcck_backup->ext_damage_code = S390_lowcore.external_damage_code; 284 - mcck_backup->failing_storage_address 285 - = S390_lowcore.failing_storage_address; 283 + mcck_backup->ext_damage_code = get_lowcore()->external_damage_code; 284 + mcck_backup->failing_storage_address = get_lowcore()->failing_storage_address; 286 285 } 287 286 NOKPROBE_SYMBOL(s390_backup_mcck_info); 288 287 ··· 313 314 if (user_mode(regs)) 314 315 update_timer_mcck(); 315 316 inc_irq_stat(NMI_NMI); 316 - mci.val = S390_lowcore.mcck_interruption_code; 317 + mci.val = get_lowcore()->mcck_interruption_code; 317 318 mcck = this_cpu_ptr(&cpu_mcck); 318 319 319 320 /* ··· 381 382 } 382 383 if (mci.ed && mci.ec) { 383 384 /* External damage */ 384 - if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) 385 + if (get_lowcore()->external_damage_code & (1U << ED_STP_SYNC)) 385 386 mcck->stp_queue |= stp_sync_check(); 386 - if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) 387 + if (get_lowcore()->external_damage_code & (1U << ED_STP_ISLAND)) 387 388 mcck->stp_queue |= stp_island_check(); 388 389 mcck_pending = 1; 389 390 }
+1 -1
arch/s390/kernel/perf_cpum_sf.c
··· 1022 1022 } 1023 1023 1024 1024 /* Load current program parameter */ 1025 - lpp(&S390_lowcore.lpp); 1025 + lpp(&get_lowcore()->lpp); 1026 1026 1027 1027 debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 1028 1028 "interval %#lx tear %#lx dear %#lx\n", __func__,
+2 -2
arch/s390/kernel/perf_pai_crypto.c
··· 372 372 373 373 if (++cpump->active_events == 1) { 374 374 ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET; 375 - WRITE_ONCE(S390_lowcore.ccd, ccd); 375 + WRITE_ONCE(get_lowcore()->ccd, ccd); 376 376 local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 377 377 } 378 378 if (flags & PERF_EF_START) ··· 409 409 paicrypt_stop(event, PERF_EF_UPDATE); 410 410 if (--cpump->active_events == 0) { 411 411 local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 412 - WRITE_ONCE(S390_lowcore.ccd, 0); 412 + WRITE_ONCE(get_lowcore()->ccd, 0); 413 413 } 414 414 } 415 415
+2 -2
arch/s390/kernel/perf_pai_ext.c
··· 389 389 struct paiext_cb *pcb = cpump->paiext_cb; 390 390 391 391 if (++cpump->active_events == 1) { 392 - S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb); 392 + get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb); 393 393 pcb->acc = virt_to_phys(cpump->area) | 0x1; 394 394 /* Enable CPU instruction lookup for PAIE1 control block */ 395 395 local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT); ··· 431 431 /* Disable CPU instruction lookup for PAIE1 control block */ 432 432 local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT); 433 433 pcb->acc = 0; 434 - S390_lowcore.aicd = 0; 434 + get_lowcore()->aicd = 0; 435 435 } 436 436 } 437 437
+3 -3
arch/s390/kernel/process.c
··· 71 71 72 72 void arch_setup_new_exec(void) 73 73 { 74 - if (S390_lowcore.current_pid != current->pid) { 75 - S390_lowcore.current_pid = current->pid; 74 + if (get_lowcore()->current_pid != current->pid) { 75 + get_lowcore()->current_pid = current->pid; 76 76 if (test_facility(40)) 77 - lpp(&S390_lowcore.lpp); 77 + lpp(&get_lowcore()->lpp); 78 78 } 79 79 } 80 80
+12 -12
arch/s390/kernel/setup.c
··· 421 421 lc->clock_comparator = clock_comparator_max; 422 422 lc->current_task = (unsigned long)&init_task; 423 423 lc->lpp = LPP_MAGIC; 424 - lc->machine_flags = S390_lowcore.machine_flags; 425 - lc->preempt_count = S390_lowcore.preempt_count; 424 + lc->machine_flags = get_lowcore()->machine_flags; 425 + lc->preempt_count = get_lowcore()->preempt_count; 426 426 nmi_alloc_mcesa_early(&lc->mcesad); 427 - lc->sys_enter_timer = S390_lowcore.sys_enter_timer; 428 - lc->exit_timer = S390_lowcore.exit_timer; 429 - lc->user_timer = S390_lowcore.user_timer; 430 - lc->system_timer = S390_lowcore.system_timer; 431 - lc->steal_timer = S390_lowcore.steal_timer; 432 - lc->last_update_timer = S390_lowcore.last_update_timer; 433 - lc->last_update_clock = S390_lowcore.last_update_clock; 427 + lc->sys_enter_timer = get_lowcore()->sys_enter_timer; 428 + lc->exit_timer = get_lowcore()->exit_timer; 429 + lc->user_timer = get_lowcore()->user_timer; 430 + lc->system_timer = get_lowcore()->system_timer; 431 + lc->steal_timer = get_lowcore()->steal_timer; 432 + lc->last_update_timer = get_lowcore()->last_update_timer; 433 + lc->last_update_clock = get_lowcore()->last_update_clock; 434 434 /* 435 435 * Allocate the global restart stack which is the same for 436 436 * all CPUs in case *one* of them does a PSW restart. ··· 439 439 lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET; 440 440 lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET; 441 441 lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET; 442 - lc->kernel_stack = S390_lowcore.kernel_stack; 442 + lc->kernel_stack = get_lowcore()->kernel_stack; 443 443 /* 444 444 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant 445 445 * restart data to the absolute zero lowcore. 
This is necessary if ··· 455 455 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 456 456 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 457 457 lc->preempt_count = PREEMPT_DISABLED; 458 - lc->kernel_asce = S390_lowcore.kernel_asce; 459 - lc->user_asce = S390_lowcore.user_asce; 458 + lc->kernel_asce = get_lowcore()->kernel_asce; 459 + lc->user_asce = get_lowcore()->user_asce; 460 460 461 461 system_ctlreg_init_save_area(lc); 462 462 abs_lc = get_abs_lowcore();
+15 -15
arch/s390/kernel/smp.c
··· 203 203 mcck_stack = stack_alloc(); 204 204 if (!lc || !nodat_stack || !async_stack || !mcck_stack) 205 205 goto out; 206 - memcpy(lc, &S390_lowcore, 512); 206 + memcpy(lc, get_lowcore(), 512); 207 207 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 208 208 lc->async_stack = async_stack + STACK_INIT_OFFSET; 209 209 lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET; ··· 265 265 lc->spinlock_lockval = arch_spin_lockval(cpu); 266 266 lc->spinlock_index = 0; 267 267 lc->percpu_offset = __per_cpu_offset[cpu]; 268 - lc->kernel_asce = S390_lowcore.kernel_asce; 268 + lc->kernel_asce = get_lowcore()->kernel_asce; 269 269 lc->user_asce = s390_invalid_asce; 270 - lc->machine_flags = S390_lowcore.machine_flags; 270 + lc->machine_flags = get_lowcore()->machine_flags; 271 271 lc->user_timer = lc->system_timer = 272 272 lc->steal_timer = lc->avg_steal_timer = 0; 273 273 abs_lc = get_abs_lowcore(); ··· 407 407 struct lowcore *lc = lowcore_ptr[0]; 408 408 409 409 if (pcpu_devices[0].address == stap()) 410 - lc = &S390_lowcore; 410 + lc = get_lowcore(); 411 411 412 412 pcpu_delegate(&pcpu_devices[0], func, data, 413 413 lc->nodat_stack); ··· 844 844 { 845 845 int cpu = raw_smp_processor_id(); 846 846 847 - S390_lowcore.last_update_clock = get_tod_clock(); 848 - S390_lowcore.restart_stack = (unsigned long)restart_stack; 849 - S390_lowcore.restart_fn = (unsigned long)do_restart; 850 - S390_lowcore.restart_data = 0; 851 - S390_lowcore.restart_source = -1U; 852 - S390_lowcore.restart_flags = 0; 853 - restore_access_regs(S390_lowcore.access_regs_save_area); 847 + get_lowcore()->last_update_clock = get_tod_clock(); 848 + get_lowcore()->restart_stack = (unsigned long)restart_stack; 849 + get_lowcore()->restart_fn = (unsigned long)do_restart; 850 + get_lowcore()->restart_data = 0; 851 + get_lowcore()->restart_source = -1U; 852 + get_lowcore()->restart_flags = 0; 853 + restore_access_regs(get_lowcore()->access_regs_save_area); 854 854 cpu_init(); 855 855 
rcutree_report_cpu_starting(cpu); 856 856 init_cpu_timer(); ··· 981 981 982 982 WARN_ON(!cpu_present(0) || !cpu_online(0)); 983 983 pcpu->state = CPU_STATE_CONFIGURED; 984 - S390_lowcore.percpu_offset = __per_cpu_offset[0]; 984 + get_lowcore()->percpu_offset = __per_cpu_offset[0]; 985 985 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 986 986 } 987 987 988 988 void __init smp_setup_processor_id(void) 989 989 { 990 990 pcpu_devices[0].address = stap(); 991 - S390_lowcore.cpu_nr = 0; 992 - S390_lowcore.spinlock_lockval = arch_spin_lockval(0); 993 - S390_lowcore.spinlock_index = 0; 991 + get_lowcore()->cpu_nr = 0; 992 + get_lowcore()->spinlock_lockval = arch_spin_lockval(0); 993 + get_lowcore()->spinlock_index = 0; 994 994 } 995 995 996 996 /*
+2 -2
arch/s390/kernel/syscall.c
··· 151 151 { 152 152 add_random_kstack_offset(); 153 153 enter_from_user_mode(regs); 154 - regs->psw = S390_lowcore.svc_old_psw; 155 - regs->int_code = S390_lowcore.svc_int_code; 154 + regs->psw = get_lowcore()->svc_old_psw; 155 + regs->int_code = get_lowcore()->svc_int_code; 156 156 update_timer_sys(); 157 157 if (static_branch_likely(&cpu_has_bear)) 158 158 current->thread.last_break = regs->last_break;
+11 -11
arch/s390/kernel/time.c
··· 131 131 { 132 132 struct clock_event_device *cd; 133 133 134 - S390_lowcore.clock_comparator = clock_comparator_max; 134 + get_lowcore()->clock_comparator = clock_comparator_max; 135 135 cd = this_cpu_ptr(&comparators); 136 136 cd->event_handler(cd); 137 137 } ··· 139 139 static int s390_next_event(unsigned long delta, 140 140 struct clock_event_device *evt) 141 141 { 142 - S390_lowcore.clock_comparator = get_tod_clock() + delta; 143 - set_clock_comparator(S390_lowcore.clock_comparator); 142 + get_lowcore()->clock_comparator = get_tod_clock() + delta; 143 + set_clock_comparator(get_lowcore()->clock_comparator); 144 144 return 0; 145 145 } 146 146 ··· 153 153 struct clock_event_device *cd; 154 154 int cpu; 155 155 156 - S390_lowcore.clock_comparator = clock_comparator_max; 157 - set_clock_comparator(S390_lowcore.clock_comparator); 156 + get_lowcore()->clock_comparator = clock_comparator_max; 157 + set_clock_comparator(get_lowcore()->clock_comparator); 158 158 159 159 cpu = smp_processor_id(); 160 160 cd = &per_cpu(comparators, cpu); ··· 184 184 unsigned long param64) 185 185 { 186 186 inc_irq_stat(IRQEXT_CLK); 187 - if (S390_lowcore.clock_comparator == clock_comparator_max) 188 - set_clock_comparator(S390_lowcore.clock_comparator); 187 + if (get_lowcore()->clock_comparator == clock_comparator_max) 188 + set_clock_comparator(get_lowcore()->clock_comparator); 189 189 } 190 190 191 191 static void stp_timing_alert(struct stp_irq_parm *); ··· 408 408 static void clock_sync_local(long delta) 409 409 { 410 410 /* Add the delta to the clock comparator. */ 411 - if (S390_lowcore.clock_comparator != clock_comparator_max) { 412 - S390_lowcore.clock_comparator += delta; 413 - set_clock_comparator(S390_lowcore.clock_comparator); 411 + if (get_lowcore()->clock_comparator != clock_comparator_max) { 412 + get_lowcore()->clock_comparator += delta; 413 + set_clock_comparator(get_lowcore()->clock_comparator); 414 414 } 415 415 /* Adjust the last_update_clock time-stamp. 
*/ 416 - S390_lowcore.last_update_clock += delta; 416 + get_lowcore()->last_update_clock += delta; 417 417 } 418 418 419 419 /* Single threaded workqueue used for stp sync events */
+12 -12
arch/s390/kernel/traps.c
··· 293 293 294 294 local_irq_save(flags); 295 295 cr0 = local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT); 296 - psw_bits(S390_lowcore.external_new_psw).mcheck = 1; 297 - psw_bits(S390_lowcore.program_new_psw).mcheck = 1; 298 - psw_bits(S390_lowcore.svc_new_psw).mcheck = 1; 299 - psw_bits(S390_lowcore.io_new_psw).mcheck = 1; 296 + psw_bits(get_lowcore()->external_new_psw).mcheck = 1; 297 + psw_bits(get_lowcore()->program_new_psw).mcheck = 1; 298 + psw_bits(get_lowcore()->svc_new_psw).mcheck = 1; 299 + psw_bits(get_lowcore()->io_new_psw).mcheck = 1; 300 300 local_ctl_load(0, &cr0); 301 301 local_irq_restore(flags); 302 302 local_mcck_enable(); ··· 310 310 unsigned int trapnr; 311 311 irqentry_state_t state; 312 312 313 - regs->int_code = S390_lowcore.pgm_int_code; 314 - regs->int_parm_long = S390_lowcore.trans_exc_code; 313 + regs->int_code = get_lowcore()->pgm_int_code; 314 + regs->int_parm_long = get_lowcore()->trans_exc_code; 315 315 316 316 state = irqentry_enter(regs); 317 317 ··· 324 324 current->thread.last_break = regs->last_break; 325 325 } 326 326 327 - if (S390_lowcore.pgm_code & 0x0200) { 327 + if (get_lowcore()->pgm_code & 0x0200) { 328 328 /* transaction abort */ 329 - current->thread.trap_tdb = S390_lowcore.pgm_tdb; 329 + current->thread.trap_tdb = get_lowcore()->pgm_tdb; 330 330 } 331 331 332 - if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) { 332 + if (get_lowcore()->pgm_code & PGM_INT_CODE_PER) { 333 333 if (user_mode(regs)) { 334 334 struct per_event *ev = &current->thread.per_event; 335 335 336 336 set_thread_flag(TIF_PER_TRAP); 337 - ev->address = S390_lowcore.per_address; 338 - ev->cause = S390_lowcore.per_code_combined; 339 - ev->paid = S390_lowcore.per_access_id; 337 + ev->address = get_lowcore()->per_address; 338 + ev->cause = get_lowcore()->per_code_combined; 339 + ev->paid = get_lowcore()->per_access_id; 340 340 } else { 341 341 /* PER event in kernel is kprobes */ 342 342 __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
+37 -37
arch/s390/kernel/vtime.c
··· 41 41 " stpt %0\n" /* Store current cpu timer value */ 42 42 " spt %1" /* Set new value imm. afterwards */ 43 43 : "=Q" (timer) : "Q" (expires)); 44 - S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; 45 - S390_lowcore.last_update_timer = expires; 44 + get_lowcore()->system_timer += get_lowcore()->last_update_timer - timer; 45 + get_lowcore()->last_update_timer = expires; 46 46 } 47 47 48 48 static inline int virt_timer_forward(u64 elapsed) ··· 118 118 { 119 119 u64 timer, clock, user, guest, system, hardirq, softirq; 120 120 121 - timer = S390_lowcore.last_update_timer; 122 - clock = S390_lowcore.last_update_clock; 121 + timer = get_lowcore()->last_update_timer; 122 + clock = get_lowcore()->last_update_clock; 123 123 asm volatile( 124 124 " stpt %0\n" /* Store current cpu timer value */ 125 125 " stckf %1" /* Store current tod clock value */ 126 - : "=Q" (S390_lowcore.last_update_timer), 127 - "=Q" (S390_lowcore.last_update_clock) 126 + : "=Q" (get_lowcore()->last_update_timer), 127 + "=Q" (get_lowcore()->last_update_clock) 128 128 : : "cc"); 129 - clock = S390_lowcore.last_update_clock - clock; 130 - timer -= S390_lowcore.last_update_timer; 129 + clock = get_lowcore()->last_update_clock - clock; 130 + timer -= get_lowcore()->last_update_timer; 131 131 132 132 if (hardirq_count()) 133 - S390_lowcore.hardirq_timer += timer; 133 + get_lowcore()->hardirq_timer += timer; 134 134 else 135 - S390_lowcore.system_timer += timer; 135 + get_lowcore()->system_timer += timer; 136 136 137 137 /* Update MT utilization calculation */ 138 138 if (smp_cpu_mtid && ··· 141 141 142 142 /* Calculate cputime delta */ 143 143 user = update_tsk_timer(&tsk->thread.user_timer, 144 - READ_ONCE(S390_lowcore.user_timer)); 144 + READ_ONCE(get_lowcore()->user_timer)); 145 145 guest = update_tsk_timer(&tsk->thread.guest_timer, 146 - READ_ONCE(S390_lowcore.guest_timer)); 146 + READ_ONCE(get_lowcore()->guest_timer)); 147 147 system = 
update_tsk_timer(&tsk->thread.system_timer, 148 - READ_ONCE(S390_lowcore.system_timer)); 148 + READ_ONCE(get_lowcore()->system_timer)); 149 149 hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, 150 - READ_ONCE(S390_lowcore.hardirq_timer)); 150 + READ_ONCE(get_lowcore()->hardirq_timer)); 151 151 softirq = update_tsk_timer(&tsk->thread.softirq_timer, 152 - READ_ONCE(S390_lowcore.softirq_timer)); 153 - S390_lowcore.steal_timer += 152 + READ_ONCE(get_lowcore()->softirq_timer)); 153 + get_lowcore()->steal_timer += 154 154 clock - user - guest - system - hardirq - softirq; 155 155 156 156 /* Push account value */ ··· 177 177 void vtime_task_switch(struct task_struct *prev) 178 178 { 179 179 do_account_vtime(prev); 180 - prev->thread.user_timer = S390_lowcore.user_timer; 181 - prev->thread.guest_timer = S390_lowcore.guest_timer; 182 - prev->thread.system_timer = S390_lowcore.system_timer; 183 - prev->thread.hardirq_timer = S390_lowcore.hardirq_timer; 184 - prev->thread.softirq_timer = S390_lowcore.softirq_timer; 185 - S390_lowcore.user_timer = current->thread.user_timer; 186 - S390_lowcore.guest_timer = current->thread.guest_timer; 187 - S390_lowcore.system_timer = current->thread.system_timer; 188 - S390_lowcore.hardirq_timer = current->thread.hardirq_timer; 189 - S390_lowcore.softirq_timer = current->thread.softirq_timer; 180 + prev->thread.user_timer = get_lowcore()->user_timer; 181 + prev->thread.guest_timer = get_lowcore()->guest_timer; 182 + prev->thread.system_timer = get_lowcore()->system_timer; 183 + prev->thread.hardirq_timer = get_lowcore()->hardirq_timer; 184 + prev->thread.softirq_timer = get_lowcore()->softirq_timer; 185 + get_lowcore()->user_timer = current->thread.user_timer; 186 + get_lowcore()->guest_timer = current->thread.guest_timer; 187 + get_lowcore()->system_timer = current->thread.system_timer; 188 + get_lowcore()->hardirq_timer = current->thread.hardirq_timer; 189 + get_lowcore()->softirq_timer = current->thread.softirq_timer; 190 190 } 191 
191 192 192 /* ··· 201 201 if (do_account_vtime(tsk)) 202 202 virt_timer_expire(); 203 203 204 - steal = S390_lowcore.steal_timer; 205 - avg_steal = S390_lowcore.avg_steal_timer; 204 + steal = get_lowcore()->steal_timer; 205 + avg_steal = get_lowcore()->avg_steal_timer; 206 206 if ((s64) steal > 0) { 207 - S390_lowcore.steal_timer = 0; 207 + get_lowcore()->steal_timer = 0; 208 208 account_steal_time(cputime_to_nsecs(steal)); 209 209 avg_steal += steal; 210 210 } 211 - S390_lowcore.avg_steal_timer = avg_steal / 2; 211 + get_lowcore()->avg_steal_timer = avg_steal / 2; 212 212 } 213 213 214 214 static u64 vtime_delta(void) 215 215 { 216 - u64 timer = S390_lowcore.last_update_timer; 216 + u64 timer = get_lowcore()->last_update_timer; 217 217 218 - S390_lowcore.last_update_timer = get_cpu_timer(); 218 + get_lowcore()->last_update_timer = get_cpu_timer(); 219 219 220 - return timer - S390_lowcore.last_update_timer; 220 + return timer - get_lowcore()->last_update_timer; 221 221 } 222 222 223 223 /* ··· 229 229 u64 delta = vtime_delta(); 230 230 231 231 if (tsk->flags & PF_VCPU) 232 - S390_lowcore.guest_timer += delta; 232 + get_lowcore()->guest_timer += delta; 233 233 else 234 - S390_lowcore.system_timer += delta; 234 + get_lowcore()->system_timer += delta; 235 235 236 236 virt_timer_forward(delta); 237 237 } ··· 241 241 { 242 242 u64 delta = vtime_delta(); 243 243 244 - S390_lowcore.softirq_timer += delta; 244 + get_lowcore()->softirq_timer += delta; 245 245 246 246 virt_timer_forward(delta); 247 247 } ··· 250 250 { 251 251 u64 delta = vtime_delta(); 252 252 253 - S390_lowcore.hardirq_timer += delta; 253 + get_lowcore()->hardirq_timer += delta; 254 254 255 255 virt_timer_forward(delta); 256 256 }
+1 -1
arch/s390/kvm/kvm-s390.c
··· 4079 4079 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 4080 4080 { 4081 4081 /* do not poll with more than halt_poll_max_steal percent of steal time */ 4082 - if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >= 4082 + if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >= 4083 4083 READ_ONCE(halt_poll_max_steal)) { 4084 4084 vcpu->stat.halt_no_poll_steal++; 4085 4085 return true;
+2 -2
arch/s390/lib/spinlock.c
··· 119 119 struct spin_wait *node, *next; 120 120 int lockval, ix, node_id, tail_id, old, new, owner, count; 121 121 122 - ix = S390_lowcore.spinlock_index++; 122 + ix = get_lowcore()->spinlock_index++; 123 123 barrier(); 124 124 lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ 125 125 node = this_cpu_ptr(&spin_wait[ix]); ··· 205 205 } 206 206 207 207 out: 208 - S390_lowcore.spinlock_index--; 208 + get_lowcore()->spinlock_index--; 209 209 } 210 210 211 211 static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
+1 -1
arch/s390/lib/test_unwind.c
··· 356 356 if (u->flags & UWM_SWITCH_STACK) { 357 357 local_irq_save(flags); 358 358 local_mcck_save(mflags); 359 - rc = call_on_stack(1, S390_lowcore.nodat_stack, 359 + rc = call_on_stack(1, get_lowcore()->nodat_stack, 360 360 int, unwindme_func3, struct unwindme *, u); 361 361 local_mcck_restore(mflags); 362 362 local_irq_restore(flags);
+2 -2
arch/s390/lib/uaccess.c
··· 21 21 22 22 local_ctl_store(1, &cr1); 23 23 local_ctl_store(7, &cr7); 24 - if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val) 24 + if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val) 25 25 return; 26 26 panic("incorrect ASCE on kernel %s\n" 27 27 "cr1: %016lx cr7: %016lx\n" 28 28 "kernel: %016lx user: %016lx\n", 29 29 exit ? "exit" : "entry", cr1.val, cr7.val, 30 - S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val); 30 + get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val); 31 31 } 32 32 #endif /*CONFIG_DEBUG_ENTRY */ 33 33
+1 -1
arch/s390/mm/dump_pagetables.c
··· 288 288 * kernel ASCE. We need this to keep the page table walker functions 289 289 * from accessing non-existent entries. 290 290 */ 291 - max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; 291 + max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; 292 292 max_addr = 1UL << (max_addr * 11 + 31); 293 293 address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; 294 294 address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
+8 -8
arch/s390/mm/fault.c
··· 74 74 return USER_FAULT; 75 75 if (!IS_ENABLED(CONFIG_PGSTE)) 76 76 return KERNEL_FAULT; 77 - gmap = (struct gmap *)S390_lowcore.gmap; 77 + gmap = (struct gmap *)get_lowcore()->gmap; 78 78 if (gmap && gmap->asce == regs->cr1) 79 79 return GMAP_FAULT; 80 80 return KERNEL_FAULT; ··· 182 182 pr_cont("mode while using "); 183 183 switch (get_fault_type(regs)) { 184 184 case USER_FAULT: 185 - asce = S390_lowcore.user_asce.val; 185 + asce = get_lowcore()->user_asce.val; 186 186 pr_cont("user "); 187 187 break; 188 188 case GMAP_FAULT: 189 - asce = ((struct gmap *)S390_lowcore.gmap)->asce; 189 + asce = ((struct gmap *)get_lowcore()->gmap)->asce; 190 190 pr_cont("gmap "); 191 191 break; 192 192 case KERNEL_FAULT: 193 - asce = S390_lowcore.kernel_asce.val; 193 + asce = get_lowcore()->kernel_asce.val; 194 194 pr_cont("kernel "); 195 195 break; 196 196 default: ··· 351 351 mmap_read_lock(mm); 352 352 gmap = NULL; 353 353 if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) { 354 - gmap = (struct gmap *)S390_lowcore.gmap; 354 + gmap = (struct gmap *)get_lowcore()->gmap; 355 355 current->thread.gmap_addr = address; 356 356 current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE); 357 357 current->thread.gmap_int_code = regs->int_code & 0xffff; ··· 522 522 switch (get_fault_type(regs)) { 523 523 case GMAP_FAULT: 524 524 mm = current->mm; 525 - gmap = (struct gmap *)S390_lowcore.gmap; 525 + gmap = (struct gmap *)get_lowcore()->gmap; 526 526 mmap_read_lock(mm); 527 527 addr = __gmap_translate(gmap, addr); 528 528 mmap_read_unlock(mm); ··· 563 563 564 564 void do_non_secure_storage_access(struct pt_regs *regs) 565 565 { 566 - struct gmap *gmap = (struct gmap *)S390_lowcore.gmap; 566 + struct gmap *gmap = (struct gmap *)get_lowcore()->gmap; 567 567 unsigned long gaddr = get_fault_address(regs); 568 568 569 569 if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT)) ··· 575 575 576 576 void do_secure_storage_violation(struct pt_regs *regs) 577 577 { 578 - struct gmap *gmap = 
(struct gmap *)S390_lowcore.gmap; 578 + struct gmap *gmap = (struct gmap *)get_lowcore()->gmap; 579 579 unsigned long gaddr = get_fault_address(regs); 580 580 581 581 /*
+3 -3
arch/s390/mm/gmap.c
··· 287 287 */ 288 288 void gmap_enable(struct gmap *gmap) 289 289 { 290 - S390_lowcore.gmap = (unsigned long) gmap; 290 + get_lowcore()->gmap = (unsigned long)gmap; 291 291 } 292 292 EXPORT_SYMBOL_GPL(gmap_enable); 293 293 ··· 297 297 */ 298 298 void gmap_disable(struct gmap *gmap) 299 299 { 300 - S390_lowcore.gmap = 0UL; 300 + get_lowcore()->gmap = 0UL; 301 301 } 302 302 EXPORT_SYMBOL_GPL(gmap_disable); 303 303 ··· 308 308 */ 309 309 struct gmap *gmap_get_enabled(void) 310 310 { 311 - return (struct gmap *) S390_lowcore.gmap; 311 + return (struct gmap *)get_lowcore()->gmap; 312 312 } 313 313 EXPORT_SYMBOL_GPL(gmap_get_enabled); 314 314
+1 -1
arch/s390/mm/pageattr.c
··· 75 75 break; 76 76 } 77 77 table = (unsigned long *)((unsigned long)old & mask); 78 - crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val); 78 + crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val); 79 79 } else if (MACHINE_HAS_IDTE) { 80 80 cspg(old, *old, new); 81 81 } else {
+2 -2
arch/s390/mm/pgalloc.c
··· 64 64 65 65 /* change all active ASCEs to avoid the creation of new TLBs */ 66 66 if (current->active_mm == mm) { 67 - S390_lowcore.user_asce.val = mm->context.asce; 68 - local_ctl_load(7, &S390_lowcore.user_asce); 67 + get_lowcore()->user_asce.val = mm->context.asce; 68 + local_ctl_load(7, &get_lowcore()->user_asce); 69 69 } 70 70 __tlb_flush_local(); 71 71 }
+1 -1
arch/s390/pci/pci.c
··· 1064 1064 return NULL; 1065 1065 } 1066 1066 if (!strcmp(str, "nomio")) { 1067 - S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO; 1067 + get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO; 1068 1068 return NULL; 1069 1069 } 1070 1070 if (!strcmp(str, "force_floating")) {