/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern int sysctl_userprocess_debug;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" std 0,%O0+8(%R0)\n"
		" std 2,%O0+24(%R0)\n"
		" std 4,%O0+40(%R0)\n"
		" std 6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" stfpc %0\n"
		" std 1,%O0+16(%R0)\n"
		" std 3,%O0+32(%R0)\n"
		" std 5,%O0+48(%R0)\n"
		" std 7,%O0+64(%R0)\n"
		" std 8,%O0+72(%R0)\n"
		" std 9,%O0+80(%R0)\n"
		" std 10,%O0+88(%R0)\n"
		" std 11,%O0+96(%R0)\n"
		" std 12,%O0+104(%R0)\n"
		" std 13,%O0+112(%R0)\n"
		" std 14,%O0+120(%R0)\n"
		" std 15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		" ld 0,%O0+8(%R0)\n"
		" ld 2,%O0+24(%R0)\n"
		" ld 4,%O0+40(%R0)\n"
		" ld 6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		" lfpc %0\n"
		" ld 1,%O0+16(%R0)\n"
		" ld 3,%O0+32(%R0)\n"
		" ld 5,%O0+48(%R0)\n"
		" ld 7,%O0+64(%R0)\n"
		" ld 8,%O0+72(%R0)\n"
		" ld 9,%O0+80(%R0)\n"
		" ld 10,%O0+88(%R0)\n"
		" ld 11,%O0+96(%R0)\n"
		" ld 12,%O0+104(%R0)\n"
		" ld 13,%O0+112(%R0)\n"
		" ld 14,%O0+120(%R0)\n"
		" ld 15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev,next,last) do {				\
	if (prev->mm) {						\
		save_fp_regs(&prev->thread.fp_regs);		\
		save_access_regs(&prev->thread.acrs[0]);	\
	}							\
	if (next->mm) {						\
		restore_fp_regs(&next->thread.fp_regs);		\
		restore_access_regs(&next->thread.acrs[0]);	\
		update_per_regs(next);				\
	}							\
	prev = __switch_to(prev,next);				\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#define finish_arch_switch(prev) do {		\
	set_fs(current->thread.mm_segment);	\
	account_vtime(prev, current);		\
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		" lctlg %1,%2,%0\n"				\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		" stctg %1,%2,%0\n"				\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		" lctl %1,%2,%0\n"				\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		" stctl %1,%2,%0\n"				\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})

/*
 * Used to set the psw mask, except for the first byte, which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
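 *
 * For example, facility bit 2 is located in byte 0 of stfle_fac_list
 * (nr >> 3 == 0) and is selected by the mask 0x80 >> 2 == 0x20, i.e.
 * the bit is counted from the most significant end of that byte.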
 */
static inline int test_facility(unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}

static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		" tprot 0(%1),0\n"
		"0: ipm %0\n"
		" srl %0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif /* __KERNEL__ */

#endif
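As a usage illustration only (not part of system.h): a caller would typically gate an optional feature on test_facility() and then switch the corresponding control-register bit through the ctl_set_bit() wrapper. The facility number and CR bit below are placeholders, not values taken from this header.

/*
 * Hypothetical caller sketch: facility bit 7 and CR0 bit 17 are
 * placeholders chosen for illustration.
 */
static int example_enable_feature(void)
{
	if (!test_facility(7))	/* feature not installed on this machine */
		return -EOPNOTSUPP;
	ctl_set_bit(0, 17);	/* set the bit in CR0; on all CPUs under CONFIG_SMP */
	return 0;
}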