/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile (
                "   std   0,8(%1)\n"
                "   std   2,24(%1)\n"
                "   std   4,40(%1)\n"
                "   std   6,56(%1)"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "   stfpc 0(%1)\n"
                "   std   1,16(%1)\n"
                "   std   3,32(%1)\n"
                "   std   5,48(%1)\n"
                "   std   7,64(%1)\n"
                "   std   8,72(%1)\n"
                "   std   9,80(%1)\n"
                "   std   10,88(%1)\n"
                "   std   11,96(%1)\n"
                "   std   12,104(%1)\n"
                "   std   13,112(%1)\n"
                "   std   14,120(%1)\n"
                "   std   15,128(%1)\n"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" );
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile (
                "   ld    0,8(%0)\n"
                "   ld    2,24(%0)\n"
                "   ld    4,40(%0)\n"
                "   ld    6,56(%0)"
                : : "a" (fpregs), "m" (*fpregs) );
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                "   lfpc  0(%0)\n"
                "   ld    1,16(%0)\n"
                "   ld    3,32(%0)\n"
                "   ld    5,48(%0)\n"
                "   ld    7,64(%0)\n"
                "   ld    8,72(%0)\n"
                "   ld    9,80(%0)\n"
                "   ld    10,88(%0)\n"
                "   ld    11,96(%0)\n"
                "   ld    12,104(%0)\n"
                "   ld    13,112(%0)\n"
                "   ld    14,120(%0)\n"
                "   ld    15,128(%0)\n"
                : : "a" (fpregs), "m" (*fpregs) );
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" );
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) );
}

#define switch_to(prev,next,last) do {                          \
        if (prev == next)                                       \
                break;                                          \
        save_fp_regs(&prev->thread.fp_regs);                    \
        restore_fp_regs(&next->thread.fp_regs);                 \
        save_access_regs(&prev->thread.acrs[0]);                \
        restore_access_regs(&next->thread.acrs[0]);             \
        prev = __switch_to(prev,next);                          \
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif

#define finish_arch_switch(prev) do {                           \
        set_fs(current->thread.mm_segment);                     \
        account_vtime(prev);                                    \
} while (0)

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
                __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
        __ret; \
})

static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
        case 1:
                /*
                 * There is no byte-sized exchange instruction; emulate it
                 * with a compare-and-swap loop on the aligned word that
                 * contains the byte.
                 */
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;  /* bit shift of the byte within the word */
                addr ^= addr & 3;               /* align down to the word boundary */
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  lr  0,%0\n"
                        "    nr  0,%3\n"
                        "    or  0,%2\n"
                        "    cs  %0,0,0(%4)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0" );
                x = old >> shift;
                break;
        case 2:
                /* same technique for an aligned halfword */
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  lr  0,%0\n"
                        "    nr  0,%3\n"
                        "    or  0,%2\n"
                        "    cs  %0,0,0(%4)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0" );
                x = old >> shift;
                break;
        case 4:
                asm volatile (
                        "    l   %0,0(%3)\n"
                        "0:  cs  %0,%2,0(%3)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(int *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(int *) ptr)
                        : "memory", "cc" );
                x = old;
                break;
#ifdef __s390x__
        case 8:
                asm volatile (
                        "    lg  %0,0(%3)\n"
                        "0:  csg %0,%2,0(%3)\n"
                        "    jl  0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(long *) ptr)
                        : "memory", "cc" );
                x = old;
                break;
#endif /* __s390x__ */
        }
        return x;
}

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                       (unsigned long)(n),sizeof(*(ptr))))

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
        case 1:
                /*
                 * There is no byte-sized compare-and-swap instruction;
                 * emulate it with cs on the aligned word that contains the
                 * byte and retry if only the surrounding bytes changed.
                 */
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  nr  %0,%5\n"
                        "    lr  %1,%0\n"
                        "    or  %0,%2\n"
                        "    or  %1,%3\n"
                        "    cs  %0,%1,0(%4)\n"
                        "    jnl 1f\n"
                        "    xr  %1,%0\n"
                        "    nr  %1,%5\n"
                        "    jnz 0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(255 << shift))
                        : "memory", "cc" );
                return prev >> shift;
        case 2:
                /* same technique for an aligned halfword */
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        "    l   %0,0(%4)\n"
                        "0:  nr  %0,%5\n"
                        "    lr  %1,%0\n"
                        "    or  %0,%2\n"
                        "    or  %1,%3\n"
                        "    cs  %0,%1,0(%4)\n"
                        "    jnl 1f\n"
                        "    xr  %1,%0\n"
                        "    nr  %1,%5\n"
                        "    jnz 0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (addr),
                          "d" (~(65535 << shift))
                        : "memory", "cc" );
                return prev >> shift;
        case 4:
                asm volatile (
                        "    cs  %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc" );
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile (
                        "    csg %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc" );
                return prev;
#endif /* __s390x__ */
        }
        return old;
}
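/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * lock-free add built on the cmpxchg() macro above.  The name
 * example_atomic_add_return is an assumption for demonstration only;
 * real kernel code would normally use the atomic_t/atomic64_t helpers.
 */
static inline unsigned long
example_atomic_add_return(unsigned long i, unsigned long *ptr)
{
        unsigned long old, new;

        do {
                old = *ptr;             /* sample the current value */
                new = old + i;          /* compute the value we want to store */
                /* retry if another CPU modified *ptr since the sample */
        } while (cmpxchg(ptr, old, new) != old);
        return new;
}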
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
#define SYNC_OTHER_CORES(x)  eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()

#define set_mb(var, value)      do { var = value; mb(); } while (0)
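/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * producer/consumer pair showing how the barriers above are meant to be
 * used.  smp_wmb() orders the payload store before the flag store, and
 * the reader pairs it with smp_rmb() between loading the flag and the
 * payload.  The names example_publish/example_consume are assumptions
 * for demonstration only.
 */
static inline void example_publish(int *data, int *flag, int value)
{
        *data = value;          /* 1. write the payload */
        smp_wmb();              /* 2. order the payload before the flag */
        *flag = 1;              /* 3. tell the reader the payload is ready */
}

static inline int example_consume(int *data, int *flag)
{
        if (!*flag)
                return 0;       /* payload not published yet */
        smp_rmb();              /* order the flag load before the payload load */
        return *data;
}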
#ifdef __s390x__

#define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   lctlg 0,0,0(%0)\n" \
                "0: ex    %1,0(1)" \
                : : "a" (&array), "a" (((low)<<4)+(high)), \
                    "m" (*(addrtype *)(array)) : "1" ); \
        })

#define __ctl_store(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   stctg 0,0,0(%1)\n" \
                "0: ex    %2,0(1)" \
                : "=m" (*(addrtype *)(array)) \
                : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
        })

#define __ctl_set_bit(cr, bit) ({ \
        __u8 __dummy[24]; \
        __asm__ __volatile__ ( \
                "    bras  1,0f\n"      /* skip indirect insns */ \
                "    stctg 0,0,0(%1)\n" \
                "    lctlg 0,0,0(%1)\n" \
                "0:  ex    %2,0(1)\n"   /* execute stctg */ \
                "    lg    0,0(%1)\n" \
                "    ogr   0,%3\n"      /* set the bit */ \
                "    stg   0,0(%1)\n" \
                "1:  ex    %2,6(1)"     /* execute lctlg */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (1L<<(bit)) \
                : "cc", "0", "1" ); \
        })

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "    bras  1,0f\n"      /* skip indirect insns */ \
                "    stctg 0,0,0(%1)\n" \
                "    lctlg 0,0,0(%1)\n" \
                "0:  ex    %2,0(1)\n"   /* execute stctg */ \
                "    lg    0,0(%1)\n" \
                "    ngr   0,%3\n"      /* clear the bit */ \
                "    stg   0,0(%1)\n" \
                "1:  ex    %2,6(1)"     /* execute lctlg */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (~(1L<<(bit))) \
                : "cc", "0", "1" ); \
        })

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   lctl  0,0,0(%0)\n" \
                "0: ex    %1,0(1)" \
                : : "a" (&array), "a" (((low)<<4)+(high)), \
                    "m" (*(addrtype *)(array)) : "1" ); \
        })

#define __ctl_store(array, low, high) ({ \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        __asm__ __volatile__ ( \
                "   bras  1,0f\n" \
                "   stctl 0,0,0(%1)\n" \
                "0: ex    %2,0(1)" \
                : "=m" (*(addrtype *)(array)) \
                : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
        })

#define __ctl_set_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "    bras  1,0f\n"      /* skip indirect insns */ \
                "    stctl 0,0,0(%1)\n" \
                "    lctl  0,0,0(%1)\n" \
                "0:  ex    %2,0(1)\n"   /* execute stctl */ \
                "    l     0,0(%1)\n" \
                "    or    0,%3\n"      /* set the bit */ \
                "    st    0,0(%1)\n" \
                "1:  ex    %2,4(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (1<<(bit)) \
                : "cc", "0", "1" ); \
        })

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 __dummy[16]; \
        __asm__ __volatile__ ( \
                "    bras  1,0f\n"      /* skip indirect insns */ \
                "    stctl 0,0,0(%1)\n" \
                "    lctl  0,0,0(%1)\n" \
                "0:  ex    %2,0(1)\n"   /* execute stctl */ \
                "    l     0,0(%1)\n" \
                "    nr    0,%3\n"      /* clear the bit */ \
                "    st    0,0(%1)\n" \
                "1:  ex    %2,4(1)"     /* execute lctl */ \
                : "=m" (__dummy) \
                : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
                  "a" (cr*17), "a" (~(1<<(bit))) \
                : "cc", "0", "1" ); \
        })
#endif /* __s390x__ */

#include <linux/irqflags.h>

/*
 * Use this to set the psw mask, except for the first byte, which is
 * not changed by this function (it keeps the value from the current PSW).
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        local_save_flags(mask);
        __load_psw_mask(mask);
}

#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif