#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <asm/pal.h>
#include <asm/page.h>
#include <asm/barrier.h>

/*
 * System defines.. Note that this is included both from .c and .S
 * files, so it does only defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure. Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB        0x20000000
#define BOOT_ADDR       0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE       (16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS       0x300000 /* Old bootloaders hardcoded this.  */
#else
#define KERNEL_START_PHYS       0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START    (PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD     KERNEL_START
#define INIT_STACK      (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT       (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE        (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR      (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is setup by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM           ZERO_PGE
#define COMMAND_LINE    ((char*)(PARAM + 0x0000))
#define INITRD_START    (*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE     (*(unsigned long *) (PARAM+0x108))
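
/*
 * Illustrative sketch (not part of the original header): early setup
 * code is expected to copy these values out of ZERO_PGE before the VM
 * system clears that page.  The buffer and variable names below are
 * hypothetical, chosen only to show the intended use:
 *
 *	static char saved_command_line_buf[256];	// hypothetical buffer
 *	unsigned long initrd_start, initrd_size;	// hypothetical copies
 *
 *	strlcpy(saved_command_line_buf, COMMAND_LINE,
 *		sizeof(saved_command_line_buf));
 *	initrd_start = INITRD_START;
 *	initrd_size  = INITRD_SIZE;
 */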

#ifndef __ASSEMBLY__
#include <linux/kernel.h>

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
        unsigned int    size;           /* size in bytes of logout area */
        unsigned int    sbz1 : 30;      /* should be zero */
        unsigned int    err2 : 1;       /* second error */
        unsigned int    retry : 1;      /* retry flag */
        unsigned int    proc_offset;    /* processor-specific offset */
        unsigned int    sys_offset;     /* system-specific offset */
        unsigned int    code;           /* machine check code */
        unsigned int    frame_rev;      /* frame revision */
};

/* Machine Check Frame for uncorrectable errors (Large format)
 *      --- This is used to log uncorrectable errors such as
 *          double bit ECC errors.
 *      --- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
        unsigned long   shadow[8];      /* Shadow reg. 8-14, 25           */
        unsigned long   paltemp[24];    /* PAL TEMP REGS.                 */
        unsigned long   exc_addr;       /* Address of excepting instruction*/
        unsigned long   exc_sum;        /* Summary of arithmetic traps.   */
        unsigned long   exc_mask;       /* Exception mask (from exc_sum). */
        unsigned long   pal_base;       /* Base address for PALcode.      */
        unsigned long   isr;            /* Interrupt Status Reg.          */
        unsigned long   icsr;           /* CURRENT SETUP OF EV5 IBOX      */
        unsigned long   ic_perr_stat;   /* I-CACHE Reg. <11> set Data parity
                                                        <12> set TAG parity */
        unsigned long   dc_perr_stat;   /* D-CACHE error Reg. Bits set to 1:
                                                <2> Data error in bank 0
                                                <3> Data error in bank 1
                                                <4> Tag error in bank 0
                                                <5> Tag error in bank 1 */
        unsigned long   va;             /* Effective VA of fault or miss. */
        unsigned long   mm_stat;        /* Holds the reason for D-stream
                                           fault or D-cache parity errors */
        unsigned long   sc_addr;        /* Address that was being accessed
                                           when EV5 detected Secondary cache
                                           failure.                        */
        unsigned long   sc_stat;        /* Helps determine if the error was
                                           TAG/Data parity(Secondary Cache) */
        unsigned long   bc_tag_addr;    /* Contents of EV5 BC_TAG_ADDR    */
        unsigned long   ei_addr;        /* Physical address of any transfer
                                           that is logged in EV5 EI_STAT  */
        unsigned long   fill_syndrome;  /* For correcting ECC errors.     */
        unsigned long   ei_stat;        /* Helps identify reason of any
                                           processor uncorrectable error
                                           at its external interface.     */
        unsigned long   ld_lock;        /* Contents of EV5 LD_LOCK register*/
};

struct el_common_EV6_mcheck {
        unsigned int FrameSize;         /* Bytes, including this field */
        unsigned int FrameFlags;        /* <31> = Retry, <30> = Second Error */
        unsigned int CpuOffset;         /* Offset to CPU-specific info */
        unsigned int SystemOffset;      /* Offset to system-specific info */
        unsigned int MCHK_Code;
        unsigned int MCHK_Frame_Rev;
        unsigned long I_STAT;           /* EV6 Internal Processor Registers */
        unsigned long DC_STAT;          /* (See the 21264 Spec) */
        unsigned long C_ADDR;
        unsigned long DC1_SYNDROME;
        unsigned long DC0_SYNDROME;
        unsigned long C_STAT;
        unsigned long C_STS;
        unsigned long MM_STAT;
        unsigned long EXC_ADDR;
        unsigned long IER_CM;
        unsigned long ISUM;
        unsigned long RESERVED0;
        unsigned long PAL_BASE;
        unsigned long I_CTL;
        unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define switch_to(P,N,L) \
  do { \
    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
    check_mmu_context(); \
  } while (0)

struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
        IMPLVER_EV4,
        IMPLVER_EV5,
        IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver() \
({ unsigned long __implver; \
   __asm__ ("implver %0" : "=r"(__implver)); \
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
        AMASK_BWX = (1UL << 0),
        AMASK_FIX = (1UL << 1),
        AMASK_CIX = (1UL << 2),
        AMASK_MAX = (1UL << 8),
        AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask) \
({ unsigned long __amask, __input = (mask); \
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
   __amask; })
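
/*
 * Illustrative sketch (not part of the original header): CPU feature
 * checks are typically written against implver() and amask().  Note
 * that amask returns the requested bits with the implemented features
 * cleared, so a zero result means the queried feature is present:
 *
 *	if (amask(AMASK_BWX) == 0) {
 *		// byte/word extension available
 *	}
 *	if (implver() >= IMPLVER_EV6) {
 *		// EV6-class (21264) core
 *	}
 */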

#define __CALL_PAL_R0(NAME, TYPE) \
static inline TYPE NAME(void) \
{ \
        register TYPE __r0 __asm__("$0"); \
        __asm__ __volatile__( \
                "call_pal %1 # " #NAME \
                :"=r" (__r0) \
                :"i" (PAL_ ## NAME) \
                :"$1", "$16", "$22", "$23", "$24", "$25"); \
        return __r0; \
}

#define __CALL_PAL_W1(NAME, TYPE0) \
static inline void NAME(TYPE0 arg0) \
{ \
        register TYPE0 __r16 __asm__("$16") = arg0; \
        __asm__ __volatile__( \
                "call_pal %1 # "#NAME \
                : "=r"(__r16) \
                : "i"(PAL_ ## NAME), "0"(__r16) \
                : "$1", "$22", "$23", "$24", "$25"); \
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
        register TYPE0 __r16 __asm__("$16") = arg0; \
        register TYPE1 __r17 __asm__("$17") = arg1; \
        __asm__ __volatile__( \
                "call_pal %2 # "#NAME \
                : "=r"(__r16), "=r"(__r17) \
                : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
                : "$1", "$22", "$23", "$24", "$25"); \
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
static inline RTYPE NAME(TYPE0 arg0) \
{ \
        register RTYPE __r0 __asm__("$0"); \
        register TYPE0 __r16 __asm__("$16") = arg0; \
        __asm__ __volatile__( \
                "call_pal %2 # "#NAME \
                : "=r"(__r16), "=r"(__r0) \
                : "i"(PAL_ ## NAME), "0"(__r16) \
                : "$1", "$22", "$23", "$24", "$25"); \
        return __r0; \
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
        register RTYPE __r0 __asm__("$0"); \
        register TYPE0 __r16 __asm__("$16") = arg0; \
        register TYPE1 __r17 __asm__("$17") = arg1; \
        __asm__ __volatile__( \
                "call_pal %3 # "#NAME \
                : "=r"(__r16), "=r"(__r17), "=r"(__r0) \
                : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
                : "$1", "$22", "$23", "$24", "$25"); \
        return __r0; \
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);

#define IPL_MIN         0
#define IPL_SW0         1
#define IPL_SW1         2
#define IPL_DEV0        3
#define IPL_DEV1        4
#define IPL_TIMER       5
#define IPL_PERF        6
#define IPL_POWERFAIL   6
#define IPL_MCHECK      7
#define IPL_MAX         7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN         __min_ipl
extern int __min_ipl;
#endif

#define getipl()        (rdps() & 7)
#define setipl(ipl)     ((void) swpipl(ipl))

#define local_irq_disable()     do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()      do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags) ((flags) = rdps())
#define local_irq_save(flags)   do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)        do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled() (getipl() == IPL_MAX)
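
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for a short critical section built on the macros above.
 * On Alpha this works by swapping the interrupt priority level via
 * the swpipl PALcall rather than toggling a global interrupt flag:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// raise IPL to IPL_MAX
 *	// ... touch state shared with interrupt handlers ...
 *	local_irq_restore(flags);	// drop back to the saved IPL
 */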

/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...) \
({ \
        register unsigned long __r16 __asm__("$16") = (nr); \
        register unsigned long __r17 __asm__("$17"); arg; \
        __asm__ __volatile__( \
                "call_pal %3 #__tbi" \
                :"=r" (__r16),"=r" (__r17) \
                :"0" (__r16),"i" (PAL_tbi) ,##arg1 \
                :"$0", "$1", "$22", "$23", "$24", "$25"); \
})

#define tbi(x,y)        __tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)        __tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)        __tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)         __tbi(3,__r17=(x),"1" (__r17))
#define tbiap()         __tbi(-1, /* no second argument */)
#define tbia()          __tbi(-2, /* no second argument */)

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extbl   %2,%4,%0\n"
        "       mskbl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extwl   %2,%4,%0\n"
        "       mskwl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
        unsigned long __xchg__res; \
        volatile void *__xchg__ptr = (ptr); \
        switch (size) { \
                case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
                case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
                case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
                case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
                default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
        } \
        __xchg__res; \
})

#define xchg(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
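
/*
 * Illustrative sketch (not part of the original header): xchg()
 * atomically stores a new value and returns the previous one, e.g.
 * a trivial test-and-set style flag ('busy' is hypothetical, and a
 * real lock would also need a release barrier before the unlock
 * store):
 *
 *	static unsigned long busy;
 *
 *	while (xchg(&busy, 1) != 0)
 *		cpu_relax();		// spin until we observed 0
 *	// ... critical section ...
 *	busy = 0;
 *
 * The *_local variants below are the same operations without the SMP
 * memory barrier, for data that only the local CPU ever touches.
 */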

static inline unsigned long
__xchg_u8_local(volatile char *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extbl   %2,%4,%0\n"
        "       mskbl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u16_local(volatile short *m, unsigned long val)
{
        unsigned long ret, tmp, addr64;

        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
        "1:     ldq_l   %2,0(%3)\n"
        "       extwl   %2,%4,%0\n"
        "       mskwl   %2,%4,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");

        return ret;
}

static inline unsigned long
__xchg_u32_local(volatile int *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

static inline unsigned long
__xchg_u64_local(volatile long *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");

        return val;
}

#define __xchg_local(ptr, x, size) \
({ \
        unsigned long __xchg__res; \
        volatile void *__xchg__ptr = (ptr); \
        switch (size) { \
                case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
                case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
                case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
                case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
                default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
        } \
        __xchg__res; \
})

#define xchg_local(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
                sizeof(*(ptr))); \
  })

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change. If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extbl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskbl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extwl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskwl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
#ifdef CONFIG_SMP
        "       mb\n"
#endif
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
                case 1:
                        return __cmpxchg_u8(ptr, old, new);
                case 2:
                        return __cmpxchg_u16(ptr, old, new);
                case 4:
                        return __cmpxchg_u32(ptr, old, new);
                case 8:
                        return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })
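
/*
 * Illustrative sketch (not part of the original header): the canonical
 * lock-free update loop built on cmpxchg().  'counter' is a hypothetical
 * shared variable:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The store only happens if the location still holds 'old'; otherwise
 * the returned value differs and the loop retries with fresh data.
 */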

static inline unsigned long
__cmpxchg_u8_local(volatile char *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extbl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskbl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u16_local(volatile short *m, long old, long new)
{
        unsigned long prev, tmp, cmp, addr64;

        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
        "1:     ldq_l   %2,0(%4)\n"
        "       extwl   %2,%5,%0\n"
        "       cmpeq   %0,%6,%3\n"
        "       beq     %3,2f\n"
        "       mskwl   %2,%5,%2\n"
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u32_local(volatile int *m, int old, int new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static inline unsigned long
__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
{
        unsigned long prev, cmp;

        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
        "       beq %1,2f\n"
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
                int size)
{
        switch (size) {
                case 1:
                        return __cmpxchg_u8_local(ptr, old, new);
                case 2:
                        return __cmpxchg_u16_local(ptr, old, new);
                case 4:
                        return __cmpxchg_u32_local(ptr, old, new);
                case 8:
                        return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg_local(ptr,o,n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
                                          (unsigned long)_n_, sizeof(*(ptr))); \
  })
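
/*
 * Illustrative sketch (not part of the original header): cmpxchg_local()
 * is intended for data that only the local CPU modifies (for example a
 * per-CPU statistics counter; 'my_cpu_stat' is hypothetical), where the
 * SMP memory barrier taken by cmpxchg() would be wasted:
 *
 *	unsigned long old = my_cpu_stat;
 *	if (cmpxchg_local(&my_cpu_stat, old, old + 1) == old) {
 *		// updated without paying for an SMP barrier
 *	}
 */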

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif