#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>

/*
 * System defines.. Note that this is included both from .c and .S
 * files, so it does only defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure. Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is setup by the secondary bootstrap loader. Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM		ZERO_PGE
#define COMMAND_LINE	((char*)(PARAM + 0x0000))
#define INITRD_START	(*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE	(*(unsigned long *) (PARAM+0x108))

#ifndef __ASSEMBLY__
#include <linux/kernel.h>

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	unsigned int	sbz1 : 30;	/* should be zero */
	unsigned int	err2 : 1;	/* second error */
	unsigned int	retry : 1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned int	code;		/* machine check code */
	unsigned int	frame_rev;	/* frame revision */
};

/* Machine Check Frame for uncorrectable errors (Large format)
 *	--- This is used to log uncorrectable errors such as
 *	    double bit ECC errors.
 *	--- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25 */
	unsigned long	paltemp[24];	/* PAL TEMP REGS. */
	unsigned long	exc_addr;	/* Address of excepting instruction */
	unsigned long	exc_sum;	/* Summary of arithmetic traps. */
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode. */
	unsigned long	isr;		/* Interrupt Status Reg. */
	unsigned long	icsr;		/* CURRENT SETUP OF EV5 IBOX */
	unsigned long	ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity
					   <12> set TAG parity */
	unsigned long	dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
					   <2> Data error in bank 0
					   <3> Data error in bank 1
					   <4> Tag error in bank 0
					   <5> Tag error in bank 1 */
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Holds the reason for D-stream
					   fault or D-cache parity errors */
	unsigned long	sc_addr;	/* Address that was being accessed
					   when EV5 detected Secondary cache
					   failure. */
	unsigned long	sc_stat;	/* Helps determine if the error was
					   TAG/Data parity (Secondary Cache) */
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR */
	unsigned long	ei_addr;	/* Physical address of any transfer
					   that is logged in EV5 EI_STAT */
	unsigned long	fill_syndrome;	/* For correcting ECC errors. */
	unsigned long	ei_stat;	/* Helps identify reason of any
					   processor uncorrectable error
					   at its external interface. */
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register */
};

struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define switch_to(P,N,L) \
  do { \
    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
    check_mmu_context(); \
  } while (0)

struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);

#define mb() \
__asm__ __volatile__("mb": : :"memory")

#define rmb() \
__asm__ __volatile__("mb": : :"memory")

#define wmb() \
__asm__ __volatile__("wmb": : :"memory")

#define read_barrier_depends() \
__asm__ __volatile__("mb": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver() \
({ unsigned long __implver; \
   __asm__ ("implver %0" : "=r"(__implver)); \
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask) \
({ unsigned long __amask, __input = (mask); \
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
   __amask; })
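
/*
 * Example (illustration only; the helper name is hypothetical): amask()
 * clears the bits of the extensions the CPU actually implements, so a
 * zero result for AMASK_BWX means the byte/word instructions are present.
 */
static inline int example_cpu_has_bwx(void)
{
	return amask(AMASK_BWX) == 0;
}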

#define __CALL_PAL_R0(NAME, TYPE) \
static inline TYPE NAME(void) \
{ \
	register TYPE __r0 __asm__("$0"); \
	__asm__ __volatile__( \
		"call_pal %1 # " #NAME \
		:"=r" (__r0) \
		:"i" (PAL_ ## NAME) \
		:"$1", "$16", "$22", "$23", "$24", "$25"); \
	return __r0; \
}

#define __CALL_PAL_W1(NAME, TYPE0) \
static inline void NAME(TYPE0 arg0) \
{ \
	register TYPE0 __r16 __asm__("$16") = arg0; \
	__asm__ __volatile__( \
		"call_pal %1 # "#NAME \
		: "=r"(__r16) \
		: "i"(PAL_ ## NAME), "0"(__r16) \
		: "$1", "$22", "$23", "$24", "$25"); \
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
	register TYPE0 __r16 __asm__("$16") = arg0; \
	register TYPE1 __r17 __asm__("$17") = arg1; \
	__asm__ __volatile__( \
		"call_pal %2 # "#NAME \
		: "=r"(__r16), "=r"(__r17) \
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
		: "$1", "$22", "$23", "$24", "$25"); \
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
static inline RTYPE NAME(TYPE0 arg0) \
{ \
	register RTYPE __r0 __asm__("$0"); \
	register TYPE0 __r16 __asm__("$16") = arg0; \
	__asm__ __volatile__( \
		"call_pal %2 # "#NAME \
		: "=r"(__r16), "=r"(__r0) \
		: "i"(PAL_ ## NAME), "0"(__r16) \
		: "$1", "$22", "$23", "$24", "$25"); \
	return __r0; \
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
{ \
	register RTYPE __r0 __asm__("$0"); \
	register TYPE0 __r16 __asm__("$16") = arg0; \
	register TYPE1 __r17 __asm__("$17") = arg1; \
	__asm__ __volatile__( \
		"call_pal %3 # "#NAME \
		: "=r"(__r16), "=r"(__r17), "=r"(__r0) \
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
		: "$1", "$22", "$23", "$24", "$25"); \
	return __r0; \
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);

#define IPL_MIN		0
#define IPL_SW0		1
#define IPL_SW1		2
#define IPL_DEV0	3
#define IPL_DEV1	4
#define IPL_TIMER	5
#define IPL_PERF	6
#define IPL_POWERFAIL	6
#define IPL_MCHECK	7
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()	(rdps() & 7)
#define setipl(ipl)	((void) swpipl(ipl))

#define local_irq_disable()		do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()		do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags)		((flags) = rdps())
#define local_irq_save(flags)		do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled()	(getipl() == IPL_MAX)
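
/*
 * Example (illustration only; the function name is hypothetical): the
 * usual pattern for the IPL-based interrupt macros above. swpipl()
 * returns the previous processor status, so the value saved by
 * local_irq_save() can simply be handed back to local_irq_restore().
 */
static inline void example_irq_save_restore(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raise IPL to IPL_MAX */
	/* ... code that must not be interrupted on this CPU ... */
	local_irq_restore(flags);	/* drop back to the saved level */
}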

/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...) \
({ \
	register unsigned long __r16 __asm__("$16") = (nr); \
	register unsigned long __r17 __asm__("$17"); arg; \
	__asm__ __volatile__( \
		"call_pal %3 #__tbi" \
		:"=r" (__r16),"=r" (__r17) \
		:"0" (__r16),"i" (PAL_tbi) ,##arg1 \
		:"$0", "$1", "$22", "$23", "$24", "$25"); \
})

#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	insbl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extbl	%2,%4,%0\n"
	"	mskbl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	inswl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extwl	%2,%4,%0\n"
	"	mskwl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
	unsigned long __xchg__res; \
	volatile void *__xchg__ptr = (ptr); \
	switch (size) { \
		case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
		case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
		case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
		case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
		default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
	} \
	__xchg__res; \
})

#define xchg(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
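
/*
 * Example (illustration only; the helpers are hypothetical and real code
 * should use the spinlock API): because xchg() clobbers "memory" and, on
 * SMP, issues an "mb" after the successful store-conditional, tas() alone
 * is enough for a crude test-and-set lock.
 */
static inline void example_ts_lock(volatile int *lock)
{
	while (tas(lock))	/* previous value 0 means we took the lock */
		while (*lock)	/* spin read-only until it looks free again */
			barrier();
}

static inline void example_ts_unlock(volatile int *lock)
{
	mb();			/* order the critical section before the release */
	*lock = 0;
}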

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change. If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	insbl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extbl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskbl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	inswl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extwl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskwl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
	"	beq %1,2f\n"
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 1:
			return __cmpxchg_u8(ptr, old, new);
		case 2:
			return __cmpxchg_u16(ptr, old, new);
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
  ({ \
     __typeof__(*(ptr)) _o_ = (o); \
     __typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
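
/*
 * Example (illustration only; the function name is hypothetical, the real
 * atomic helpers live in asm/atomic.h): the classic retry loop built on
 * cmpxchg(). The store only happens if the location still holds the value
 * we read; otherwise the returned value differs from "old" and we retry.
 */
static inline int example_cmpxchg_add(volatile int *counter, int inc)
{
	int old, new;

	do {
		old = *counter;
		new = old + inc;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}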

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif