at v2.6.16-rc2 604 lines 17 kB view raw
1#ifndef __ALPHA_SYSTEM_H 2#define __ALPHA_SYSTEM_H 3 4#include <linux/config.h> 5#include <asm/pal.h> 6#include <asm/page.h> 7#include <asm/barrier.h> 8 9/* 10 * System defines.. Note that this is included both from .c and .S 11 * files, so it does only defines, not any C code. 12 */ 13 14/* 15 * We leave one page for the initial stack page, and one page for 16 * the initial process structure. Also, the console eats 3 MB for 17 * the initial bootloader (one of which we can reclaim later). 18 */ 19#define BOOT_PCB 0x20000000 20#define BOOT_ADDR 0x20000000 21/* Remove when official MILO sources have ELF support: */ 22#define BOOT_SIZE (16*1024) 23 24#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 25#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ 26#else 27#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ 28#endif 29 30#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) 31#define SWAPPER_PGD KERNEL_START 32#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) 33#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) 34#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) 35#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) 36 37#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) 38 39/* 40 * This is setup by the secondary bootstrap loader. Because 41 * the zero page is zeroed out as soon as the vm system is 42 * initialized, we need to copy things out into a more permanent 43 * place. 44 */ 45#define PARAM ZERO_PGE 46#define COMMAND_LINE ((char*)(PARAM + 0x0000)) 47#define INITRD_START (*(unsigned long *) (PARAM+0x100)) 48#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) 49 50#ifndef __ASSEMBLY__ 51#include <linux/kernel.h> 52 53/* 54 * This is the logout header that should be common to all platforms 55 * (assuming they are running OSF/1 PALcode, I guess). 56 */ 57struct el_common { 58 unsigned int size; /* size in bytes of logout area */ 59 unsigned int sbz1 : 30; /* should be zero */ 60 unsigned int err2 : 1; /* second error */ 61 unsigned int retry : 1; /* retry flag */ 62 unsigned int proc_offset; /* processor-specific offset */ 63 unsigned int sys_offset; /* system-specific offset */ 64 unsigned int code; /* machine check code */ 65 unsigned int frame_rev; /* frame revision */ 66}; 67 68/* Machine Check Frame for uncorrectable errors (Large format) 69 * --- This is used to log uncorrectable errors such as 70 * double bit ECC errors. 71 * --- These errors are detected by both processor and systems. 72 */ 73struct el_common_EV5_uncorrectable_mcheck { 74 unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ 75 unsigned long paltemp[24]; /* PAL TEMP REGS. */ 76 unsigned long exc_addr; /* Address of excepting instruction*/ 77 unsigned long exc_sum; /* Summary of arithmetic traps. */ 78 unsigned long exc_mask; /* Exception mask (from exc_sum). */ 79 unsigned long pal_base; /* Base address for PALcode. */ 80 unsigned long isr; /* Interrupt Status Reg. */ 81 unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ 82 unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity 83 <12> set TAG parity*/ 84 unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: 85 <2> Data error in bank 0 86 <3> Data error in bank 1 87 <4> Tag error in bank 0 88 <5> Tag error in bank 1 */ 89 unsigned long va; /* Effective VA of fault or miss. */ 90 unsigned long mm_stat; /* Holds the reason for D-stream 91 fault or D-cache parity errors */ 92 unsigned long sc_addr; /* Address that was being accessed 93 when EV5 detected Secondary cache 94 failure. */ 95 unsigned long sc_stat; /* Helps determine if the error was 96 TAG/Data parity(Secondary Cache)*/ 97 unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ 98 unsigned long ei_addr; /* Physical address of any transfer 99 that is logged in EV5 EI_STAT */ 100 unsigned long fill_syndrome; /* For correcting ECC errors. */ 101 unsigned long ei_stat; /* Helps identify reason of any 102 processor uncorrectable error 103 at its external interface. */ 104 unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ 105}; 106 107struct el_common_EV6_mcheck { 108 unsigned int FrameSize; /* Bytes, including this field */ 109 unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ 110 unsigned int CpuOffset; /* Offset to CPU-specific info */ 111 unsigned int SystemOffset; /* Offset to system-specific info */ 112 unsigned int MCHK_Code; 113 unsigned int MCHK_Frame_Rev; 114 unsigned long I_STAT; /* EV6 Internal Processor Registers */ 115 unsigned long DC_STAT; /* (See the 21264 Spec) */ 116 unsigned long C_ADDR; 117 unsigned long DC1_SYNDROME; 118 unsigned long DC0_SYNDROME; 119 unsigned long C_STAT; 120 unsigned long C_STS; 121 unsigned long MM_STAT; 122 unsigned long EXC_ADDR; 123 unsigned long IER_CM; 124 unsigned long ISUM; 125 unsigned long RESERVED0; 126 unsigned long PAL_BASE; 127 unsigned long I_CTL; 128 unsigned long PCTX; 129}; 130 131extern void halt(void) __attribute__((noreturn)); 132#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) 133 134#define switch_to(P,N,L) \ 135 do { \ 136 (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ 137 check_mmu_context(); \ 138 } while (0) 139 140struct task_struct; 141extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); 142 143/* 144 * On SMP systems, when the scheduler does migration-cost autodetection, 145 * it needs a way to flush as much of the CPU's caches as possible. 146 * 147 * TODO: fill this in! 148 */ 149static inline void sched_cacheflush(void) 150{ 151} 152 153#define imb() \ 154__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") 155 156#define draina() \ 157__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") 158 159enum implver_enum { 160 IMPLVER_EV4, 161 IMPLVER_EV5, 162 IMPLVER_EV6 163}; 164 165#ifdef CONFIG_ALPHA_GENERIC 166#define implver() \ 167({ unsigned long __implver; \ 168 __asm__ ("implver %0" : "=r"(__implver)); \ 169 (enum implver_enum) __implver; }) 170#else 171/* Try to eliminate some dead code. */ 172#ifdef CONFIG_ALPHA_EV4 173#define implver() IMPLVER_EV4 174#endif 175#ifdef CONFIG_ALPHA_EV5 176#define implver() IMPLVER_EV5 177#endif 178#if defined(CONFIG_ALPHA_EV6) 179#define implver() IMPLVER_EV6 180#endif 181#endif 182 183enum amask_enum { 184 AMASK_BWX = (1UL << 0), 185 AMASK_FIX = (1UL << 1), 186 AMASK_CIX = (1UL << 2), 187 AMASK_MAX = (1UL << 8), 188 AMASK_PRECISE_TRAP = (1UL << 9), 189}; 190 191#define amask(mask) \ 192({ unsigned long __amask, __input = (mask); \ 193 __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ 194 __amask; }) 195 196#define __CALL_PAL_R0(NAME, TYPE) \ 197static inline TYPE NAME(void) \ 198{ \ 199 register TYPE __r0 __asm__("$0"); \ 200 __asm__ __volatile__( \ 201 "call_pal %1 # " #NAME \ 202 :"=r" (__r0) \ 203 :"i" (PAL_ ## NAME) \ 204 :"$1", "$16", "$22", "$23", "$24", "$25"); \ 205 return __r0; \ 206} 207 208#define __CALL_PAL_W1(NAME, TYPE0) \ 209static inline void NAME(TYPE0 arg0) \ 210{ \ 211 register TYPE0 __r16 __asm__("$16") = arg0; \ 212 __asm__ __volatile__( \ 213 "call_pal %1 # "#NAME \ 214 : "=r"(__r16) \ 215 : "i"(PAL_ ## NAME), "0"(__r16) \ 216 : "$1", "$22", "$23", "$24", "$25"); \ 217} 218 219#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ 220static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ 221{ \ 222 register TYPE0 __r16 __asm__("$16") = arg0; \ 223 register TYPE1 __r17 __asm__("$17") = arg1; \ 224 __asm__ __volatile__( \ 225 "call_pal %2 # "#NAME \ 226 : "=r"(__r16), "=r"(__r17) \ 227 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ 228 : "$1", "$22", "$23", "$24", "$25"); \ 229} 230 231#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ 232static inline RTYPE NAME(TYPE0 arg0) \ 233{ \ 234 register RTYPE __r0 __asm__("$0"); \ 235 register TYPE0 __r16 __asm__("$16") = arg0; \ 236 __asm__ __volatile__( \ 237 "call_pal %2 # "#NAME \ 238 : "=r"(__r16), "=r"(__r0) \ 239 : "i"(PAL_ ## NAME), "0"(__r16) \ 240 : "$1", "$22", "$23", "$24", "$25"); \ 241 return __r0; \ 242} 243 244#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ 245static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ 246{ \ 247 register RTYPE __r0 __asm__("$0"); \ 248 register TYPE0 __r16 __asm__("$16") = arg0; \ 249 register TYPE1 __r17 __asm__("$17") = arg1; \ 250 __asm__ __volatile__( \ 251 "call_pal %3 # "#NAME \ 252 : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ 253 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ 254 : "$1", "$22", "$23", "$24", "$25"); \ 255 return __r0; \ 256} 257 258__CALL_PAL_W1(cflush, unsigned long); 259__CALL_PAL_R0(rdmces, unsigned long); 260__CALL_PAL_R0(rdps, unsigned long); 261__CALL_PAL_R0(rdusp, unsigned long); 262__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); 263__CALL_PAL_R0(whami, unsigned long); 264__CALL_PAL_W2(wrent, void*, unsigned long); 265__CALL_PAL_W1(wripir, unsigned long); 266__CALL_PAL_W1(wrkgp, unsigned long); 267__CALL_PAL_W1(wrmces, unsigned long); 268__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); 269__CALL_PAL_W1(wrusp, unsigned long); 270__CALL_PAL_W1(wrvptptr, unsigned long); 271 272#define IPL_MIN 0 273#define IPL_SW0 1 274#define IPL_SW1 2 275#define IPL_DEV0 3 276#define IPL_DEV1 4 277#define IPL_TIMER 5 278#define IPL_PERF 6 279#define IPL_POWERFAIL 6 280#define IPL_MCHECK 7 281#define IPL_MAX 7 282 283#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 284#undef IPL_MIN 285#define IPL_MIN __min_ipl 286extern int __min_ipl; 287#endif 288 289#define getipl() (rdps() & 7) 290#define setipl(ipl) ((void) swpipl(ipl)) 291 292#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) 293#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) 294#define local_save_flags(flags) ((flags) = rdps()) 295#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) 296#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) 297 298#define irqs_disabled() (getipl() == IPL_MAX) 299 300/* 301 * TB routines.. 302 */ 303#define __tbi(nr,arg,arg1...) \ 304({ \ 305 register unsigned long __r16 __asm__("$16") = (nr); \ 306 register unsigned long __r17 __asm__("$17"); arg; \ 307 __asm__ __volatile__( \ 308 "call_pal %3 #__tbi" \ 309 :"=r" (__r16),"=r" (__r17) \ 310 :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ 311 :"$0", "$1", "$22", "$23", "$24", "$25"); \ 312}) 313 314#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) 315#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) 316#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) 317#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) 318#define tbiap() __tbi(-1, /* no second argument */) 319#define tbia() __tbi(-2, /* no second argument */) 320 321/* 322 * Atomic exchange. 323 * Since it can be used to implement critical sections 324 * it must clobber "memory" (also for interrupts in UP). 325 */ 326 327static inline unsigned long 328__xchg_u8(volatile char *m, unsigned long val) 329{ 330 unsigned long ret, tmp, addr64; 331 332 __asm__ __volatile__( 333 " andnot %4,7,%3\n" 334 " insbl %1,%4,%1\n" 335 "1: ldq_l %2,0(%3)\n" 336 " extbl %2,%4,%0\n" 337 " mskbl %2,%4,%2\n" 338 " or %1,%2,%2\n" 339 " stq_c %2,0(%3)\n" 340 " beq %2,2f\n" 341#ifdef CONFIG_SMP 342 " mb\n" 343#endif 344 ".subsection 2\n" 345 "2: br 1b\n" 346 ".previous" 347 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) 348 : "r" ((long)m), "1" (val) : "memory"); 349 350 return ret; 351} 352 353static inline unsigned long 354__xchg_u16(volatile short *m, unsigned long val) 355{ 356 unsigned long ret, tmp, addr64; 357 358 __asm__ __volatile__( 359 " andnot %4,7,%3\n" 360 " inswl %1,%4,%1\n" 361 "1: ldq_l %2,0(%3)\n" 362 " extwl %2,%4,%0\n" 363 " mskwl %2,%4,%2\n" 364 " or %1,%2,%2\n" 365 " stq_c %2,0(%3)\n" 366 " beq %2,2f\n" 367#ifdef CONFIG_SMP 368 " mb\n" 369#endif 370 ".subsection 2\n" 371 "2: br 1b\n" 372 ".previous" 373 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) 374 : "r" ((long)m), "1" (val) : "memory"); 375 376 return ret; 377} 378 379static inline unsigned long 380__xchg_u32(volatile int *m, unsigned long val) 381{ 382 unsigned long dummy; 383 384 __asm__ __volatile__( 385 "1: ldl_l %0,%4\n" 386 " bis $31,%3,%1\n" 387 " stl_c %1,%2\n" 388 " beq %1,2f\n" 389#ifdef CONFIG_SMP 390 " mb\n" 391#endif 392 ".subsection 2\n" 393 "2: br 1b\n" 394 ".previous" 395 : "=&r" (val), "=&r" (dummy), "=m" (*m) 396 : "rI" (val), "m" (*m) : "memory"); 397 398 return val; 399} 400 401static inline unsigned long 402__xchg_u64(volatile long *m, unsigned long val) 403{ 404 unsigned long dummy; 405 406 __asm__ __volatile__( 407 "1: ldq_l %0,%4\n" 408 " bis $31,%3,%1\n" 409 " stq_c %1,%2\n" 410 " beq %1,2f\n" 411#ifdef CONFIG_SMP 412 " mb\n" 413#endif 414 ".subsection 2\n" 415 "2: br 1b\n" 416 ".previous" 417 : "=&r" (val), "=&r" (dummy), "=m" (*m) 418 : "rI" (val), "m" (*m) : "memory"); 419 420 return val; 421} 422 423/* This function doesn't exist, so you'll get a linker error 424 if something tries to do an invalid xchg(). */ 425extern void __xchg_called_with_bad_pointer(void); 426 427#define __xchg(ptr, x, size) \ 428({ \ 429 unsigned long __xchg__res; \ 430 volatile void *__xchg__ptr = (ptr); \ 431 switch (size) { \ 432 case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ 433 case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ 434 case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ 435 case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ 436 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ 437 } \ 438 __xchg__res; \ 439}) 440 441#define xchg(ptr,x) \ 442 ({ \ 443 __typeof__(*(ptr)) _x_ = (x); \ 444 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ 445 }) 446 447#define tas(ptr) (xchg((ptr),1)) 448 449 450/* 451 * Atomic compare and exchange. Compare OLD with MEM, if identical, 452 * store NEW in MEM. Return the initial value in MEM. Success is 453 * indicated by comparing RETURN with OLD. 454 * 455 * The memory barrier should be placed in SMP only when we actually 456 * make the change. If we don't change anything (so if the returned 457 * prev is equal to old) then we aren't acquiring anything new and 458 * we don't need any memory barrier as far I can tell. 459 */ 460 461#define __HAVE_ARCH_CMPXCHG 1 462 463static inline unsigned long 464__cmpxchg_u8(volatile char *m, long old, long new) 465{ 466 unsigned long prev, tmp, cmp, addr64; 467 468 __asm__ __volatile__( 469 " andnot %5,7,%4\n" 470 " insbl %1,%5,%1\n" 471 "1: ldq_l %2,0(%4)\n" 472 " extbl %2,%5,%0\n" 473 " cmpeq %0,%6,%3\n" 474 " beq %3,2f\n" 475 " mskbl %2,%5,%2\n" 476 " or %1,%2,%2\n" 477 " stq_c %2,0(%4)\n" 478 " beq %2,3f\n" 479#ifdef CONFIG_SMP 480 " mb\n" 481#endif 482 "2:\n" 483 ".subsection 2\n" 484 "3: br 1b\n" 485 ".previous" 486 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) 487 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); 488 489 return prev; 490} 491 492static inline unsigned long 493__cmpxchg_u16(volatile short *m, long old, long new) 494{ 495 unsigned long prev, tmp, cmp, addr64; 496 497 __asm__ __volatile__( 498 " andnot %5,7,%4\n" 499 " inswl %1,%5,%1\n" 500 "1: ldq_l %2,0(%4)\n" 501 " extwl %2,%5,%0\n" 502 " cmpeq %0,%6,%3\n" 503 " beq %3,2f\n" 504 " mskwl %2,%5,%2\n" 505 " or %1,%2,%2\n" 506 " stq_c %2,0(%4)\n" 507 " beq %2,3f\n" 508#ifdef CONFIG_SMP 509 " mb\n" 510#endif 511 "2:\n" 512 ".subsection 2\n" 513 "3: br 1b\n" 514 ".previous" 515 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) 516 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); 517 518 return prev; 519} 520 521static inline unsigned long 522__cmpxchg_u32(volatile int *m, int old, int new) 523{ 524 unsigned long prev, cmp; 525 526 __asm__ __volatile__( 527 "1: ldl_l %0,%5\n" 528 " cmpeq %0,%3,%1\n" 529 " beq %1,2f\n" 530 " mov %4,%1\n" 531 " stl_c %1,%2\n" 532 " beq %1,3f\n" 533#ifdef CONFIG_SMP 534 " mb\n" 535#endif 536 "2:\n" 537 ".subsection 2\n" 538 "3: br 1b\n" 539 ".previous" 540 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 541 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 542 543 return prev; 544} 545 546static inline unsigned long 547__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) 548{ 549 unsigned long prev, cmp; 550 551 __asm__ __volatile__( 552 "1: ldq_l %0,%5\n" 553 " cmpeq %0,%3,%1\n" 554 " beq %1,2f\n" 555 " mov %4,%1\n" 556 " stq_c %1,%2\n" 557 " beq %1,3f\n" 558#ifdef CONFIG_SMP 559 " mb\n" 560#endif 561 "2:\n" 562 ".subsection 2\n" 563 "3: br 1b\n" 564 ".previous" 565 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 566 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 567 568 return prev; 569} 570 571/* This function doesn't exist, so you'll get a linker error 572 if something tries to do an invalid cmpxchg(). */ 573extern void __cmpxchg_called_with_bad_pointer(void); 574 575static inline unsigned long 576__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) 577{ 578 switch (size) { 579 case 1: 580 return __cmpxchg_u8(ptr, old, new); 581 case 2: 582 return __cmpxchg_u16(ptr, old, new); 583 case 4: 584 return __cmpxchg_u32(ptr, old, new); 585 case 8: 586 return __cmpxchg_u64(ptr, old, new); 587 } 588 __cmpxchg_called_with_bad_pointer(); 589 return old; 590} 591 592#define cmpxchg(ptr,o,n) \ 593 ({ \ 594 __typeof__(*(ptr)) _o_ = (o); \ 595 __typeof__(*(ptr)) _n_ = (n); \ 596 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 597 (unsigned long)_n_, sizeof(*(ptr))); \ 598 }) 599 600#endif /* __ASSEMBLY__ */ 601 602#define arch_align_stack(x) (x) 603 604#endif