/* From Linux v2.6.15-rc2 (594 lines, 17 kB) */
#ifndef __ALPHA_SYSTEM_H
#define __ALPHA_SYSTEM_H

#include <linux/config.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/barrier.h>

/*
 * System defines.. Note that this is included both from .c and .S
 * files, so it does only defines, not any C code.
 */

/*
 * We leave one page for the initial stack page, and one page for
 * the initial process structure. Also, the console eats 3 MB for
 * the initial bootloader (one of which we can reclaim later).
 */
#define BOOT_PCB	0x20000000
#define BOOT_ADDR	0x20000000
/* Remove when official MILO sources have ELF support: */
#define BOOT_SIZE	(16*1024)

#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
#else
#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
#endif

#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
#define SWAPPER_PGD	KERNEL_START
#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)

#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)

/*
 * This is set up by the secondary bootstrap loader.  Because
 * the zero page is zeroed out as soon as the vm system is
 * initialized, we need to copy things out into a more permanent
 * place.
 */
#define PARAM			ZERO_PGE
#define COMMAND_LINE		((char*)(PARAM + 0x0000))
#define INITRD_START		(*(unsigned long *) (PARAM+0x100))
#define INITRD_SIZE		(*(unsigned long *) (PARAM+0x108))

#ifndef __ASSEMBLY__
#include <linux/kernel.h>

/*
 * This is the logout header that should be common to all platforms
 * (assuming they are running OSF/1 PALcode, I guess).
 */
struct el_common {
	unsigned int	size;		/* size in bytes of logout area */
	unsigned int	sbz1	: 30;	/* should be zero */
	unsigned int	err2	:  1;	/* second error */
	unsigned int	retry	:  1;	/* retry flag */
	unsigned int	proc_offset;	/* processor-specific offset */
	unsigned int	sys_offset;	/* system-specific offset */
	unsigned int	code;		/* machine check code */
	unsigned int	frame_rev;	/* frame revision */
};

/* Machine Check Frame for uncorrectable errors (Large format)
 *      --- This is used to log uncorrectable errors such as
 *          double bit ECC errors.
 *      --- These errors are detected by both processor and systems.
 */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long	shadow[8];	/* Shadow reg. 8-14, 25           */
	unsigned long	paltemp[24];	/* PAL TEMP REGS.                 */
	unsigned long	exc_addr;	/* Address of excepting instruction*/
	unsigned long	exc_sum;	/* Summary of arithmetic traps.   */
	unsigned long	exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long	pal_base;	/* Base address for PALcode.      */
	unsigned long	isr;		/* Interrupt Status Reg.          */
	unsigned long	icsr;		/* CURRENT SETUP OF EV5 IBOX      */
	unsigned long	ic_perr_stat;	/* I-CACHE Reg. <11> set Data parity
						       <12> set TAG parity */
	unsigned long	dc_perr_stat;	/* D-CACHE error Reg. Bits set to 1:
					     <2> Data error in bank 0
					     <3> Data error in bank 1
					     <4> Tag error in bank 0
					     <5> Tag error in bank 1        */
	unsigned long	va;		/* Effective VA of fault or miss. */
	unsigned long	mm_stat;	/* Holds the reason for D-stream
					   fault or D-cache parity errors  */
	unsigned long	sc_addr;	/* Address that was being accessed
					   when EV5 detected Secondary cache
					   failure.                        */
	unsigned long	sc_stat;	/* Helps determine if the error was
					   TAG/Data parity(Secondary Cache)*/
	unsigned long	bc_tag_addr;	/* Contents of EV5 BC_TAG_ADDR    */
	unsigned long	ei_addr;	/* Physical address of any transfer
					   that is logged in EV5 EI_STAT   */
	unsigned long	fill_syndrome;	/* For correcting ECC errors.     */
	unsigned long	ei_stat;	/* Helps identify reason of any
					   processor uncorrectable error
					   at its external interface.      */
	unsigned long	ld_lock;	/* Contents of EV5 LD_LOCK register*/
};

struct el_common_EV6_mcheck {
	unsigned int FrameSize;		/* Bytes, including this field */
	unsigned int FrameFlags;	/* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset;		/* Offset to CPU-specific info */
	unsigned int SystemOffset;	/* Offset to system-specific info */
	unsigned int MCHK_Code;
	unsigned int MCHK_Frame_Rev;
	unsigned long I_STAT;		/* EV6 Internal Processor Registers */
	unsigned long DC_STAT;		/* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};
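
/*
 * Usage sketch (not part of the original header), with a hypothetical
 * helper name: how a machine-check handler might use the common logout
 * header above to find the processor- and system-specific portions of
 * the frame, taking proc_offset/sys_offset as byte offsets from the
 * start of the logout area.
 */
static inline void example_parse_logout_frame(struct el_common *hdr)
{
	void *proc_frame = (char *) hdr + hdr->proc_offset;
	void *sys_frame = (char *) hdr + hdr->sys_offset;

	printk("mcheck code %#x, frame %u bytes, proc %p, sys %p\n",
	       hdr->code, hdr->size, proc_frame, sys_frame);
}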

extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))

#define switch_to(P,N,L)						\
  do {									\
    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P));	\
    check_mmu_context();						\
  } while (0)

struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);

#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")

#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")

enum implver_enum {
	IMPLVER_EV4,
	IMPLVER_EV5,
	IMPLVER_EV6
};

#ifdef CONFIG_ALPHA_GENERIC
#define implver()				\
({ unsigned long __implver;			\
   __asm__ ("implver %0" : "=r"(__implver));	\
   (enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code.  */
#ifdef CONFIG_ALPHA_EV4
#define implver() IMPLVER_EV4
#endif
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif

enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

#define amask(mask)						\
({ unsigned long __amask, __input = (mask);			\
   __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input));	\
   __amask; })
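
/*
 * Usage sketch (not part of the original header), with a hypothetical
 * helper name: amask() returns the bits of the requested mask whose
 * features the CPU does *not* implement, so a zero result means the
 * feature (here the BWX byte/word extension) is available.
 */
static inline int example_cpu_has_bwx(void)
{
	return amask(AMASK_BWX) == 0;
}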

#define __CALL_PAL_R0(NAME, TYPE)				\
static inline TYPE NAME(void)					\
{								\
	register TYPE __r0 __asm__("$0");			\
	__asm__ __volatile__(					\
		"call_pal %1 # " #NAME				\
		:"=r" (__r0)					\
		:"i" (PAL_ ## NAME)				\
		:"$1", "$16", "$22", "$23", "$24", "$25");	\
	return __r0;						\
}

#define __CALL_PAL_W1(NAME, TYPE0)				\
static inline void NAME(TYPE0 arg0)				\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %1 # "#NAME				\
		: "=r"(__r16)					\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_W2(NAME, TYPE0, TYPE1)			\
static inline void NAME(TYPE0 arg0, TYPE1 arg1)			\
{								\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r17)			\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
}

#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0)			\
static inline RTYPE NAME(TYPE0 arg0)				\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	__asm__ __volatile__(					\
		"call_pal %2 # "#NAME				\
		: "=r"(__r16), "=r"(__r0)			\
		: "i"(PAL_ ## NAME), "0"(__r16)			\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1)		\
static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1)		\
{								\
	register RTYPE __r0 __asm__("$0");			\
	register TYPE0 __r16 __asm__("$16") = arg0;		\
	register TYPE1 __r17 __asm__("$17") = arg1;		\
	__asm__ __volatile__(					\
		"call_pal %3 # "#NAME				\
		: "=r"(__r16), "=r"(__r17), "=r"(__r0)		\
		: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17)	\
		: "$1", "$22", "$23", "$24", "$25");		\
	return __r0;						\
}

__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
__CALL_PAL_R0(whami, unsigned long);
__CALL_PAL_W2(wrent, void*, unsigned long);
__CALL_PAL_W1(wripir, unsigned long);
__CALL_PAL_W1(wrkgp, unsigned long);
__CALL_PAL_W1(wrmces, unsigned long);
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
__CALL_PAL_W1(wrusp, unsigned long);
__CALL_PAL_W1(wrvptptr, unsigned long);

#define IPL_MIN		0
#define IPL_SW0		1
#define IPL_SW1		2
#define IPL_DEV0	3
#define IPL_DEV1	4
#define IPL_TIMER	5
#define IPL_PERF	6
#define IPL_POWERFAIL	6
#define IPL_MCHECK	7
#define IPL_MAX		7

#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
#undef IPL_MIN
#define IPL_MIN		__min_ipl
extern int __min_ipl;
#endif

#define getipl()		(rdps() & 7)
#define setipl(ipl)		((void) swpipl(ipl))

#define local_irq_disable()		do { setipl(IPL_MAX); barrier(); } while(0)
#define local_irq_enable()		do { barrier(); setipl(IPL_MIN); } while(0)
#define local_save_flags(flags)		((flags) = rdps())
#define local_irq_save(flags)		do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
#define local_irq_restore(flags)	do { barrier(); setipl(flags); barrier(); } while(0)

#define irqs_disabled()	(getipl() == IPL_MAX)
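
/*
 * Usage sketch (not part of the original header), with a hypothetical
 * helper name: the canonical pattern for the flag-saving forms above.
 * local_irq_save() swaps the IPL up to IPL_MAX and records the old
 * level in "flags"; local_irq_restore() puts that level back.
 */
static inline void example_irq_protected_update(unsigned long *val)
{
	unsigned long flags;

	local_irq_save(flags);		/* old IPL -> flags, IPL = IPL_MAX */
	(*val)++;			/* must not be interrupted */
	local_irq_restore(flags);	/* previous IPL restored */
}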

/*
 * TB routines..
 */
#define __tbi(nr,arg,arg1...)					\
({								\
	register unsigned long __r16 __asm__("$16") = (nr);	\
	register unsigned long __r17 __asm__("$17"); arg;	\
	__asm__ __volatile__(					\
		"call_pal %3 #__tbi"				\
		:"=r" (__r16),"=r" (__r17)			\
		:"0" (__r16),"i" (PAL_tbi) ,##arg1		\
		:"$0", "$1", "$22", "$23", "$24", "$25");	\
})

#define tbi(x,y)	__tbi(x,__r17=(y),"1" (__r17))
#define tbisi(x)	__tbi(1,__r17=(x),"1" (__r17))
#define tbisd(x)	__tbi(2,__r17=(x),"1" (__r17))
#define tbis(x)		__tbi(3,__r17=(x),"1" (__r17))
#define tbiap()		__tbi(-1, /* no second argument */)
#define tbia()		__tbi(-2, /* no second argument */)

/*
 * Atomic exchange.
 * Since it can be used to implement critical sections
 * it must clobber "memory" (also for interrupts in UP).
 */

static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	insbl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extbl	%2,%4,%0\n"
	"	mskbl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	inswl	%1,%4,%1\n"
	"1:	ldq_l	%2,0(%3)\n"
	"	extwl	%2,%4,%0\n"
	"	mskwl	%2,%4,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%3)\n"
	"	beq	%2,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}

static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%4\n"
	"	bis	$31,%3,%1\n"
	"	stl_c	%1,%2\n"
	"	beq	%1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l	%0,%4\n"
	"	bis	$31,%3,%1\n"
	"	stq_c	%1,%2\n"
	"	beq	%1,2f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size) \
({ \
	unsigned long __xchg__res; \
	volatile void *__xchg__ptr = (ptr); \
	switch (size) { \
		case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
		case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
		case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
		case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
		default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
	} \
	__xchg__res; \
})

#define xchg(ptr,x) \
  ({ \
     __typeof__(*(ptr)) _x_ = (x); \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
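
/*
 * Usage sketch (not part of the original header), with hypothetical
 * names: xchg() as a simple test-and-set lock, the critical-section use
 * the comment above alludes to.  A non-zero return from xchg() means
 * the lock was already held; tas(ptr) above is shorthand for the same
 * xchg(ptr, 1).
 */
static inline void example_busy_lock(volatile int *example_lock)
{
	while (xchg(example_lock, 1) != 0)
		; /* spin: previous value was non-zero, already locked */
}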

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * The memory barrier should be placed in SMP only when we actually
 * make the change. If we don't change anything (so if the returned
 * prev is equal to old) then we aren't acquiring anything new and
 * we don't need any memory barrier as far as I can tell.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	insbl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extbl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskbl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	inswl	%1,%5,%1\n"
	"1:	ldq_l	%2,0(%4)\n"
	"	extwl	%2,%5,%0\n"
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"
	"	mskwl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%5\n"
	"	cmpeq	%0,%3,%1\n"
	"	beq	%1,2f\n"
	"	mov	%4,%1\n"
	"	stl_c	%1,%2\n"
	"	beq	%1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l	%0,%5\n"
	"	cmpeq	%0,%3,%1\n"
	"	beq	%1,2f\n"
	"	mov	%4,%1\n"
	"	stq_c	%1,%2\n"
	"	beq	%1,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 1:
			return __cmpxchg_u8(ptr, old, new);
		case 2:
			return __cmpxchg_u16(ptr, old, new);
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
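
/*
 * Usage sketch (not part of the original header), with hypothetical
 * names: the usual lock-free update loop built on cmpxchg().  As the
 * comment above notes, success is detected by comparing the returned
 * value with the old value.
 */
static inline void example_atomic_add(volatile long *counter, long inc)
{
	long old, new;

	do {
		old = *counter;
		new = old + inc;
	} while (cmpxchg(counter, old, new) != old);
}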
"r"((long) old), "r"(new), "m"(*m) : "memory"); 557 558 return prev; 559} 560 561/* This function doesn't exist, so you'll get a linker error 562 if something tries to do an invalid cmpxchg(). */ 563extern void __cmpxchg_called_with_bad_pointer(void); 564 565static inline unsigned long 566__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) 567{ 568 switch (size) { 569 case 1: 570 return __cmpxchg_u8(ptr, old, new); 571 case 2: 572 return __cmpxchg_u16(ptr, old, new); 573 case 4: 574 return __cmpxchg_u32(ptr, old, new); 575 case 8: 576 return __cmpxchg_u64(ptr, old, new); 577 } 578 __cmpxchg_called_with_bad_pointer(); 579 return old; 580} 581 582#define cmpxchg(ptr,o,n) \ 583 ({ \ 584 __typeof__(*(ptr)) _o_ = (o); \ 585 __typeof__(*(ptr)) _n_ = (n); \ 586 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 587 (unsigned long)_n_, sizeof(*(ptr))); \ 588 }) 589 590#endif /* __ASSEMBLY__ */ 591 592#define arch_align_stack(x) (x) 593 594#endif