at v2.6.21-rc2 603 lines 17 kB view raw
1#ifndef __ALPHA_SYSTEM_H 2#define __ALPHA_SYSTEM_H 3 4#include <asm/pal.h> 5#include <asm/page.h> 6#include <asm/barrier.h> 7 8/* 9 * System defines.. Note that this is included both from .c and .S 10 * files, so it does only defines, not any C code. 11 */ 12 13/* 14 * We leave one page for the initial stack page, and one page for 15 * the initial process structure. Also, the console eats 3 MB for 16 * the initial bootloader (one of which we can reclaim later). 17 */ 18#define BOOT_PCB 0x20000000 19#define BOOT_ADDR 0x20000000 20/* Remove when official MILO sources have ELF support: */ 21#define BOOT_SIZE (16*1024) 22 23#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS 24#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ 25#else 26#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ 27#endif 28 29#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) 30#define SWAPPER_PGD KERNEL_START 31#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) 32#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) 33#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) 34#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) 35 36#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) 37 38/* 39 * This is setup by the secondary bootstrap loader. Because 40 * the zero page is zeroed out as soon as the vm system is 41 * initialized, we need to copy things out into a more permanent 42 * place. 43 */ 44#define PARAM ZERO_PGE 45#define COMMAND_LINE ((char*)(PARAM + 0x0000)) 46#define INITRD_START (*(unsigned long *) (PARAM+0x100)) 47#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) 48 49#ifndef __ASSEMBLY__ 50#include <linux/kernel.h> 51 52/* 53 * This is the logout header that should be common to all platforms 54 * (assuming they are running OSF/1 PALcode, I guess). 55 */ 56struct el_common { 57 unsigned int size; /* size in bytes of logout area */ 58 unsigned int sbz1 : 30; /* should be zero */ 59 unsigned int err2 : 1; /* second error */ 60 unsigned int retry : 1; /* retry flag */ 61 unsigned int proc_offset; /* processor-specific offset */ 62 unsigned int sys_offset; /* system-specific offset */ 63 unsigned int code; /* machine check code */ 64 unsigned int frame_rev; /* frame revision */ 65}; 66 67/* Machine Check Frame for uncorrectable errors (Large format) 68 * --- This is used to log uncorrectable errors such as 69 * double bit ECC errors. 70 * --- These errors are detected by both processor and systems. 71 */ 72struct el_common_EV5_uncorrectable_mcheck { 73 unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ 74 unsigned long paltemp[24]; /* PAL TEMP REGS. */ 75 unsigned long exc_addr; /* Address of excepting instruction*/ 76 unsigned long exc_sum; /* Summary of arithmetic traps. */ 77 unsigned long exc_mask; /* Exception mask (from exc_sum). */ 78 unsigned long pal_base; /* Base address for PALcode. */ 79 unsigned long isr; /* Interrupt Status Reg. */ 80 unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ 81 unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity 82 <12> set TAG parity*/ 83 unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: 84 <2> Data error in bank 0 85 <3> Data error in bank 1 86 <4> Tag error in bank 0 87 <5> Tag error in bank 1 */ 88 unsigned long va; /* Effective VA of fault or miss. */ 89 unsigned long mm_stat; /* Holds the reason for D-stream 90 fault or D-cache parity errors */ 91 unsigned long sc_addr; /* Address that was being accessed 92 when EV5 detected Secondary cache 93 failure. */ 94 unsigned long sc_stat; /* Helps determine if the error was 95 TAG/Data parity(Secondary Cache)*/ 96 unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ 97 unsigned long ei_addr; /* Physical address of any transfer 98 that is logged in EV5 EI_STAT */ 99 unsigned long fill_syndrome; /* For correcting ECC errors. */ 100 unsigned long ei_stat; /* Helps identify reason of any 101 processor uncorrectable error 102 at its external interface. */ 103 unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ 104}; 105 106struct el_common_EV6_mcheck { 107 unsigned int FrameSize; /* Bytes, including this field */ 108 unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ 109 unsigned int CpuOffset; /* Offset to CPU-specific info */ 110 unsigned int SystemOffset; /* Offset to system-specific info */ 111 unsigned int MCHK_Code; 112 unsigned int MCHK_Frame_Rev; 113 unsigned long I_STAT; /* EV6 Internal Processor Registers */ 114 unsigned long DC_STAT; /* (See the 21264 Spec) */ 115 unsigned long C_ADDR; 116 unsigned long DC1_SYNDROME; 117 unsigned long DC0_SYNDROME; 118 unsigned long C_STAT; 119 unsigned long C_STS; 120 unsigned long MM_STAT; 121 unsigned long EXC_ADDR; 122 unsigned long IER_CM; 123 unsigned long ISUM; 124 unsigned long RESERVED0; 125 unsigned long PAL_BASE; 126 unsigned long I_CTL; 127 unsigned long PCTX; 128}; 129 130extern void halt(void) __attribute__((noreturn)); 131#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) 132 133#define switch_to(P,N,L) \ 134 do { \ 135 (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ 136 check_mmu_context(); \ 137 } while (0) 138 139struct task_struct; 140extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); 141 142/* 143 * On SMP systems, when the scheduler does migration-cost autodetection, 144 * it needs a way to flush as much of the CPU's caches as possible. 145 * 146 * TODO: fill this in! 147 */ 148static inline void sched_cacheflush(void) 149{ 150} 151 152#define imb() \ 153__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") 154 155#define draina() \ 156__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") 157 158enum implver_enum { 159 IMPLVER_EV4, 160 IMPLVER_EV5, 161 IMPLVER_EV6 162}; 163 164#ifdef CONFIG_ALPHA_GENERIC 165#define implver() \ 166({ unsigned long __implver; \ 167 __asm__ ("implver %0" : "=r"(__implver)); \ 168 (enum implver_enum) __implver; }) 169#else 170/* Try to eliminate some dead code. */ 171#ifdef CONFIG_ALPHA_EV4 172#define implver() IMPLVER_EV4 173#endif 174#ifdef CONFIG_ALPHA_EV5 175#define implver() IMPLVER_EV5 176#endif 177#if defined(CONFIG_ALPHA_EV6) 178#define implver() IMPLVER_EV6 179#endif 180#endif 181 182enum amask_enum { 183 AMASK_BWX = (1UL << 0), 184 AMASK_FIX = (1UL << 1), 185 AMASK_CIX = (1UL << 2), 186 AMASK_MAX = (1UL << 8), 187 AMASK_PRECISE_TRAP = (1UL << 9), 188}; 189 190#define amask(mask) \ 191({ unsigned long __amask, __input = (mask); \ 192 __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ 193 __amask; }) 194 195#define __CALL_PAL_R0(NAME, TYPE) \ 196static inline TYPE NAME(void) \ 197{ \ 198 register TYPE __r0 __asm__("$0"); \ 199 __asm__ __volatile__( \ 200 "call_pal %1 # " #NAME \ 201 :"=r" (__r0) \ 202 :"i" (PAL_ ## NAME) \ 203 :"$1", "$16", "$22", "$23", "$24", "$25"); \ 204 return __r0; \ 205} 206 207#define __CALL_PAL_W1(NAME, TYPE0) \ 208static inline void NAME(TYPE0 arg0) \ 209{ \ 210 register TYPE0 __r16 __asm__("$16") = arg0; \ 211 __asm__ __volatile__( \ 212 "call_pal %1 # "#NAME \ 213 : "=r"(__r16) \ 214 : "i"(PAL_ ## NAME), "0"(__r16) \ 215 : "$1", "$22", "$23", "$24", "$25"); \ 216} 217 218#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ 219static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ 220{ \ 221 register TYPE0 __r16 __asm__("$16") = arg0; \ 222 register TYPE1 __r17 __asm__("$17") = arg1; \ 223 __asm__ __volatile__( \ 224 "call_pal %2 # "#NAME \ 225 : "=r"(__r16), "=r"(__r17) \ 226 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ 227 : "$1", "$22", "$23", "$24", "$25"); \ 228} 229 230#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ 231static inline RTYPE NAME(TYPE0 arg0) \ 232{ \ 233 register RTYPE __r0 __asm__("$0"); \ 234 register TYPE0 __r16 __asm__("$16") = arg0; \ 235 __asm__ __volatile__( \ 236 "call_pal %2 # "#NAME \ 237 : "=r"(__r16), "=r"(__r0) \ 238 : "i"(PAL_ ## NAME), "0"(__r16) \ 239 : "$1", "$22", "$23", "$24", "$25"); \ 240 return __r0; \ 241} 242 243#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ 244static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ 245{ \ 246 register RTYPE __r0 __asm__("$0"); \ 247 register TYPE0 __r16 __asm__("$16") = arg0; \ 248 register TYPE1 __r17 __asm__("$17") = arg1; \ 249 __asm__ __volatile__( \ 250 "call_pal %3 # "#NAME \ 251 : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ 252 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ 253 : "$1", "$22", "$23", "$24", "$25"); \ 254 return __r0; \ 255} 256 257__CALL_PAL_W1(cflush, unsigned long); 258__CALL_PAL_R0(rdmces, unsigned long); 259__CALL_PAL_R0(rdps, unsigned long); 260__CALL_PAL_R0(rdusp, unsigned long); 261__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); 262__CALL_PAL_R0(whami, unsigned long); 263__CALL_PAL_W2(wrent, void*, unsigned long); 264__CALL_PAL_W1(wripir, unsigned long); 265__CALL_PAL_W1(wrkgp, unsigned long); 266__CALL_PAL_W1(wrmces, unsigned long); 267__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); 268__CALL_PAL_W1(wrusp, unsigned long); 269__CALL_PAL_W1(wrvptptr, unsigned long); 270 271#define IPL_MIN 0 272#define IPL_SW0 1 273#define IPL_SW1 2 274#define IPL_DEV0 3 275#define IPL_DEV1 4 276#define IPL_TIMER 5 277#define IPL_PERF 6 278#define IPL_POWERFAIL 6 279#define IPL_MCHECK 7 280#define IPL_MAX 7 281 282#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 283#undef IPL_MIN 284#define IPL_MIN __min_ipl 285extern int __min_ipl; 286#endif 287 288#define getipl() (rdps() & 7) 289#define setipl(ipl) ((void) swpipl(ipl)) 290 291#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) 292#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) 293#define local_save_flags(flags) ((flags) = rdps()) 294#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) 295#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) 296 297#define irqs_disabled() (getipl() == IPL_MAX) 298 299/* 300 * TB routines.. 301 */ 302#define __tbi(nr,arg,arg1...) \ 303({ \ 304 register unsigned long __r16 __asm__("$16") = (nr); \ 305 register unsigned long __r17 __asm__("$17"); arg; \ 306 __asm__ __volatile__( \ 307 "call_pal %3 #__tbi" \ 308 :"=r" (__r16),"=r" (__r17) \ 309 :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ 310 :"$0", "$1", "$22", "$23", "$24", "$25"); \ 311}) 312 313#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) 314#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) 315#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) 316#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) 317#define tbiap() __tbi(-1, /* no second argument */) 318#define tbia() __tbi(-2, /* no second argument */) 319 320/* 321 * Atomic exchange. 322 * Since it can be used to implement critical sections 323 * it must clobber "memory" (also for interrupts in UP). 324 */ 325 326static inline unsigned long 327__xchg_u8(volatile char *m, unsigned long val) 328{ 329 unsigned long ret, tmp, addr64; 330 331 __asm__ __volatile__( 332 " andnot %4,7,%3\n" 333 " insbl %1,%4,%1\n" 334 "1: ldq_l %2,0(%3)\n" 335 " extbl %2,%4,%0\n" 336 " mskbl %2,%4,%2\n" 337 " or %1,%2,%2\n" 338 " stq_c %2,0(%3)\n" 339 " beq %2,2f\n" 340#ifdef CONFIG_SMP 341 " mb\n" 342#endif 343 ".subsection 2\n" 344 "2: br 1b\n" 345 ".previous" 346 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) 347 : "r" ((long)m), "1" (val) : "memory"); 348 349 return ret; 350} 351 352static inline unsigned long 353__xchg_u16(volatile short *m, unsigned long val) 354{ 355 unsigned long ret, tmp, addr64; 356 357 __asm__ __volatile__( 358 " andnot %4,7,%3\n" 359 " inswl %1,%4,%1\n" 360 "1: ldq_l %2,0(%3)\n" 361 " extwl %2,%4,%0\n" 362 " mskwl %2,%4,%2\n" 363 " or %1,%2,%2\n" 364 " stq_c %2,0(%3)\n" 365 " beq %2,2f\n" 366#ifdef CONFIG_SMP 367 " mb\n" 368#endif 369 ".subsection 2\n" 370 "2: br 1b\n" 371 ".previous" 372 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) 373 : "r" ((long)m), "1" (val) : "memory"); 374 375 return ret; 376} 377 378static inline unsigned long 379__xchg_u32(volatile int *m, unsigned long val) 380{ 381 unsigned long dummy; 382 383 __asm__ __volatile__( 384 "1: ldl_l %0,%4\n" 385 " bis $31,%3,%1\n" 386 " stl_c %1,%2\n" 387 " beq %1,2f\n" 388#ifdef CONFIG_SMP 389 " mb\n" 390#endif 391 ".subsection 2\n" 392 "2: br 1b\n" 393 ".previous" 394 : "=&r" (val), "=&r" (dummy), "=m" (*m) 395 : "rI" (val), "m" (*m) : "memory"); 396 397 return val; 398} 399 400static inline unsigned long 401__xchg_u64(volatile long *m, unsigned long val) 402{ 403 unsigned long dummy; 404 405 __asm__ __volatile__( 406 "1: ldq_l %0,%4\n" 407 " bis $31,%3,%1\n" 408 " stq_c %1,%2\n" 409 " beq %1,2f\n" 410#ifdef CONFIG_SMP 411 " mb\n" 412#endif 413 ".subsection 2\n" 414 "2: br 1b\n" 415 ".previous" 416 : "=&r" (val), "=&r" (dummy), "=m" (*m) 417 : "rI" (val), "m" (*m) : "memory"); 418 419 return val; 420} 421 422/* This function doesn't exist, so you'll get a linker error 423 if something tries to do an invalid xchg(). */ 424extern void __xchg_called_with_bad_pointer(void); 425 426#define __xchg(ptr, x, size) \ 427({ \ 428 unsigned long __xchg__res; \ 429 volatile void *__xchg__ptr = (ptr); \ 430 switch (size) { \ 431 case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ 432 case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ 433 case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ 434 case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ 435 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ 436 } \ 437 __xchg__res; \ 438}) 439 440#define xchg(ptr,x) \ 441 ({ \ 442 __typeof__(*(ptr)) _x_ = (x); \ 443 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ 444 }) 445 446#define tas(ptr) (xchg((ptr),1)) 447 448 449/* 450 * Atomic compare and exchange. Compare OLD with MEM, if identical, 451 * store NEW in MEM. Return the initial value in MEM. Success is 452 * indicated by comparing RETURN with OLD. 453 * 454 * The memory barrier should be placed in SMP only when we actually 455 * make the change. If we don't change anything (so if the returned 456 * prev is equal to old) then we aren't acquiring anything new and 457 * we don't need any memory barrier as far I can tell. 458 */ 459 460#define __HAVE_ARCH_CMPXCHG 1 461 462static inline unsigned long 463__cmpxchg_u8(volatile char *m, long old, long new) 464{ 465 unsigned long prev, tmp, cmp, addr64; 466 467 __asm__ __volatile__( 468 " andnot %5,7,%4\n" 469 " insbl %1,%5,%1\n" 470 "1: ldq_l %2,0(%4)\n" 471 " extbl %2,%5,%0\n" 472 " cmpeq %0,%6,%3\n" 473 " beq %3,2f\n" 474 " mskbl %2,%5,%2\n" 475 " or %1,%2,%2\n" 476 " stq_c %2,0(%4)\n" 477 " beq %2,3f\n" 478#ifdef CONFIG_SMP 479 " mb\n" 480#endif 481 "2:\n" 482 ".subsection 2\n" 483 "3: br 1b\n" 484 ".previous" 485 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) 486 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); 487 488 return prev; 489} 490 491static inline unsigned long 492__cmpxchg_u16(volatile short *m, long old, long new) 493{ 494 unsigned long prev, tmp, cmp, addr64; 495 496 __asm__ __volatile__( 497 " andnot %5,7,%4\n" 498 " inswl %1,%5,%1\n" 499 "1: ldq_l %2,0(%4)\n" 500 " extwl %2,%5,%0\n" 501 " cmpeq %0,%6,%3\n" 502 " beq %3,2f\n" 503 " mskwl %2,%5,%2\n" 504 " or %1,%2,%2\n" 505 " stq_c %2,0(%4)\n" 506 " beq %2,3f\n" 507#ifdef CONFIG_SMP 508 " mb\n" 509#endif 510 "2:\n" 511 ".subsection 2\n" 512 "3: br 1b\n" 513 ".previous" 514 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) 515 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); 516 517 return prev; 518} 519 520static inline unsigned long 521__cmpxchg_u32(volatile int *m, int old, int new) 522{ 523 unsigned long prev, cmp; 524 525 __asm__ __volatile__( 526 "1: ldl_l %0,%5\n" 527 " cmpeq %0,%3,%1\n" 528 " beq %1,2f\n" 529 " mov %4,%1\n" 530 " stl_c %1,%2\n" 531 " beq %1,3f\n" 532#ifdef CONFIG_SMP 533 " mb\n" 534#endif 535 "2:\n" 536 ".subsection 2\n" 537 "3: br 1b\n" 538 ".previous" 539 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 540 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 541 542 return prev; 543} 544 545static inline unsigned long 546__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) 547{ 548 unsigned long prev, cmp; 549 550 __asm__ __volatile__( 551 "1: ldq_l %0,%5\n" 552 " cmpeq %0,%3,%1\n" 553 " beq %1,2f\n" 554 " mov %4,%1\n" 555 " stq_c %1,%2\n" 556 " beq %1,3f\n" 557#ifdef CONFIG_SMP 558 " mb\n" 559#endif 560 "2:\n" 561 ".subsection 2\n" 562 "3: br 1b\n" 563 ".previous" 564 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 565 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 566 567 return prev; 568} 569 570/* This function doesn't exist, so you'll get a linker error 571 if something tries to do an invalid cmpxchg(). */ 572extern void __cmpxchg_called_with_bad_pointer(void); 573 574static __always_inline unsigned long 575__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) 576{ 577 switch (size) { 578 case 1: 579 return __cmpxchg_u8(ptr, old, new); 580 case 2: 581 return __cmpxchg_u16(ptr, old, new); 582 case 4: 583 return __cmpxchg_u32(ptr, old, new); 584 case 8: 585 return __cmpxchg_u64(ptr, old, new); 586 } 587 __cmpxchg_called_with_bad_pointer(); 588 return old; 589} 590 591#define cmpxchg(ptr,o,n) \ 592 ({ \ 593 __typeof__(*(ptr)) _o_ = (o); \ 594 __typeof__(*(ptr)) _n_ = (n); \ 595 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 596 (unsigned long)_n_, sizeof(*(ptr))); \ 597 }) 598 599#endif /* __ASSEMBLY__ */ 600 601#define arch_align_stack(x) (x) 602 603#endif