Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

alpha: use single HAE window on T2 core logic (gamma, sable)

T2-based machines are the only alpha SMP systems that do HAE switching at runtime, which
is fundamentally racy on SMP. This patch limits MMIO space on T2 to HAE0
only, like we did on MCPCIA (rawhide) long ago. This leaves us with only
112 MB of PCI MMIO (128 MB HAE aperture minus 16 MB reserved for EISA),
but since Linux PCI allocations are reasonably tight, it should be enough
for sane hardware configurations.

Also, fix a typo in MCPCIA_FROB_MMIO macro which shouldn't call set_hae()
if MCPCIA_ONE_HAE_WINDOW is defined. It's more for correctness, as
set_hae() is a no-op anyway in that case.

Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Ivan Kokshaysky and committed by
Linus Torvalds
98c532ec 947272dd

+30 -40
+1 -1
arch/alpha/include/asm/core_mcpcia.h
··· 247 247 #define vip volatile int __force * 248 248 #define vuip volatile unsigned int __force * 249 249 250 - #ifdef MCPCIA_ONE_HAE_WINDOW 250 + #ifndef MCPCIA_ONE_HAE_WINDOW 251 251 #define MCPCIA_FROB_MMIO \ 252 252 if (__mcpcia_is_mmio(hose)) { \ 253 253 set_hae(hose & 0xffffffff); \
+18 -36
arch/alpha/include/asm/core_t2.h
··· 1 1 #ifndef __ALPHA_T2__H__ 2 2 #define __ALPHA_T2__H__ 3 3 4 + /* Fit everything into one 128MB HAE window. */ 5 + #define T2_ONE_HAE_WINDOW 1 6 + 4 7 #include <linux/types.h> 5 8 #include <linux/spinlock.h> 6 9 #include <asm/compiler.h> ··· 22 19 * 23 20 */ 24 21 25 - #define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */ 22 + #define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */ 26 23 27 24 /* GAMMA-SABLE is a SABLE with EV5-based CPUs */ 28 25 /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ ··· 88 85 #define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) 89 86 #define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) 90 87 88 + #ifndef T2_ONE_HAE_WINDOW 91 89 #define T2_HAE_ADDRESS T2_HAE_1 90 + #endif 92 91 93 92 /* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to 94 93 3.8fff.ffff ··· 434 429 * 435 430 */ 436 431 432 + #ifdef T2_ONE_HAE_WINDOW 433 + #define t2_set_hae 434 + #else 437 435 #define t2_set_hae { \ 438 - msb = addr >> 27; \ 436 + unsigned long msb = addr >> 27; \ 439 437 addr &= T2_MEM_R1_MASK; \ 440 438 set_hae(msb); \ 441 439 } 442 - 443 - extern raw_spinlock_t t2_hae_lock; 440 + #endif 444 441 445 442 /* 446 443 * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since ··· 453 446 __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) 454 447 { 455 448 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 456 - unsigned long result, msb; 457 - unsigned long flags; 458 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 449 + unsigned long result; 459 450 460 451 t2_set_hae; 461 452 462 453 result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); 463 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 464 454 return __kernel_extbl(result, addr & 3); 465 455 } 466 456 467 457 __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) 468 458 { 469 459 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 470 - unsigned long result, msb; 471 - 
unsigned long flags; 472 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 460 + unsigned long result; 473 461 474 462 t2_set_hae; 475 463 476 464 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); 477 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 478 465 return __kernel_extwl(result, addr & 3); 479 466 } 480 467 ··· 479 478 __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) 480 479 { 481 480 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 482 - unsigned long result, msb; 483 - unsigned long flags; 484 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 481 + unsigned long result; 485 482 486 483 t2_set_hae; 487 484 488 485 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); 489 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 490 486 return result & 0xffffffffUL; 491 487 } 492 488 493 489 __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) 494 490 { 495 491 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 496 - unsigned long r0, r1, work, msb; 497 - unsigned long flags; 498 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 492 + unsigned long r0, r1, work; 499 493 500 494 t2_set_hae; 501 495 502 496 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 503 497 r0 = *(vuip)(work); 504 498 r1 = *(vuip)(work + (4 << 5)); 505 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 506 499 return r1 << 32 | r0; 507 500 } 508 501 509 502 __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) 510 503 { 511 504 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 512 - unsigned long msb, w; 513 - unsigned long flags; 514 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 505 + unsigned long w; 515 506 516 507 t2_set_hae; 517 508 518 509 w = __kernel_insbl(b, addr & 3); 519 510 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; 520 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 521 511 } 522 512 523 513 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) 524 514 { 525 515 unsigned long addr = (unsigned long) 
xaddr - T2_DENSE_MEM; 526 - unsigned long msb, w; 527 - unsigned long flags; 528 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 516 + unsigned long w; 529 517 530 518 t2_set_hae; 531 519 532 520 w = __kernel_inswl(b, addr & 3); 533 521 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; 534 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 535 522 } 536 523 537 524 /* ··· 529 540 __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) 530 541 { 531 542 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 532 - unsigned long msb; 533 - unsigned long flags; 534 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 535 543 536 544 t2_set_hae; 537 545 538 546 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; 539 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 540 547 } 541 548 542 549 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) 543 550 { 544 551 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 545 - unsigned long msb, work; 546 - unsigned long flags; 547 - raw_spin_lock_irqsave(&t2_hae_lock, flags); 552 + unsigned long work; 548 553 549 554 t2_set_hae; 550 555 551 556 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 552 557 *(vuip)work = b; 553 558 *(vuip)(work + (4 << 5)) = b >> 32; 554 - raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 555 559 } 556 560 557 561 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
+8 -3
arch/alpha/kernel/core_t2.c
··· 74 74 # define DBG(args) 75 75 #endif 76 76 77 - DEFINE_RAW_SPINLOCK(t2_hae_lock); 78 - 79 77 static volatile unsigned int t2_mcheck_any_expected; 80 78 static volatile unsigned int t2_mcheck_last_taken; 81 79 ··· 404 406 t2_init_arch(void) 405 407 { 406 408 struct pci_controller *hose; 409 + struct resource *hae_mem; 407 410 unsigned long temp; 408 411 unsigned int i; 409 412 ··· 432 433 */ 433 434 pci_isa_hose = hose = alloc_pci_controller(); 434 435 hose->io_space = &ioport_resource; 435 - hose->mem_space = &iomem_resource; 436 + hae_mem = alloc_resource(); 437 + hae_mem->start = 0; 438 + hae_mem->end = T2_MEM_R1_MASK; 439 + hae_mem->name = pci_hae0_name; 440 + if (request_resource(&iomem_resource, hae_mem) < 0) 441 + printk(KERN_ERR "Failed to request HAE_MEM\n"); 442 + hose->mem_space = hae_mem; 436 443 hose->index = 0; 437 444 438 445 hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
+3
arch/alpha/kernel/machvec_impl.h
··· 25 25 #ifdef MCPCIA_ONE_HAE_WINDOW 26 26 #define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache) 27 27 #endif 28 + #ifdef T2_ONE_HAE_WINDOW 29 + #define T2_HAE_ADDRESS (&alpha_mv.hae_cache) 30 + #endif 28 31 29 32 /* Only a few systems don't define IACK_SC, handling all interrupts through 30 33 the SRM console. But splitting out that one case from IO() below