Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.18 2262 lines 64 kB view raw
1/* 2** System Bus Adapter (SBA) I/O MMU manager 3** 4** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org> 5** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com> 6** (c) Copyright 2000-2004 Hewlett-Packard Company 7** 8** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) 9** 10** This program is free software; you can redistribute it and/or modify 11** it under the terms of the GNU General Public License as published by 12** the Free Software Foundation; either version 2 of the License, or 13** (at your option) any later version. 14** 15** 16** This module initializes the IOC (I/O Controller) found on B1000/C3000/ 17** J5000/J7000/N-class/L-class machines and their successors. 18** 19** FIXME: add DMA hint support programming in both sba and lba modules. 20*/ 21 22#include <linux/types.h> 23#include <linux/kernel.h> 24#include <linux/spinlock.h> 25#include <linux/slab.h> 26#include <linux/init.h> 27 28#include <linux/mm.h> 29#include <linux/string.h> 30#include <linux/pci.h> 31 32#include <asm/byteorder.h> 33#include <asm/io.h> 34#include <asm/dma.h> /* for DMA_CHUNK_SIZE */ 35 36#include <asm/hardware.h> /* for register_parisc_driver() stuff */ 37 38#include <linux/proc_fs.h> 39#include <linux/seq_file.h> 40 41#include <asm/runway.h> /* for proc_runway_root */ 42#include <asm/pdc.h> /* for PDC_MODEL_* */ 43#include <asm/pdcpat.h> /* for is_pdc_pat() */ 44#include <asm/parisc-device.h> 45 46 47/* declared in arch/parisc/kernel/setup.c */ 48extern struct proc_dir_entry * proc_mckinley_root; 49 50#define MODULE_NAME "SBA" 51 52#ifdef CONFIG_PROC_FS 53/* depends on proc fs support. But costs CPU performance */ 54#undef SBA_COLLECT_STATS 55#endif 56 57/* 58** The number of debug flags is a clue - this code is fragile. 59** Don't even think about messing with it unless you have 60** plenty of 710's to sacrifice to the computer gods. :^) 61*/ 62#undef DEBUG_SBA_INIT 63#undef DEBUG_SBA_RUN 64#undef DEBUG_SBA_RUN_SG 65#undef DEBUG_SBA_RESOURCE 66#undef ASSERT_PDIR_SANITY 67#undef DEBUG_LARGE_SG_ENTRIES 68#undef DEBUG_DMB_TRAP 69 70#ifdef DEBUG_SBA_INIT 71#define DBG_INIT(x...) printk(x) 72#else 73#define DBG_INIT(x...) 74#endif 75 76#ifdef DEBUG_SBA_RUN 77#define DBG_RUN(x...) printk(x) 78#else 79#define DBG_RUN(x...) 80#endif 81 82#ifdef DEBUG_SBA_RUN_SG 83#define DBG_RUN_SG(x...) printk(x) 84#else 85#define DBG_RUN_SG(x...) 86#endif 87 88 89#ifdef DEBUG_SBA_RESOURCE 90#define DBG_RES(x...) printk(x) 91#else 92#define DBG_RES(x...) 93#endif 94 95#if defined(CONFIG_64BIT) 96/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */ 97#define ZX1_SUPPORT 98#endif 99 100#define SBA_INLINE __inline__ 101 102 103/* 104** The number of pdir entries to "free" before issueing 105** a read to PCOM register to flush out PCOM writes. 106** Interacts with allocation granularity (ie 4 or 8 entries 107** allocated and free'd/purged at a time might make this 108** less interesting). 109*/ 110#define DELAYED_RESOURCE_CNT 16 111 112#define DEFAULT_DMA_HINT_REG 0 113 114#define ASTRO_RUNWAY_PORT 0x582 115#define IKE_MERCED_PORT 0x803 116#define REO_MERCED_PORT 0x804 117#define REOG_MERCED_PORT 0x805 118#define PLUTO_MCKINLEY_PORT 0x880 119 120#define SBA_FUNC_ID 0x0000 /* function id */ 121#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ 122 123#define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT) 124#define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT) 125#define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT) 126 127#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */ 128 129#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE) 130#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE) 131/* Ike's IOC's occupy functions 2 and 3 */ 132#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE) 133 134#define IOC_CTRL 0x8 /* IOC_CTRL offset */ 135#define IOC_CTRL_TC (1 << 0) /* TOC Enable */ 136#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */ 137#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */ 138#define IOC_CTRL_RM (1 << 8) /* Real Mode */ 139#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */ 140#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */ 141#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */ 142 143#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */ 144 145#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */ 146 147 148/* 149** Offsets into MBIB (Function 0 on Ike and hopefully Astro) 150** Firmware programs this stuff. Don't touch it. 151*/ 152#define LMMIO_DIRECT0_BASE 0x300 153#define LMMIO_DIRECT0_MASK 0x308 154#define LMMIO_DIRECT0_ROUTE 0x310 155 156#define LMMIO_DIST_BASE 0x360 157#define LMMIO_DIST_MASK 0x368 158#define LMMIO_DIST_ROUTE 0x370 159 160#define IOS_DIST_BASE 0x390 161#define IOS_DIST_MASK 0x398 162#define IOS_DIST_ROUTE 0x3A0 163 164#define IOS_DIRECT_BASE 0x3C0 165#define IOS_DIRECT_MASK 0x3C8 166#define IOS_DIRECT_ROUTE 0x3D0 167 168/* 169** Offsets into I/O TLB (Function 2 and 3 on Ike) 170*/ 171#define ROPE0_CTL 0x200 /* "regbus pci0" */ 172#define ROPE1_CTL 0x208 173#define ROPE2_CTL 0x210 174#define ROPE3_CTL 0x218 175#define ROPE4_CTL 0x220 176#define ROPE5_CTL 0x228 177#define ROPE6_CTL 0x230 178#define ROPE7_CTL 0x238 179 180#define IOC_ROPE0_CFG 0x500 /* pluto only */ 181#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ 182 183 184 185#define HF_ENABLE 0x40 186 187 188#define IOC_IBASE 0x300 /* IO TLB */ 189#define IOC_IMASK 0x308 190#define IOC_PCOM 0x310 191#define IOC_TCNFG 0x318 192#define IOC_PDIR_BASE 0x320 193 194/* AGP GART driver looks for this */ 195#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL 196 197 198/* 199** IOC supports 4/8/16/64KB page sizes (see TCNFG register) 200** It's safer (avoid memory corruption) to keep DMA page mappings 201** equivalently sized to VM PAGE_SIZE. 202** 203** We really can't avoid generating a new mapping for each 204** page since the Virtual Coherence Index has to be generated 205** and updated for each page. 206** 207** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse. 208*/ 209#define IOVP_SIZE PAGE_SIZE 210#define IOVP_SHIFT PAGE_SHIFT 211#define IOVP_MASK PAGE_MASK 212 213#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */ 214#define SBA_PERF_MASK1 0x718 215#define SBA_PERF_MASK2 0x730 216 217 218/* 219** Offsets into PCI Performance Counters (functions 12 and 13) 220** Controlled by PERF registers in function 2 & 3 respectively. 221*/ 222#define SBA_PERF_CNT1 0x200 223#define SBA_PERF_CNT2 0x208 224#define SBA_PERF_CNT3 0x210 225 226 227struct ioc { 228 void __iomem *ioc_hpa; /* I/O MMU base address */ 229 char *res_map; /* resource map, bit == pdir entry */ 230 u64 *pdir_base; /* physical base address */ 231 unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */ 232 unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */ 233#ifdef ZX1_SUPPORT 234 unsigned long iovp_mask; /* help convert IOVA to IOVP */ 235#endif 236 unsigned long *res_hint; /* next avail IOVP - circular search */ 237 spinlock_t res_lock; 238 unsigned int res_bitshift; /* from the LEFT! */ 239 unsigned int res_size; /* size of resource map in bytes */ 240#ifdef SBA_HINT_SUPPORT 241/* FIXME : DMA HINTs not used */ 242 unsigned long hint_mask_pdir; /* bits used for DMA hints */ 243 unsigned int hint_shift_pdir; 244#endif 245#if DELAYED_RESOURCE_CNT > 0 246 int saved_cnt; 247 struct sba_dma_pair { 248 dma_addr_t iova; 249 size_t size; 250 } saved[DELAYED_RESOURCE_CNT]; 251#endif 252 253#ifdef SBA_COLLECT_STATS 254#define SBA_SEARCH_SAMPLE 0x100 255 unsigned long avg_search[SBA_SEARCH_SAMPLE]; 256 unsigned long avg_idx; /* current index into avg_search */ 257 unsigned long used_pages; 258 unsigned long msingle_calls; 259 unsigned long msingle_pages; 260 unsigned long msg_calls; 261 unsigned long msg_pages; 262 unsigned long usingle_calls; 263 unsigned long usingle_pages; 264 unsigned long usg_calls; 265 unsigned long usg_pages; 266#endif 267 268 /* STUFF We don't need in performance path */ 269 unsigned int pdir_size; /* in bytes, determined by IOV Space size */ 270}; 271 272struct sba_device { 273 struct sba_device *next; /* list of SBA's in system */ 274 struct parisc_device *dev; /* dev found in bus walk */ 275 struct parisc_device_id *iodc; /* data about dev from firmware */ 276 const char *name; 277 void __iomem *sba_hpa; /* base address */ 278 spinlock_t sba_lock; 279 unsigned int flags; /* state/functionality enabled */ 280 unsigned int hw_rev; /* HW revision of chip */ 281 282 struct resource chip_resv; /* MMIO reserved for chip */ 283 struct resource iommu_resv; /* MMIO reserved for iommu */ 284 285 unsigned int num_ioc; /* number of on-board IOC's */ 286 struct ioc ioc[MAX_IOC]; 287}; 288 289 290static struct sba_device *sba_list; 291 292static unsigned long ioc_needs_fdc = 0; 293 294/* global count of IOMMUs in the system */ 295static unsigned int global_ioc_cnt = 0; 296 297/* PA8700 (Piranha 2.2) bug workaround */ 298static unsigned long piranha_bad_128k = 0; 299 300/* Looks nice and keeps the compiler happy */ 301#define SBA_DEV(d) ((struct sba_device *) (d)) 302 303#ifdef SBA_AGP_SUPPORT 304static int reserve_sba_gart = 1; 305#endif 306 307#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) 308 309 310/************************************ 311** SBA register read and write support 312** 313** BE WARNED: register writes are posted. 314** (ie follow writes which must reach HW with a read) 315** 316** Superdome (in particular, REO) allows only 64-bit CSR accesses. 317*/ 318#define READ_REG32(addr) readl(addr) 319#define READ_REG64(addr) readq(addr) 320#define WRITE_REG32(val, addr) writel((val), (addr)) 321#define WRITE_REG64(val, addr) writeq((val), (addr)) 322 323#ifdef CONFIG_64BIT 324#define READ_REG(addr) READ_REG64(addr) 325#define WRITE_REG(value, addr) WRITE_REG64(value, addr) 326#else 327#define READ_REG(addr) READ_REG32(addr) 328#define WRITE_REG(value, addr) WRITE_REG32(value, addr) 329#endif 330 331#ifdef DEBUG_SBA_INIT 332 333/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */ 334 335/** 336 * sba_dump_ranges - debugging only - print ranges assigned to this IOA 337 * @hpa: base address of the sba 338 * 339 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO 340 * IO Adapter (aka Bus Converter). 341 */ 342static void 343sba_dump_ranges(void __iomem *hpa) 344{ 345 DBG_INIT("SBA at 0x%p\n", hpa); 346 DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE)); 347 DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK)); 348 DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE)); 349 DBG_INIT("\n"); 350 DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE)); 351 DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK)); 352 DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE)); 353} 354 355/** 356 * sba_dump_tlb - debugging only - print IOMMU operating parameters 357 * @hpa: base address of the IOMMU 358 * 359 * Print the size/location of the IO MMU PDIR. 360 */ 361static void sba_dump_tlb(void __iomem *hpa) 362{ 363 DBG_INIT("IO TLB at 0x%p\n", hpa); 364 DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE)); 365 DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK)); 366 DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG)); 367 DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE)); 368 DBG_INIT("\n"); 369} 370#else 371#define sba_dump_ranges(x) 372#define sba_dump_tlb(x) 373#endif /* DEBUG_SBA_INIT */ 374 375 376#ifdef ASSERT_PDIR_SANITY 377 378/** 379 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry 380 * @ioc: IO MMU structure which owns the pdir we are interested in. 381 * @msg: text to print ont the output line. 382 * @pide: pdir index. 383 * 384 * Print one entry of the IO MMU PDIR in human readable form. 385 */ 386static void 387sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) 388{ 389 /* start printing from lowest pde in rval */ 390 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]); 391 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); 392 uint rcnt; 393 394 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", 395 msg, 396 rptr, pide & (BITS_PER_LONG - 1), *rptr); 397 398 rcnt = 0; 399 while (rcnt < BITS_PER_LONG) { 400 printk(KERN_DEBUG "%s %2d %p %016Lx\n", 401 (rcnt == (pide & (BITS_PER_LONG - 1))) 402 ? " -->" : " ", 403 rcnt, ptr, *ptr ); 404 rcnt++; 405 ptr++; 406 } 407 printk(KERN_DEBUG "%s", msg); 408} 409 410 411/** 412 * sba_check_pdir - debugging only - consistency checker 413 * @ioc: IO MMU structure which owns the pdir we are interested in. 414 * @msg: text to print ont the output line. 415 * 416 * Verify the resource map and pdir state is consistent 417 */ 418static int 419sba_check_pdir(struct ioc *ioc, char *msg) 420{ 421 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]); 422 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */ 423 u64 *pptr = ioc->pdir_base; /* pdir ptr */ 424 uint pide = 0; 425 426 while (rptr < rptr_end) { 427 u32 rval = *rptr; 428 int rcnt = 32; /* number of bits we might check */ 429 430 while (rcnt) { 431 /* Get last byte and highest bit from that */ 432 u32 pde = ((u32) (((char *)pptr)[7])) << 24; 433 if ((rval ^ pde) & 0x80000000) 434 { 435 /* 436 ** BUMMER! -- res_map != pdir -- 437 ** Dump rval and matching pdir entries 438 */ 439 sba_dump_pdir_entry(ioc, msg, pide); 440 return(1); 441 } 442 rcnt--; 443 rval <<= 1; /* try the next bit */ 444 pptr++; 445 pide++; 446 } 447 rptr++; /* look at next word of res_map */ 448 } 449 /* It'd be nice if we always got here :^) */ 450 return 0; 451} 452 453 454/** 455 * sba_dump_sg - debugging only - print Scatter-Gather list 456 * @ioc: IO MMU structure which owns the pdir we are interested in. 457 * @startsg: head of the SG list 458 * @nents: number of entries in SG list 459 * 460 * print the SG list so we can verify it's correct by hand. 461 */ 462static void 463sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) 464{ 465 while (nents-- > 0) { 466 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n", 467 nents, 468 (unsigned long) sg_dma_address(startsg), 469 sg_dma_len(startsg), 470 sg_virt_addr(startsg), startsg->length); 471 startsg++; 472 } 473} 474 475#endif /* ASSERT_PDIR_SANITY */ 476 477 478 479 480/************************************************************** 481* 482* I/O Pdir Resource Management 483* 484* Bits set in the resource map are in use. 485* Each bit can represent a number of pages. 486* LSbs represent lower addresses (IOVA's). 487* 488***************************************************************/ 489#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ 490 491/* Convert from IOVP to IOVA and vice versa. */ 492 493#ifdef ZX1_SUPPORT 494/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */ 495#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset)) 496#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask) 497#else 498/* only support Astro and ancestors. Saves a few cycles in key places */ 499#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset)) 500#define SBA_IOVP(ioc,iova) (iova) 501#endif 502 503#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT) 504 505#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) 506#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) 507 508 509/** 510 * sba_search_bitmap - find free space in IO PDIR resource bitmap 511 * @ioc: IO MMU structure which owns the pdir we are interested in. 512 * @bits_wanted: number of entries we need. 513 * 514 * Find consecutive free bits in resource bitmap. 515 * Each bit represents one entry in the IO Pdir. 516 * Cool perf optimization: search for log2(size) bits at a time. 517 */ 518static SBA_INLINE unsigned long 519sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) 520{ 521 unsigned long *res_ptr = ioc->res_hint; 522 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); 523 unsigned long pide = ~0UL; 524 525 if (bits_wanted > (BITS_PER_LONG/2)) { 526 /* Search word at a time - no mask needed */ 527 for(; res_ptr < res_end; ++res_ptr) { 528 if (*res_ptr == 0) { 529 *res_ptr = RESMAP_MASK(bits_wanted); 530 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); 531 pide <<= 3; /* convert to bit address */ 532 break; 533 } 534 } 535 /* point to the next word on next pass */ 536 res_ptr++; 537 ioc->res_bitshift = 0; 538 } else { 539 /* 540 ** Search the resource bit map on well-aligned values. 541 ** "o" is the alignment. 542 ** We need the alignment to invalidate I/O TLB using 543 ** SBA HW features in the unmap path. 544 */ 545 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT); 546 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); 547 unsigned long mask; 548 549 if (bitshiftcnt >= BITS_PER_LONG) { 550 bitshiftcnt = 0; 551 res_ptr++; 552 } 553 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt; 554 555 DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); 556 while(res_ptr < res_end) 557 { 558 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); 559 WARN_ON(mask == 0); 560 if(((*res_ptr) & mask) == 0) { 561 *res_ptr |= mask; /* mark resources busy! */ 562 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); 563 pide <<= 3; /* convert to bit address */ 564 pide += bitshiftcnt; 565 break; 566 } 567 mask >>= o; 568 bitshiftcnt += o; 569 if (mask == 0) { 570 mask = RESMAP_MASK(bits_wanted); 571 bitshiftcnt=0; 572 res_ptr++; 573 } 574 } 575 /* look in the same word on the next pass */ 576 ioc->res_bitshift = bitshiftcnt + bits_wanted; 577 } 578 579 /* wrapped ? */ 580 if (res_end <= res_ptr) { 581 ioc->res_hint = (unsigned long *) ioc->res_map; 582 ioc->res_bitshift = 0; 583 } else { 584 ioc->res_hint = res_ptr; 585 } 586 return (pide); 587} 588 589 590/** 591 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap 592 * @ioc: IO MMU structure which owns the pdir we are interested in. 593 * @size: number of bytes to create a mapping for 594 * 595 * Given a size, find consecutive unmarked and then mark those bits in the 596 * resource bit map. 597 */ 598static int 599sba_alloc_range(struct ioc *ioc, size_t size) 600{ 601 unsigned int pages_needed = size >> IOVP_SHIFT; 602#ifdef SBA_COLLECT_STATS 603 unsigned long cr_start = mfctl(16); 604#endif 605 unsigned long pide; 606 607 pide = sba_search_bitmap(ioc, pages_needed); 608 if (pide >= (ioc->res_size << 3)) { 609 pide = sba_search_bitmap(ioc, pages_needed); 610 if (pide >= (ioc->res_size << 3)) 611 panic("%s: I/O MMU @ %p is out of mapping resources\n", 612 __FILE__, ioc->ioc_hpa); 613 } 614 615#ifdef ASSERT_PDIR_SANITY 616 /* verify the first enable bit is clear */ 617 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) { 618 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); 619 } 620#endif 621 622 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", 623 __FUNCTION__, size, pages_needed, pide, 624 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), 625 ioc->res_bitshift ); 626 627#ifdef SBA_COLLECT_STATS 628 { 629 unsigned long cr_end = mfctl(16); 630 unsigned long tmp = cr_end - cr_start; 631 /* check for roll over */ 632 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp); 633 } 634 ioc->avg_search[ioc->avg_idx++] = cr_start; 635 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; 636 637 ioc->used_pages += pages_needed; 638#endif 639 640 return (pide); 641} 642 643 644/** 645 * sba_free_range - unmark bits in IO PDIR resource bitmap 646 * @ioc: IO MMU structure which owns the pdir we are interested in. 647 * @iova: IO virtual address which was previously allocated. 648 * @size: number of bytes to create a mapping for 649 * 650 * clear bits in the ioc's resource map 651 */ 652static SBA_INLINE void 653sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) 654{ 655 unsigned long iovp = SBA_IOVP(ioc, iova); 656 unsigned int pide = PDIR_INDEX(iovp); 657 unsigned int ridx = pide >> 3; /* convert bit to byte address */ 658 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); 659 660 int bits_not_wanted = size >> IOVP_SHIFT; 661 662 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ 663 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1)); 664 665 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", 666 __FUNCTION__, (uint) iova, size, 667 bits_not_wanted, m, pide, res_ptr, *res_ptr); 668 669#ifdef SBA_COLLECT_STATS 670 ioc->used_pages -= bits_not_wanted; 671#endif 672 673 *res_ptr &= ~m; 674} 675 676 677/************************************************************** 678* 679* "Dynamic DMA Mapping" support (aka "Coherent I/O") 680* 681***************************************************************/ 682 683#ifdef SBA_HINT_SUPPORT 684#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir) 685#endif 686 687typedef unsigned long space_t; 688#define KERNEL_SPACE 0 689 690/** 691 * sba_io_pdir_entry - fill in one IO PDIR entry 692 * @pdir_ptr: pointer to IO PDIR entry 693 * @sid: process Space ID - currently only support KERNEL_SPACE 694 * @vba: Virtual CPU address of buffer to map 695 * @hint: DMA hint set to use for this mapping 696 * 697 * SBA Mapping Routine 698 * 699 * Given a virtual address (vba, arg2) and space id, (sid, arg1) 700 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by 701 * pdir_ptr (arg0). 702 * Using the bass-ackwards HP bit numbering, Each IO Pdir entry 703 * for Astro/Ike looks like: 704 * 705 * 706 * 0 19 51 55 63 707 * +-+---------------------+----------------------------------+----+--------+ 708 * |V| U | PPN[43:12] | U | VI | 709 * +-+---------------------+----------------------------------+----+--------+ 710 * 711 * Pluto is basically identical, supports fewer physical address bits: 712 * 713 * 0 23 51 55 63 714 * +-+------------------------+-------------------------------+----+--------+ 715 * |V| U | PPN[39:12] | U | VI | 716 * +-+------------------------+-------------------------------+----+--------+ 717 * 718 * V == Valid Bit (Most Significant Bit is bit 0) 719 * U == Unused 720 * PPN == Physical Page Number 721 * VI == Virtual Index (aka Coherent Index) 722 * 723 * LPA instruction output is put into PPN field. 724 * LCI (Load Coherence Index) instruction provides the "VI" bits. 725 * 726 * We pre-swap the bytes since PCX-W is Big Endian and the 727 * IOMMU uses little endian for the pdir. 728 */ 729 730void SBA_INLINE 731sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, 732 unsigned long hint) 733{ 734 u64 pa; /* physical address */ 735 register unsigned ci; /* coherent index */ 736 737 pa = virt_to_phys(vba); 738 pa &= IOVP_MASK; 739 740 mtsp(sid,1); 741 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); 742 pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */ 743 744 pa |= 0x8000000000000000ULL; /* set "valid" bit */ 745 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */ 746 747 /* 748 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set 749 * (bit #61, big endian), we have to flush and sync every time 750 * IO-PDIR is changed in Ike/Astro. 751 */ 752 if (ioc_needs_fdc) 753 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 754} 755 756 757/** 758 * sba_mark_invalid - invalidate one or more IO PDIR entries 759 * @ioc: IO MMU structure which owns the pdir we are interested in. 760 * @iova: IO Virtual Address mapped earlier 761 * @byte_cnt: number of bytes this mapping covers. 762 * 763 * Marking the IO PDIR entry(ies) as Invalid and invalidate 764 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register) 765 * is to purge stale entries in the IO TLB when unmapping entries. 766 * 767 * The PCOM register supports purging of multiple pages, with a minium 768 * of 1 page and a maximum of 2GB. Hardware requires the address be 769 * aligned to the size of the range being purged. The size of the range 770 * must be a power of 2. The "Cool perf optimization" in the 771 * allocation routine helps keep that true. 772 */ 773static SBA_INLINE void 774sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) 775{ 776 u32 iovp = (u32) SBA_IOVP(ioc,iova); 777 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)]; 778 779#ifdef ASSERT_PDIR_SANITY 780 /* Assert first pdir entry is set. 781 ** 782 ** Even though this is a big-endian machine, the entries 783 ** in the iopdir are little endian. That's why we look at 784 ** the byte at +7 instead of at +0. 785 */ 786 if (0x80 != (((u8 *) pdir_ptr)[7])) { 787 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); 788 } 789#endif 790 791 if (byte_cnt > IOVP_SIZE) 792 { 793#if 0 794 unsigned long entries_per_cacheline = ioc_needs_fdc ? 795 L1_CACHE_ALIGN(((unsigned long) pdir_ptr)) 796 - (unsigned long) pdir_ptr; 797 : 262144; 798#endif 799 800 /* set "size" field for PCOM */ 801 iovp |= get_order(byte_cnt) + PAGE_SHIFT; 802 803 do { 804 /* clear I/O Pdir entry "valid" bit first */ 805 ((u8 *) pdir_ptr)[7] = 0; 806 if (ioc_needs_fdc) { 807 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 808#if 0 809 entries_per_cacheline = L1_CACHE_SHIFT - 3; 810#endif 811 } 812 pdir_ptr++; 813 byte_cnt -= IOVP_SIZE; 814 } while (byte_cnt > IOVP_SIZE); 815 } else 816 iovp |= IOVP_SHIFT; /* set "size" field for PCOM */ 817 818 /* 819 ** clear I/O PDIR entry "valid" bit. 820 ** We have to R/M/W the cacheline regardless how much of the 821 ** pdir entry that we clobber. 822 ** The rest of the entry would be useful for debugging if we 823 ** could dump core on HPMC. 824 */ 825 ((u8 *) pdir_ptr)[7] = 0; 826 if (ioc_needs_fdc) 827 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 828 829 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM); 830} 831 832/** 833 * sba_dma_supported - PCI driver can query DMA support 834 * @dev: instance of PCI owned by the driver that's asking 835 * @mask: number of address bits this PCI device can handle 836 * 837 * See Documentation/DMA-mapping.txt 838 */ 839static int sba_dma_supported( struct device *dev, u64 mask) 840{ 841 struct ioc *ioc; 842 843 if (dev == NULL) { 844 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n"); 845 BUG(); 846 return(0); 847 } 848 849 /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first, 850 * then fall back to 32-bit if that fails. 851 * We are just "encouraging" 32-bit DMA masks here since we can 852 * never allow IOMMU bypass unless we add special support for ZX1. 853 */ 854 if (mask > ~0U) 855 return 0; 856 857 ioc = GET_IOC(dev); 858 859 /* 860 * check if mask is >= than the current max IO Virt Address 861 * The max IO Virt address will *always* < 30 bits. 862 */ 863 return((int)(mask >= (ioc->ibase - 1 + 864 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) ))); 865} 866 867 868/** 869 * sba_map_single - map one buffer and return IOVA for DMA 870 * @dev: instance of PCI owned by the driver that's asking. 871 * @addr: driver buffer to map. 872 * @size: number of bytes to map in driver buffer. 873 * @direction: R/W or both. 874 * 875 * See Documentation/DMA-mapping.txt 876 */ 877static dma_addr_t 878sba_map_single(struct device *dev, void *addr, size_t size, 879 enum dma_data_direction direction) 880{ 881 struct ioc *ioc; 882 unsigned long flags; 883 dma_addr_t iovp; 884 dma_addr_t offset; 885 u64 *pdir_start; 886 int pide; 887 888 ioc = GET_IOC(dev); 889 890 /* save offset bits */ 891 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; 892 893 /* round up to nearest IOVP_SIZE */ 894 size = (size + offset + ~IOVP_MASK) & IOVP_MASK; 895 896 spin_lock_irqsave(&ioc->res_lock, flags); 897#ifdef ASSERT_PDIR_SANITY 898 sba_check_pdir(ioc,"Check before sba_map_single()"); 899#endif 900 901#ifdef SBA_COLLECT_STATS 902 ioc->msingle_calls++; 903 ioc->msingle_pages += size >> IOVP_SHIFT; 904#endif 905 pide = sba_alloc_range(ioc, size); 906 iovp = (dma_addr_t) pide << IOVP_SHIFT; 907 908 DBG_RUN("%s() 0x%p -> 0x%lx\n", 909 __FUNCTION__, addr, (long) iovp | offset); 910 911 pdir_start = &(ioc->pdir_base[pide]); 912 913 while (size > 0) { 914 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0); 915 916 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n", 917 pdir_start, 918 (u8) (((u8 *) pdir_start)[7]), 919 (u8) (((u8 *) pdir_start)[6]), 920 (u8) (((u8 *) pdir_start)[5]), 921 (u8) (((u8 *) pdir_start)[4]), 922 (u8) (((u8 *) pdir_start)[3]), 923 (u8) (((u8 *) pdir_start)[2]), 924 (u8) (((u8 *) pdir_start)[1]), 925 (u8) (((u8 *) pdir_start)[0]) 926 ); 927 928 addr += IOVP_SIZE; 929 size -= IOVP_SIZE; 930 pdir_start++; 931 } 932 933 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */ 934 if (ioc_needs_fdc) 935 asm volatile("sync" : : ); 936 937#ifdef ASSERT_PDIR_SANITY 938 sba_check_pdir(ioc,"Check after sba_map_single()"); 939#endif 940 spin_unlock_irqrestore(&ioc->res_lock, flags); 941 942 /* form complete address */ 943 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG); 944} 945 946 947/** 948 * sba_unmap_single - unmap one IOVA and free resources 949 * @dev: instance of PCI owned by the driver that's asking. 950 * @iova: IOVA of driver buffer previously mapped. 951 * @size: number of bytes mapped in driver buffer. 952 * @direction: R/W or both. 953 * 954 * See Documentation/DMA-mapping.txt 955 */ 956static void 957sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, 958 enum dma_data_direction direction) 959{ 960 struct ioc *ioc; 961#if DELAYED_RESOURCE_CNT > 0 962 struct sba_dma_pair *d; 963#endif 964 unsigned long flags; 965 dma_addr_t offset; 966 967 DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size); 968 969 ioc = GET_IOC(dev); 970 offset = iova & ~IOVP_MASK; 971 iova ^= offset; /* clear offset bits */ 972 size += offset; 973 size = ROUNDUP(size, IOVP_SIZE); 974 975 spin_lock_irqsave(&ioc->res_lock, flags); 976 977#ifdef SBA_COLLECT_STATS 978 ioc->usingle_calls++; 979 ioc->usingle_pages += size >> IOVP_SHIFT; 980#endif 981 982 sba_mark_invalid(ioc, iova, size); 983 984#if DELAYED_RESOURCE_CNT > 0 985 /* Delaying when we re-use a IO Pdir entry reduces the number 986 * of MMIO reads needed to flush writes to the PCOM register. 987 */ 988 d = &(ioc->saved[ioc->saved_cnt]); 989 d->iova = iova; 990 d->size = size; 991 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) { 992 int cnt = ioc->saved_cnt; 993 while (cnt--) { 994 sba_free_range(ioc, d->iova, d->size); 995 d--; 996 } 997 ioc->saved_cnt = 0; 998 999 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ 1000 } 1001#else /* DELAYED_RESOURCE_CNT == 0 */ 1002 sba_free_range(ioc, iova, size); 1003 1004 /* If fdc's were issued, force fdc's to be visible now */ 1005 if (ioc_needs_fdc) 1006 asm volatile("sync" : : ); 1007 1008 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ 1009#endif /* DELAYED_RESOURCE_CNT == 0 */ 1010 1011 spin_unlock_irqrestore(&ioc->res_lock, flags); 1012 1013 /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support. 1014 ** For Astro based systems this isn't a big deal WRT performance. 1015 ** As long as 2.4 kernels copyin/copyout data from/to userspace, 1016 ** we don't need the syncdma. The issue here is I/O MMU cachelines 1017 ** are *not* coherent in all cases. May be hwrev dependent. 1018 ** Need to investigate more. 1019 asm volatile("syncdma"); 1020 */ 1021} 1022 1023 1024/** 1025 * sba_alloc_consistent - allocate/map shared mem for DMA 1026 * @hwdev: instance of PCI owned by the driver that's asking. 1027 * @size: number of bytes mapped in driver buffer. 1028 * @dma_handle: IOVA of new buffer. 1029 * 1030 * See Documentation/DMA-mapping.txt 1031 */ 1032static void *sba_alloc_consistent(struct device *hwdev, size_t size, 1033 dma_addr_t *dma_handle, gfp_t gfp) 1034{ 1035 void *ret; 1036 1037 if (!hwdev) { 1038 /* only support PCI */ 1039 *dma_handle = 0; 1040 return 0; 1041 } 1042 1043 ret = (void *) __get_free_pages(gfp, get_order(size)); 1044 1045 if (ret) { 1046 memset(ret, 0, size); 1047 *dma_handle = sba_map_single(hwdev, ret, size, 0); 1048 } 1049 1050 return ret; 1051} 1052 1053 1054/** 1055 * sba_free_consistent - free/unmap shared mem for DMA 1056 * @hwdev: instance of PCI owned by the driver that's asking. 1057 * @size: number of bytes mapped in driver buffer. 1058 * @vaddr: virtual address IOVA of "consistent" buffer. 1059 * @dma_handler: IO virtual address of "consistent" buffer. 1060 * 1061 * See Documentation/DMA-mapping.txt 1062 */ 1063static void 1064sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, 1065 dma_addr_t dma_handle) 1066{ 1067 sba_unmap_single(hwdev, dma_handle, size, 0); 1068 free_pages((unsigned long) vaddr, get_order(size)); 1069} 1070 1071 1072/* 1073** Since 0 is a valid pdir_base index value, can't use that 1074** to determine if a value is valid or not. Use a flag to indicate 1075** the SG list entry contains a valid pdir index. 1076*/ 1077#define PIDE_FLAG 0x80000000UL 1078 1079#ifdef SBA_COLLECT_STATS 1080#define IOMMU_MAP_STATS 1081#endif 1082#include "iommu-helpers.h" 1083 1084#ifdef DEBUG_LARGE_SG_ENTRIES 1085int dump_run_sg = 0; 1086#endif 1087 1088 1089/** 1090 * sba_map_sg - map Scatter/Gather list 1091 * @dev: instance of PCI owned by the driver that's asking. 1092 * @sglist: array of buffer/length pairs 1093 * @nents: number of entries in list 1094 * @direction: R/W or both. 1095 * 1096 * See Documentation/DMA-mapping.txt 1097 */ 1098static int 1099sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 1100 enum dma_data_direction direction) 1101{ 1102 struct ioc *ioc; 1103 int coalesced, filled = 0; 1104 unsigned long flags; 1105 1106 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); 1107 1108 ioc = GET_IOC(dev); 1109 1110 /* Fast path single entry scatterlists. */ 1111 if (nents == 1) { 1112 sg_dma_address(sglist) = sba_map_single(dev, 1113 (void *)sg_virt_addr(sglist), 1114 sglist->length, direction); 1115 sg_dma_len(sglist) = sglist->length; 1116 return 1; 1117 } 1118 1119 spin_lock_irqsave(&ioc->res_lock, flags); 1120 1121#ifdef ASSERT_PDIR_SANITY 1122 if (sba_check_pdir(ioc,"Check before sba_map_sg()")) 1123 { 1124 sba_dump_sg(ioc, sglist, nents); 1125 panic("Check before sba_map_sg()"); 1126 } 1127#endif 1128 1129#ifdef SBA_COLLECT_STATS 1130 ioc->msg_calls++; 1131#endif 1132 1133 /* 1134 ** First coalesce the chunks and allocate I/O pdir space 1135 ** 1136 ** If this is one DMA stream, we can properly map using the 1137 ** correct virtual address associated with each DMA page. 1138 ** w/o this association, we wouldn't have coherent DMA! 1139 ** Access to the virtual address is what forces a two pass algorithm. 1140 */ 1141 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range); 1142 1143 /* 1144 ** Program the I/O Pdir 1145 ** 1146 ** map the virtual addresses to the I/O Pdir 1147 ** o dma_address will contain the pdir index 1148 ** o dma_len will contain the number of bytes to map 1149 ** o address contains the virtual address. 1150 */ 1151 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry); 1152 1153 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */ 1154 if (ioc_needs_fdc) 1155 asm volatile("sync" : : ); 1156 1157#ifdef ASSERT_PDIR_SANITY 1158 if (sba_check_pdir(ioc,"Check after sba_map_sg()")) 1159 { 1160 sba_dump_sg(ioc, sglist, nents); 1161 panic("Check after sba_map_sg()\n"); 1162 } 1163#endif 1164 1165 spin_unlock_irqrestore(&ioc->res_lock, flags); 1166 1167 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); 1168 1169 return filled; 1170} 1171 1172 1173/** 1174 * sba_unmap_sg - unmap Scatter/Gather list 1175 * @dev: instance of PCI owned by the driver that's asking. 1176 * @sglist: array of buffer/length pairs 1177 * @nents: number of entries in list 1178 * @direction: R/W or both. 1179 * 1180 * See Documentation/DMA-mapping.txt 1181 */ 1182static void 1183sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 1184 enum dma_data_direction direction) 1185{ 1186 struct ioc *ioc; 1187#ifdef ASSERT_PDIR_SANITY 1188 unsigned long flags; 1189#endif 1190 1191 DBG_RUN_SG("%s() START %d entries, %p,%x\n", 1192 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length); 1193 1194 ioc = GET_IOC(dev); 1195 1196#ifdef SBA_COLLECT_STATS 1197 ioc->usg_calls++; 1198#endif 1199 1200#ifdef ASSERT_PDIR_SANITY 1201 spin_lock_irqsave(&ioc->res_lock, flags); 1202 sba_check_pdir(ioc,"Check before sba_unmap_sg()"); 1203 spin_unlock_irqrestore(&ioc->res_lock, flags); 1204#endif 1205 1206 while (sg_dma_len(sglist) && nents--) { 1207 1208 sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); 1209#ifdef SBA_COLLECT_STATS 1210 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; 1211 ioc->usingle_calls--; /* kluge since call is unmap_sg() */ 1212#endif 1213 ++sglist; 1214 } 1215 1216 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); 1217 1218#ifdef ASSERT_PDIR_SANITY 1219 spin_lock_irqsave(&ioc->res_lock, flags); 1220 sba_check_pdir(ioc,"Check after sba_unmap_sg()"); 1221 spin_unlock_irqrestore(&ioc->res_lock, flags); 1222#endif 1223 1224} 1225 1226static struct hppa_dma_ops sba_ops = { 1227 .dma_supported = sba_dma_supported, 1228 .alloc_consistent = sba_alloc_consistent, 1229 .alloc_noncoherent = sba_alloc_consistent, 1230 .free_consistent = sba_free_consistent, 1231 .map_single = sba_map_single, 1232 .unmap_single = sba_unmap_single, 1233 .map_sg = sba_map_sg, 1234 .unmap_sg = sba_unmap_sg, 1235 .dma_sync_single_for_cpu = NULL, 1236 .dma_sync_single_for_device = NULL, 1237 .dma_sync_sg_for_cpu = NULL, 1238 .dma_sync_sg_for_device = NULL, 1239}; 1240 1241 1242/************************************************************************** 1243** 1244** SBA PAT PDC support 1245** 1246** o call pdc_pat_cell_module() 1247** o store ranges in PCI "resource" structures 1248** 1249**************************************************************************/ 1250 1251static void 1252sba_get_pat_resources(struct sba_device *sba_dev) 1253{ 1254#if 0 1255/* 1256** TODO/REVISIT/FIXME: support for directed ranges requires calls to 1257** PAT PDC to program the SBA/LBA directed range registers...this 1258** burden may fall on the LBA code since it directly supports the 1259** PCI subsystem. It's not clear yet. - ggg 1260*/ 1261PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp); 1262 FIXME : ??? 1263PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp); 1264 Tells where the dvi bits are located in the address. 1265PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp); 1266 FIXME : ??? 1267#endif 1268} 1269 1270 1271/************************************************************** 1272* 1273* Initialization and claim 1274* 1275***************************************************************/ 1276#define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */ 1277#define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */ 1278static void * 1279sba_alloc_pdir(unsigned int pdir_size) 1280{ 1281 unsigned long pdir_base; 1282 unsigned long pdir_order = get_order(pdir_size); 1283 1284 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order); 1285 if (NULL == (void *) pdir_base) { 1286 panic("%s() could not allocate I/O Page Table\n", 1287 __FUNCTION__); 1288 } 1289 1290 /* If this is not PA8700 (PCX-W2) 1291 ** OR newer than ver 2.2 1292 ** OR in a system that doesn't need VINDEX bits from SBA, 1293 ** 1294 ** then we aren't exposed to the HW bug. 1295 */ 1296 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13 1297 || (boot_cpu_data.pdc.versions > 0x202) 1298 || (boot_cpu_data.pdc.capabilities & 0x08L) ) 1299 return (void *) pdir_base; 1300 1301 /* 1302 * PA8700 (PCX-W2, aka piranha) silent data corruption fix 1303 * 1304 * An interaction between PA8700 CPU (Ver 2.2 or older) and 1305 * Ike/Astro can cause silent data corruption. This is only 1306 * a problem if the I/O PDIR is located in memory such that 1307 * (little-endian) bits 17 and 18 are on and bit 20 is off. 1308 * 1309 * Since the max IO Pdir size is 2MB, by cleverly allocating the 1310 * right physical address, we can either avoid (IOPDIR <= 1MB) 1311 * or minimize (2MB IO Pdir) the problem if we restrict the 1312 * IO Pdir to a maximum size of 2MB-128K (1902K). 1313 * 1314 * Because we always allocate 2^N sized IO pdirs, either of the 1315 * "bad" regions will be the last 128K if at all. That's easy 1316 * to test for. 1317 * 1318 */ 1319 if (pdir_order <= (19-12)) { 1320 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) { 1321 /* allocate a new one on 512k alignment */ 1322 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12)); 1323 /* release original */ 1324 free_pages(pdir_base, pdir_order); 1325 1326 pdir_base = new_pdir; 1327 1328 /* release excess */ 1329 while (pdir_order < (19-12)) { 1330 new_pdir += pdir_size; 1331 free_pages(new_pdir, pdir_order); 1332 pdir_order +=1; 1333 pdir_size <<=1; 1334 } 1335 } 1336 } else { 1337 /* 1338 ** 1MB or 2MB Pdir 1339 ** Needs to be aligned on an "odd" 1MB boundary. 1340 */ 1341 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */ 1342 1343 /* release original */ 1344 free_pages( pdir_base, pdir_order); 1345 1346 /* release first 1MB */ 1347 free_pages(new_pdir, 20-12); 1348 1349 pdir_base = new_pdir + 1024*1024; 1350 1351 if (pdir_order > (20-12)) { 1352 /* 1353 ** 2MB Pdir. 1354 ** 1355 ** Flag tells init_bitmap() to mark bad 128k as used 1356 ** and to reduce the size by 128k. 1357 */ 1358 piranha_bad_128k = 1; 1359 1360 new_pdir += 3*1024*1024; 1361 /* release last 1MB */ 1362 free_pages(new_pdir, 20-12); 1363 1364 /* release unusable 128KB */ 1365 free_pages(new_pdir - 128*1024 , 17-12); 1366 1367 pdir_size -= 128*1024; 1368 } 1369 } 1370 1371 memset((void *) pdir_base, 0, pdir_size); 1372 return (void *) pdir_base; 1373} 1374 1375static struct device *next_device(struct klist_iter *i) 1376{ 1377 struct klist_node * n = klist_next(i); 1378 return n ? container_of(n, struct device, knode_parent) : NULL; 1379} 1380 1381/* setup Mercury or Elroy IBASE/IMASK registers. */ 1382static void 1383setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1384{ 1385 /* lba_set_iregs() is in drivers/parisc/lba_pci.c */ 1386 extern void lba_set_iregs(struct parisc_device *, u32, u32); 1387 struct device *dev; 1388 struct klist_iter i; 1389 1390 klist_iter_init(&sba->dev.klist_children, &i); 1391 while ((dev = next_device(&i))) { 1392 struct parisc_device *lba = to_parisc_device(dev); 1393 int rope_num = (lba->hpa.start >> 13) & 0xf; 1394 if (rope_num >> 3 == ioc_num) 1395 lba_set_iregs(lba, ioc->ibase, ioc->imask); 1396 } 1397 klist_iter_exit(&i); 1398} 1399 1400static void 1401sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1402{ 1403 u32 iova_space_mask; 1404 u32 iova_space_size; 1405 int iov_order, tcnfg; 1406#ifdef SBA_AGP_SUPPORT 1407 int agp_found = 0; 1408#endif 1409 /* 1410 ** Firmware programs the base and size of a "safe IOVA space" 1411 ** (one that doesn't overlap memory or LMMIO space) in the 1412 ** IBASE and IMASK registers. 1413 */ 1414 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); 1415 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; 1416 1417 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { 1418 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n"); 1419 iova_space_size /= 2; 1420 } 1421 1422 /* 1423 ** iov_order is always based on a 1GB IOVA space since we want to 1424 ** turn on the other half for AGP GART. 1425 */ 1426 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT)); 1427 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64); 1428 1429 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n", 1430 __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20, 1431 iov_order + PAGE_SHIFT); 1432 1433 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, 1434 get_order(ioc->pdir_size)); 1435 if (!ioc->pdir_base) 1436 panic("Couldn't allocate I/O Page Table\n"); 1437 1438 memset(ioc->pdir_base, 0, ioc->pdir_size); 1439 1440 DBG_INIT("%s() pdir %p size %x\n", 1441 __FUNCTION__, ioc->pdir_base, ioc->pdir_size); 1442 1443#ifdef SBA_HINT_SUPPORT 1444 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; 1445 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); 1446 1447 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n", 1448 ioc->hint_shift_pdir, ioc->hint_mask_pdir); 1449#endif 1450 1451 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base); 1452 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); 1453 1454 /* build IMASK for IOC and Elroy */ 1455 iova_space_mask = 0xffffffff; 1456 iova_space_mask <<= (iov_order + PAGE_SHIFT); 1457 ioc->imask = iova_space_mask; 1458#ifdef ZX1_SUPPORT 1459 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); 1460#endif 1461 sba_dump_tlb(ioc->ioc_hpa); 1462 1463 setup_ibase_imask(sba, ioc, ioc_num); 1464 1465 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK); 1466 1467#ifdef CONFIG_64BIT 1468 /* 1469 ** Setting the upper bits makes checking for bypass addresses 1470 ** a little faster later on. 1471 */ 1472 ioc->imask |= 0xFFFFFFFF00000000UL; 1473#endif 1474 1475 /* Set I/O PDIR Page size to system page size */ 1476 switch (PAGE_SHIFT) { 1477 case 12: tcnfg = 0; break; /* 4K */ 1478 case 13: tcnfg = 1; break; /* 8K */ 1479 case 14: tcnfg = 2; break; /* 16K */ 1480 case 16: tcnfg = 3; break; /* 64K */ 1481 default: 1482 panic(__FILE__ "Unsupported system page size %d", 1483 1 << PAGE_SHIFT); 1484 break; 1485 } 1486 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); 1487 1488 /* 1489 ** Program the IOC's ibase and enable IOVA translation 1490 ** Bit zero == enable bit. 1491 */ 1492 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); 1493 1494 /* 1495 ** Clear I/O TLB of any possible entries. 1496 ** (Yes. This is a bit paranoid...but so what) 1497 */ 1498 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM); 1499 1500#ifdef SBA_AGP_SUPPORT 1501 /* 1502 ** If an AGP device is present, only use half of the IOV space 1503 ** for PCI DMA. Unfortunately we can't know ahead of time 1504 ** whether GART support will actually be used, for now we 1505 ** can just key on any AGP device found in the system. 1506 ** We program the next pdir index after we stop w/ a key for 1507 ** the GART code to handshake on. 1508 */ 1509 device=NULL; 1510 for (lba = sba->child; lba; lba = lba->sibling) { 1511 if (IS_QUICKSILVER(lba)) 1512 break; 1513 } 1514 1515 if (lba) { 1516 DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__); 1517 ioc->pdir_size /= 2; 1518 ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE; 1519 } else { 1520 DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__); 1521 } 1522#endif /* 0 */ 1523 1524} 1525 1526static void 1527sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1528{ 1529 u32 iova_space_size, iova_space_mask; 1530 unsigned int pdir_size, iov_order; 1531 1532 /* 1533 ** Determine IOVA Space size from memory size. 1534 ** 1535 ** Ideally, PCI drivers would register the maximum number 1536 ** of DMA they can have outstanding for each device they 1537 ** own. Next best thing would be to guess how much DMA 1538 ** can be outstanding based on PCI Class/sub-class. Both 1539 ** methods still require some "extra" to support PCI 1540 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). 1541 ** 1542 ** While we have 32-bits "IOVA" space, top two 2 bits are used 1543 ** for DMA hints - ergo only 30 bits max. 1544 */ 1545 1546 iova_space_size = (u32) (num_physpages/global_ioc_cnt); 1547 1548 /* limit IOVA space size to 1MB-1GB */ 1549 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { 1550 iova_space_size = 1 << (20 - PAGE_SHIFT); 1551 } 1552 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) { 1553 iova_space_size = 1 << (30 - PAGE_SHIFT); 1554 } 1555 1556 /* 1557 ** iova space must be log2() in size. 1558 ** thus, pdir/res_map will also be log2(). 1559 ** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced) 1560 */ 1561 iov_order = get_order(iova_space_size << PAGE_SHIFT); 1562 1563 /* iova_space_size is now bytes, not pages */ 1564 iova_space_size = 1 << (iov_order + PAGE_SHIFT); 1565 1566 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); 1567 1568 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", 1569 __FUNCTION__, 1570 ioc->ioc_hpa, 1571 (unsigned long) num_physpages >> (20 - PAGE_SHIFT), 1572 iova_space_size>>20, 1573 iov_order + PAGE_SHIFT); 1574 1575 ioc->pdir_base = sba_alloc_pdir(pdir_size); 1576 1577 DBG_INIT("%s() pdir %p size %x\n", 1578 __FUNCTION__, ioc->pdir_base, pdir_size); 1579 1580#ifdef SBA_HINT_SUPPORT 1581 /* FIXME : DMA HINTs not used */ 1582 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; 1583 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); 1584 1585 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n", 1586 ioc->hint_shift_pdir, ioc->hint_mask_pdir); 1587#endif 1588 1589 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); 1590 1591 /* build IMASK for IOC and Elroy */ 1592 iova_space_mask = 0xffffffff; 1593 iova_space_mask <<= (iov_order + PAGE_SHIFT); 1594 1595 /* 1596 ** On C3000 w/512MB mem, HP-UX 10.20 reports: 1597 ** ibase=0, imask=0xFE000000, size=0x2000000. 1598 */ 1599 ioc->ibase = 0; 1600 ioc->imask = iova_space_mask; /* save it */ 1601#ifdef ZX1_SUPPORT 1602 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); 1603#endif 1604 1605 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", 1606 __FUNCTION__, ioc->ibase, ioc->imask); 1607 1608 /* 1609 ** FIXME: Hint registers are programmed with default hint 1610 ** values during boot, so hints should be sane even if we 1611 ** can't reprogram them the way drivers want. 1612 */ 1613 1614 setup_ibase_imask(sba, ioc, ioc_num); 1615 1616 /* 1617 ** Program the IOC's ibase and enable IOVA translation 1618 */ 1619 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); 1620 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); 1621 1622 /* Set I/O PDIR Page size to 4K */ 1623 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG); 1624 1625 /* 1626 ** Clear I/O TLB of any possible entries. 1627 ** (Yes. This is a bit paranoid...but so what) 1628 */ 1629 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); 1630 1631 ioc->ibase = 0; /* used by SBA_IOVA and related macros */ 1632 1633 DBG_INIT("%s() DONE\n", __FUNCTION__); 1634} 1635 1636 1637 1638/************************************************************************** 1639** 1640** SBA initialization code (HW and SW) 1641** 1642** o identify SBA chip itself 1643** o initialize SBA chip modes (HardFail) 1644** o initialize SBA chip modes (HardFail) 1645** o FIXME: initialize DMA hints for reasonable defaults 1646** 1647**************************************************************************/ 1648 1649static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset) 1650{ 1651 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); 1652} 1653 1654static void sba_hw_init(struct sba_device *sba_dev) 1655{ 1656 int i; 1657 int num_ioc; 1658 u64 ioc_ctl; 1659 1660 if (!is_pdc_pat()) { 1661 /* Shutdown the USB controller on Astro-based workstations. 1662 ** Once we reprogram the IOMMU, the next DMA performed by 1663 ** USB will HPMC the box. USB is only enabled if a 1664 ** keyboard is present and found. 1665 ** 1666 ** With serial console, j6k v5.0 firmware says: 1667 ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7 1668 ** 1669 ** FIXME: Using GFX+USB console at power up but direct 1670 ** linux to serial console is still broken. 1671 ** USB could generate DMA so we must reset USB. 1672 ** The proper sequence would be: 1673 ** o block console output 1674 ** o reset USB device 1675 ** o reprogram serial port 1676 ** o unblock console output 1677 */ 1678 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) { 1679 pdc_io_reset_devices(); 1680 } 1681 1682 } 1683 1684 1685#if 0 1686printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa, 1687 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class); 1688 1689 /* 1690 ** Need to deal with DMA from LAN. 1691 ** Maybe use page zero boot device as a handle to talk 1692 ** to PDC about which device to shutdown. 1693 ** 1694 ** Netbooting, j6k v5.0 firmware says: 1695 ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002 1696 ** ARGH! invalid class. 1697 */ 1698 if ((PAGE0->mem_boot.cl_class != CL_RANDOM) 1699 && (PAGE0->mem_boot.cl_class != CL_SEQU)) { 1700 pdc_io_reset(); 1701 } 1702#endif 1703 1704 if (!IS_PLUTO(sba_dev->iodc)) { 1705 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL); 1706 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->", 1707 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl); 1708 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE); 1709 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC; 1710 /* j6700 v1.6 firmware sets 0x294f */ 1711 /* A500 firmware sets 0x4d */ 1712 1713 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL); 1714 1715#ifdef DEBUG_SBA_INIT 1716 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL); 1717 DBG_INIT(" 0x%Lx\n", ioc_ctl); 1718#endif 1719 } /* if !PLUTO */ 1720 1721 if (IS_ASTRO(sba_dev->iodc)) { 1722 int err; 1723 /* PAT_PDC (L-class) also reports the same goofy base */ 1724 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET); 1725 num_ioc = 1; 1726 1727 sba_dev->chip_resv.name = "Astro Intr Ack"; 1728 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL; 1729 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ; 1730 err = request_resource(&iomem_resource, &(sba_dev->chip_resv)); 1731 BUG_ON(err < 0); 1732 1733 } else if (IS_PLUTO(sba_dev->iodc)) { 1734 int err; 1735 1736 /* We use a negative value for IOC HPA so it gets 1737 * corrected when we add it with IKE's IOC offset. 1738 * Doesnt look clean, but fewer code. 1739 */ 1740 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET); 1741 num_ioc = 1; 1742 1743 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA"; 1744 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL; 1745 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1); 1746 err = request_resource(&iomem_resource, &(sba_dev->chip_resv)); 1747 WARN_ON(err < 0); 1748 1749 sba_dev->iommu_resv.name = "IOVA Space"; 1750 sba_dev->iommu_resv.start = 0x40000000UL; 1751 sba_dev->iommu_resv.end = 0x50000000UL - 1; 1752 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv)); 1753 WARN_ON(err < 0); 1754 } else { 1755 /* IS_IKE (ie N-class, L3000, L1500) */ 1756 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0)); 1757 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1)); 1758 num_ioc = 2; 1759 1760 /* TODO - LOOKUP Ike/Stretch chipset mem map */ 1761 } 1762 /* XXX: What about Reo? */ 1763 1764 sba_dev->num_ioc = num_ioc; 1765 for (i = 0; i < num_ioc; i++) { 1766 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa; 1767 unsigned int j; 1768 1769 for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) { 1770 1771 /* 1772 * Clear ROPE(N)_CONFIG AO bit. 1773 * Disables "NT Ordering" (~= !"Relaxed Ordering") 1774 * Overrides bit 1 in DMA Hint Sets. 1775 * Improves netperf UDP_STREAM by ~10% for bcm5701. 1776 */ 1777 if (IS_PLUTO(sba_dev->iodc)) { 1778 void __iomem *rope_cfg; 1779 unsigned long cfg_val; 1780 1781 rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j; 1782 cfg_val = READ_REG(rope_cfg); 1783 cfg_val &= ~IOC_ROPE_AO; 1784 WRITE_REG(cfg_val, rope_cfg); 1785 } 1786 1787 /* 1788 ** Make sure the box crashes on rope errors. 1789 */ 1790 WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j); 1791 } 1792 1793 /* flush out the last writes */ 1794 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); 1795 1796 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n", 1797 i, 1798 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), 1799 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) 1800 ); 1801 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n", 1802 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), 1803 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) 1804 ); 1805 1806 if (IS_PLUTO(sba_dev->iodc)) { 1807 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i); 1808 } else { 1809 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i); 1810 } 1811 } 1812} 1813 1814static void 1815sba_common_init(struct sba_device *sba_dev) 1816{ 1817 int i; 1818 1819 /* add this one to the head of the list (order doesn't matter) 1820 ** This will be useful for debugging - especially if we get coredumps 1821 */ 1822 sba_dev->next = sba_list; 1823 sba_list = sba_dev; 1824 1825 for(i=0; i< sba_dev->num_ioc; i++) { 1826 int res_size; 1827#ifdef DEBUG_DMB_TRAP 1828 extern void iterate_pages(unsigned long , unsigned long , 1829 void (*)(pte_t * , unsigned long), 1830 unsigned long ); 1831 void set_data_memory_break(pte_t * , unsigned long); 1832#endif 1833 /* resource map size dictated by pdir_size */ 1834 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */ 1835 1836 /* Second part of PIRANHA BUG */ 1837 if (piranha_bad_128k) { 1838 res_size -= (128*1024)/sizeof(u64); 1839 } 1840 1841 res_size >>= 3; /* convert bit count to byte count */ 1842 DBG_INIT("%s() res_size 0x%x\n", 1843 __FUNCTION__, res_size); 1844 1845 sba_dev->ioc[i].res_size = res_size; 1846 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size)); 1847 1848#ifdef DEBUG_DMB_TRAP 1849 iterate_pages( sba_dev->ioc[i].res_map, res_size, 1850 set_data_memory_break, 0); 1851#endif 1852 1853 if (NULL == sba_dev->ioc[i].res_map) 1854 { 1855 panic("%s:%s() could not allocate resource map\n", 1856 __FILE__, __FUNCTION__ ); 1857 } 1858 1859 memset(sba_dev->ioc[i].res_map, 0, res_size); 1860 /* next available IOVP - circular search */ 1861 sba_dev->ioc[i].res_hint = (unsigned long *) 1862 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); 1863 1864#ifdef ASSERT_PDIR_SANITY 1865 /* Mark first bit busy - ie no IOVA 0 */ 1866 sba_dev->ioc[i].res_map[0] = 0x80; 1867 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL; 1868#endif 1869 1870 /* Third (and last) part of PIRANHA BUG */ 1871 if (piranha_bad_128k) { 1872 /* region from +1408K to +1536 is un-usable. */ 1873 1874 int idx_start = (1408*1024/sizeof(u64)) >> 3; 1875 int idx_end = (1536*1024/sizeof(u64)) >> 3; 1876 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]); 1877 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]); 1878 1879 /* mark that part of the io pdir busy */ 1880 while (p_start < p_end) 1881 *p_start++ = -1; 1882 1883 } 1884 1885#ifdef DEBUG_DMB_TRAP 1886 iterate_pages( sba_dev->ioc[i].res_map, res_size, 1887 set_data_memory_break, 0); 1888 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size, 1889 set_data_memory_break, 0); 1890#endif 1891 1892 DBG_INIT("%s() %d res_map %x %p\n", 1893 __FUNCTION__, i, res_size, sba_dev->ioc[i].res_map); 1894 } 1895 1896 spin_lock_init(&sba_dev->sba_lock); 1897 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC; 1898 1899#ifdef DEBUG_SBA_INIT 1900 /* 1901 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set 1902 * (bit #61, big endian), we have to flush and sync every time 1903 * IO-PDIR is changed in Ike/Astro. 1904 */ 1905 if (ioc_needs_fdc) { 1906 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n"); 1907 } else { 1908 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n"); 1909 } 1910#endif 1911} 1912 1913#ifdef CONFIG_PROC_FS 1914static int sba_proc_info(struct seq_file *m, void *p) 1915{ 1916 struct sba_device *sba_dev = sba_list; 1917 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ 1918 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ 1919#ifdef SBA_COLLECT_STATS 1920 unsigned long avg = 0, min, max; 1921#endif 1922 int i, len = 0; 1923 1924 len += seq_printf(m, "%s rev %d.%d\n", 1925 sba_dev->name, 1926 (sba_dev->hw_rev & 0x7) + 1, 1927 (sba_dev->hw_rev & 0x18) >> 3 1928 ); 1929 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n", 1930 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ 1931 total_pages); 1932 1933 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", 1934 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ 1935 1936 len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n", 1937 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE), 1938 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK), 1939 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE) 1940 ); 1941 1942 for (i=0; i<4; i++) 1943 len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i, 1944 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18), 1945 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18), 1946 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18) 1947 ); 1948 1949#ifdef SBA_COLLECT_STATS 1950 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n", 1951 total_pages - ioc->used_pages, ioc->used_pages, 1952 (int) (ioc->used_pages * 100 / total_pages)); 1953 1954 min = max = ioc->avg_search[0]; 1955 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { 1956 avg += ioc->avg_search[i]; 1957 if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; 1958 if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; 1959 } 1960 avg /= SBA_SEARCH_SAMPLE; 1961 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", 1962 min, avg, max); 1963 1964 len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", 1965 ioc->msingle_calls, ioc->msingle_pages, 1966 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); 1967 1968 /* KLUGE - unmap_sg calls unmap_single for each mapped page */ 1969 min = ioc->usingle_calls; 1970 max = ioc->usingle_pages - ioc->usg_pages; 1971 len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", 1972 min, max, (int) ((max * 1000)/min)); 1973 1974 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1975 ioc->msg_calls, ioc->msg_pages, 1976 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); 1977 1978 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1979 ioc->usg_calls, ioc->usg_pages, 1980 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); 1981#endif 1982 1983 return 0; 1984} 1985 1986static int 1987sba_proc_open(struct inode *i, struct file *f) 1988{ 1989 return single_open(f, &sba_proc_info, NULL); 1990} 1991 1992static struct file_operations sba_proc_fops = { 1993 .owner = THIS_MODULE, 1994 .open = sba_proc_open, 1995 .read = seq_read, 1996 .llseek = seq_lseek, 1997 .release = single_release, 1998}; 1999 2000static int 2001sba_proc_bitmap_info(struct seq_file *m, void *p) 2002{ 2003 struct sba_device *sba_dev = sba_list; 2004 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ 2005 unsigned int *res_ptr = (unsigned int *)ioc->res_map; 2006 int i, len = 0; 2007 2008 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) { 2009 if ((i & 7) == 0) 2010 len += seq_printf(m, "\n "); 2011 len += seq_printf(m, " %08x", *res_ptr); 2012 } 2013 len += seq_printf(m, "\n"); 2014 2015 return 0; 2016} 2017 2018static int 2019sba_proc_bitmap_open(struct inode *i, struct file *f) 2020{ 2021 return single_open(f, &sba_proc_bitmap_info, NULL); 2022} 2023 2024static struct file_operations sba_proc_bitmap_fops = { 2025 .owner = THIS_MODULE, 2026 .open = sba_proc_bitmap_open, 2027 .read = seq_read, 2028 .llseek = seq_lseek, 2029 .release = single_release, 2030}; 2031#endif /* CONFIG_PROC_FS */ 2032 2033static struct parisc_device_id sba_tbl[] = { 2034 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb }, 2035 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc }, 2036 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc }, 2037 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc }, 2038 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc }, 2039 { 0, } 2040}; 2041 2042int sba_driver_callback(struct parisc_device *); 2043 2044static struct parisc_driver sba_driver = { 2045 .name = MODULE_NAME, 2046 .id_table = sba_tbl, 2047 .probe = sba_driver_callback, 2048}; 2049 2050/* 2051** Determine if sba should claim this chip (return 0) or not (return 1). 2052** If so, initialize the chip and tell other partners in crime they 2053** have work to do. 2054*/ 2055int 2056sba_driver_callback(struct parisc_device *dev) 2057{ 2058 struct sba_device *sba_dev; 2059 u32 func_class; 2060 int i; 2061 char *version; 2062 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); 2063 struct proc_dir_entry *info_entry, *bitmap_entry, *root; 2064 2065 sba_dump_ranges(sba_addr); 2066 2067 /* Read HW Rev First */ 2068 func_class = READ_REG(sba_addr + SBA_FCLASS); 2069 2070 if (IS_ASTRO(&dev->id)) { 2071 unsigned long fclass; 2072 static char astro_rev[]="Astro ?.?"; 2073 2074 /* Astro is broken...Read HW Rev First */ 2075 fclass = READ_REG(sba_addr); 2076 2077 astro_rev[6] = '1' + (char) (fclass & 0x7); 2078 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); 2079 version = astro_rev; 2080 2081 } else if (IS_IKE(&dev->id)) { 2082 static char ike_rev[] = "Ike rev ?"; 2083 ike_rev[8] = '0' + (char) (func_class & 0xff); 2084 version = ike_rev; 2085 } else if (IS_PLUTO(&dev->id)) { 2086 static char pluto_rev[]="Pluto ?.?"; 2087 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); 2088 pluto_rev[8] = '0' + (char) (func_class & 0x0f); 2089 version = pluto_rev; 2090 } else { 2091 static char reo_rev[] = "REO rev ?"; 2092 reo_rev[8] = '0' + (char) (func_class & 0xff); 2093 version = reo_rev; 2094 } 2095 2096 if (!global_ioc_cnt) { 2097 global_ioc_cnt = count_parisc_driver(&sba_driver); 2098 2099 /* Astro and Pluto have one IOC per SBA */ 2100 if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id))) 2101 global_ioc_cnt *= 2; 2102 } 2103 2104 printk(KERN_INFO "%s found %s at 0x%lx\n", 2105 MODULE_NAME, version, dev->hpa.start); 2106 2107 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL); 2108 if (!sba_dev) { 2109 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n"); 2110 return -ENOMEM; 2111 } 2112 2113 parisc_set_drvdata(dev, sba_dev); 2114 2115 for(i=0; i<MAX_IOC; i++) 2116 spin_lock_init(&(sba_dev->ioc[i].res_lock)); 2117 2118 sba_dev->dev = dev; 2119 sba_dev->hw_rev = func_class; 2120 sba_dev->iodc = &dev->id; 2121 sba_dev->name = dev->name; 2122 sba_dev->sba_hpa = sba_addr; 2123 2124 sba_get_pat_resources(sba_dev); 2125 sba_hw_init(sba_dev); 2126 sba_common_init(sba_dev); 2127 2128 hppa_dma_ops = &sba_ops; 2129 2130#ifdef CONFIG_PROC_FS 2131 switch (dev->id.hversion) { 2132 case PLUTO_MCKINLEY_PORT: 2133 root = proc_mckinley_root; 2134 break; 2135 case ASTRO_RUNWAY_PORT: 2136 case IKE_MERCED_PORT: 2137 default: 2138 root = proc_runway_root; 2139 break; 2140 } 2141 2142 info_entry = create_proc_entry("sba_iommu", 0, root); 2143 bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); 2144 2145 if (info_entry) 2146 info_entry->proc_fops = &sba_proc_fops; 2147 2148 if (bitmap_entry) 2149 bitmap_entry->proc_fops = &sba_proc_bitmap_fops; 2150#endif 2151 2152 parisc_vmerge_boundary = IOVP_SIZE; 2153 parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG; 2154 parisc_has_iommu(); 2155 return 0; 2156} 2157 2158/* 2159** One time initialization to let the world know the SBA was found. 2160** This is the only routine which is NOT static. 2161** Must be called exactly once before pci_init(). 2162*/ 2163void __init sba_init(void) 2164{ 2165 register_parisc_driver(&sba_driver); 2166} 2167 2168 2169/** 2170 * sba_get_iommu - Assign the iommu pointer for the pci bus controller. 2171 * @dev: The parisc device. 2172 * 2173 * Returns the appropriate IOMMU data for the given parisc PCI controller. 2174 * This is cached and used later for PCI DMA Mapping. 2175 */ 2176void * sba_get_iommu(struct parisc_device *pci_hba) 2177{ 2178 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2179 struct sba_device *sba = sba_dev->dev.driver_data; 2180 char t = sba_dev->id.hw_type; 2181 int iocnum = (pci_hba->hw_path >> 3); /* rope # */ 2182 2183 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT)); 2184 2185 return &(sba->ioc[iocnum]); 2186} 2187 2188 2189/** 2190 * sba_directed_lmmio - return first directed LMMIO range routed to rope 2191 * @pa_dev: The parisc device. 2192 * @r: resource PCI host controller wants start/end fields assigned. 2193 * 2194 * For the given parisc PCI controller, determine if any direct ranges 2195 * are routed down the corresponding rope. 2196 */ 2197void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r) 2198{ 2199 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2200 struct sba_device *sba = sba_dev->dev.driver_data; 2201 char t = sba_dev->id.hw_type; 2202 int i; 2203 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2204 2205 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); 2206 2207 r->start = r->end = 0; 2208 2209 /* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */ 2210 for (i=0; i<4; i++) { 2211 int base, size; 2212 void __iomem *reg = sba->sba_hpa + i*0x18; 2213 2214 base = READ_REG32(reg + LMMIO_DIRECT0_BASE); 2215 if ((base & 1) == 0) 2216 continue; /* not enabled */ 2217 2218 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE); 2219 2220 if ((size & (ROPES_PER_IOC-1)) != rope) 2221 continue; /* directed down different rope */ 2222 2223 r->start = (base & ~1UL) | PCI_F_EXTEND; 2224 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK); 2225 r->end = r->start + size; 2226 } 2227} 2228 2229 2230/** 2231 * sba_distributed_lmmio - return portion of distributed LMMIO range 2232 * @pa_dev: The parisc device. 2233 * @r: resource PCI host controller wants start/end fields assigned. 2234 * 2235 * For the given parisc PCI controller, return portion of distributed LMMIO 2236 * range. The distributed LMMIO is always present and it's just a question 2237 * of the base address and size of the range. 2238 */ 2239void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r ) 2240{ 2241 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2242 struct sba_device *sba = sba_dev->dev.driver_data; 2243 char t = sba_dev->id.hw_type; 2244 int base, size; 2245 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2246 2247 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); 2248 2249 r->start = r->end = 0; 2250 2251 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE); 2252 if ((base & 1) == 0) { 2253 BUG(); /* Gah! Distr Range wasn't enabled! */ 2254 return; 2255 } 2256 2257 r->start = (base & ~1UL) | PCI_F_EXTEND; 2258 2259 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC; 2260 r->start += rope * (size + 1); /* adjust base for this rope */ 2261 r->end = r->start + size; 2262}