#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif
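/*
 * Illustrative example (not part of the interface): in the non-48-bit-KSEG
 * variant above, IDENT_ADDR is 0xfffffc0000000000UL, so the KSEG virtual
 * address 0xfffffc0000001000 carries 0x1000 in its low 41 bits; the
 * shift-left/arithmetic-shift-right pair leaves 0x1000, which the final
 * mask crops to hwrpb->pa_bits:
 *
 *	unsigned long phys = virt_to_phys((void *)0xfffffc0000001000UL);
 *	// phys == 0x1000, assuming pa_bits is large enough not to mask it
 */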
#define page_to_phys(page)	page_to_pa(page)

static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
{
	return page_to_phys(page);
}

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b
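/*
 * For illustration: IO_CONCAT pastes the expanded __IO_PREFIX onto an
 * operation name, so with "#define __IO_PREFIX generic" (the
 * CONFIG_ALPHA_GENERIC case below), IO_CONCAT(__IO_PREFIX,ioread8)(addr)
 * expands to generic_ioread8(addr); a chipset header that defines its own
 * __IO_PREFIX selects its own helpers the same way.
 */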
#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX			generic
#define generic_trivial_rw_bw		0
#define generic_trivial_rw_lq		0
#define generic_trivial_io_bw		0
#define generic_trivial_io_lq		0
#define generic_trivial_iounmap		0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8 inb(unsigned long port);
extern u16 inw(unsigned long port);
extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);

extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
extern u32 readl(const volatile void __iomem *addr);
extern u64 readq(const volatile void __iomem *addr);
extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);

extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
extern u32 __raw_readl(const volatile void __iomem *addr);
extern u64 __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
				      unsigned long flags)
{
	return ioremap(port, size);
}

static inline void __iomem * ioremap_nocache(unsigned long offset,
					     unsigned long size)
{
	return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}
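/*
 * Typical driver use of the mappings above, as an illustrative sketch only
 * (the base address, length and register offset are made-up values):
 *
 *	void __iomem *regs = ioremap(bar_start, bar_len);
 *	if (regs) {
 *		u32 status = readl(regs + 0x10);
 *		writel(status | 1, regs + 0x10);
 *		iounmap(regs);
 *	}
 *
 * readl()/writel() include the mb() barrier visible in the inline versions
 * below; the __raw_* and *_relaxed variants omit it.
 */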
static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)	__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}
#endif

#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl
#define readb_relaxed(addr)	__raw_readb(addr)
#define readw_relaxed(addr)	__raw_readw(addr)
#define readl_relaxed(addr)	__raw_readl(addr)
#define readq_relaxed(addr)	__raw_readq(addr)

#define mmiowb()

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
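/*
 * The multiplications above simply replicate the fill value across the
 * 64-bit pattern handed to _memset_c_io: for example, c == 0xAB gives
 * 0x0101010101010101UL * 0xAB == 0xababababababababUL, and a 16-bit
 * c == 0x1234 gives 0x1234123412341234UL.
 */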
/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */