Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.35-rc5 574 lines 14 kB view raw
/*
 * I/O access definitions for the Alpha architecture.
 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags;
	local_irq_save(flags);

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	local_irq_restore(flags);
}

/* Update the HAE only when it actually changes, via the cached value.  */
extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

#define page_to_phys(page)	page_to_pa(page)

static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
{
	return page_to_phys(page);
}

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	/* Addresses outside the direct-map window map to bus address 0.  */
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

/* REMAP1 generates a read-style accessor (addr -> value); REMAP2 a
   write-style one (value, addr).  QUAL carries the const/volatile
   qualifiers of the address parameter.  */
#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, /**/)
REMAP1(unsigned int, ioread16, /**/)
REMAP1(unsigned int, ioread32, /**/)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

/* Nothing is trivial in a generic kernel: everything goes through the
   machine vector, so the out-of-line versions are always used.  */
#define __IO_PREFIX		generic
#define generic_trivial_rw_bw	0
#define generic_trivial_rw_lq	0
#define generic_trivial_io_bw	0
#define generic_trivial_io_lq	0
#define generic_trivial_iounmap	0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8		inb(unsigned long port);
extern u16		inw(unsigned long port);
extern u32		inl(unsigned long port);
extern void		outb(u8 b, unsigned long port);
extern void		outw(u16 b, unsigned long port);
extern void		outl(u32 b, unsigned long port);

extern u8		readb(const volatile void __iomem *addr);
extern u16		readw(const volatile void __iomem *addr);
extern u32		readl(const volatile void __iomem *addr);
extern u64		readq(const volatile void __iomem *addr);
extern void		writeb(u8 b, volatile void __iomem *addr);
extern void		writew(u16 b, volatile void __iomem *addr);
extern void		writel(u32 b, volatile void __iomem *addr);
extern void		writeq(u64 b, volatile void __iomem *addr);

extern u8		__raw_readb(const volatile void __iomem *addr);
extern u16		__raw_readw(const volatile void __iomem *addr);
extern u32		__raw_readl(const volatile void __iomem *addr);
extern u64		__raw_readq(const volatile void __iomem *addr);
extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
extern void		__raw_writew(u16 b, volatile void __iomem *addr);
extern void		__raw_writel(u32 b, volatile void __iomem *addr);
extern void		__raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

/* No-op: there is nothing for this header to undo.  */
extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

/* The flags argument is ignored; this is plain ioremap.  */
static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
				      unsigned long flags)
{
	return ioremap(port, size);
}

/* Same as ioremap here.  */
static inline void __iomem * ioremap_nocache(unsigned long offset,
					     unsigned long size)
{
	return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
/* Macro wrapper so callers may pass pointers as well as integers.  */
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
	unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

/* The non-raw accessors add a memory barrier after each access.  */
extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}
#endif

/* The "pausing" port variants are the plain ones on Alpha.  */
#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

/* Relaxed variants skip the trailing mb() by using the raw accessors.  */
#define readb_relaxed(addr)	__raw_readb(addr)
#define readw_relaxed(addr)	__raw_readw(addr)
#define readl_relaxed(addr)	__raw_readl(addr)
#define readq_relaxed(addr)	__raw_readq(addr)

/* Expands to nothing on Alpha.  */
#define mmiowb()

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	/* Replicate the byte into all 8 lanes of the 64-bit fill pattern.  */
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	/* Replicate the 16-bit value into all 4 lanes of the fill pattern.  */
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70.  Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */