Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic asm/io.h rewrite from Arnd Bergmann:
"While there is normally no reason to have a pull request for
asm-generic — usually all changes get merged through whichever tree
needs them — I do have a series for 3.19.

There are two sets of patches that change significant portions of
asm/io.h, and this branch contains both in order to resolve the
conflicts:

- Will Deacon has done a set of patches to ensure that all
architectures define {read,write}{b,w,l,q}_relaxed() functions or
get them by including asm-generic/io.h.

These functions are commonly used in ARM-specific drivers to avoid
the expensive L2 cache synchronization implied by the normal
{read,write}{b,w,l,q}, but we need to define them on all
architectures in order to share the drivers across architectures
and to enable CONFIG_COMPILE_TEST configurations for them.

- Thierry Reding has done an unrelated set of patches that extends
the asm-generic/io.h file to the degree necessary to make it useful
on ARM64 and potentially other architectures"

* tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (29 commits)
ARM64: use GENERIC_PCI_IOMAP
sparc: io: remove duplicate relaxed accessors on sparc32
ARM: sa11x0: Use void __iomem * in MMIO accessors
arm64: Use include/asm-generic/io.h
ARM: Use include/asm-generic/io.h
asm-generic/io.h: Implement generic {read,write}s*()
asm-generic/io.h: Reconcile I/O accessor overrides
/dev/mem: Use more consistent data types
Change xlate_dev_{kmem,mem}_ptr() prototypes
ARM: ixp4xx: Properly override I/O accessors
ARM: ixp4xx: Fix build with IXP4XX_INDIRECT_PCI
ARM: ebsa110: Properly override I/O accessors
ARC: Remove redundant PCI_IOBASE declaration
documentation: memory-barriers: clarify relaxed io accessor semantics
x86: io: implement dummy relaxed accessor macros for writes
tile: io: implement dummy relaxed accessor macros for writes
sparc: io: implement dummy relaxed accessor macros for writes
powerpc: io: implement dummy relaxed accessor macros for writes
parisc: io: implement dummy relaxed accessor macros for writes
mn10300: io: implement dummy relaxed accessor macros for writes
...

+849 -382
+8 -3
Documentation/memory-barriers.txt
··· 2465 2465 Please refer to the PCI specification for more information on interactions 2466 2466 between PCI transactions. 2467 2467 2468 - (*) readX_relaxed() 2468 + (*) readX_relaxed(), writeX_relaxed() 2469 2469 2470 - These are similar to readX(), but are not guaranteed to be ordered in any 2471 - way. Be aware that there is no I/O read barrier available. 2470 + These are similar to readX() and writeX(), but provide weaker memory 2471 + ordering guarantees. Specifically, they do not guarantee ordering with 2472 + respect to normal memory accesses (e.g. DMA buffers) nor do they guarantee 2473 + ordering with respect to LOCK or UNLOCK operations. If the latter is 2474 + required, an mmiowb() barrier can be used. Note that relaxed accesses to 2475 + the same peripheral are guaranteed to be ordered with respect to each 2476 + other. 2472 2477 2473 2478 (*) ioreadX(), iowriteX() 2474 2479
-2
arch/arc/include/asm/io.h
··· 13 13 #include <asm/byteorder.h> 14 14 #include <asm/page.h> 15 15 16 - #define PCI_IOBASE ((void __iomem *)0) 17 - 18 16 extern void __iomem *ioremap(unsigned long physaddr, unsigned long size); 19 17 extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, 20 18 unsigned long flags);
+36 -53
arch/arm/include/asm/io.h
··· 47 47 * Generic IO read/write. These perform native-endian accesses. Note 48 48 * that some architectures will want to re-define __raw_{read,write}w. 49 49 */ 50 - extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen); 51 - extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen); 52 - extern void __raw_writesl(void __iomem *addr, const void *data, int longlen); 50 + void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen); 51 + void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen); 52 + void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen); 53 53 54 - extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); 55 - extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); 56 - extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); 54 + void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen); 55 + void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen); 56 + void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); 57 57 58 58 #if __LINUX_ARM_ARCH__ < 6 59 59 /* ··· 69 69 * writeback addressing modes as these incur a significant performance 70 70 * overhead (the address generation must be emulated in software). 
71 71 */ 72 + #define __raw_writew __raw_writew 72 73 static inline void __raw_writew(u16 val, volatile void __iomem *addr) 73 74 { 74 75 asm volatile("strh %1, %0" ··· 77 76 : "r" (val)); 78 77 } 79 78 79 + #define __raw_readw __raw_readw 80 80 static inline u16 __raw_readw(const volatile void __iomem *addr) 81 81 { 82 82 u16 val; ··· 88 86 } 89 87 #endif 90 88 89 + #define __raw_writeb __raw_writeb 91 90 static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 92 91 { 93 92 asm volatile("strb %1, %0" ··· 96 93 : "r" (val)); 97 94 } 98 95 96 + #define __raw_writel __raw_writel 99 97 static inline void __raw_writel(u32 val, volatile void __iomem *addr) 100 98 { 101 99 asm volatile("str %1, %0" ··· 104 100 : "r" (val)); 105 101 } 106 102 103 + #define __raw_readb __raw_readb 107 104 static inline u8 __raw_readb(const volatile void __iomem *addr) 108 105 { 109 106 u8 val; ··· 114 109 return val; 115 110 } 116 111 112 + #define __raw_readl __raw_readl 117 113 static inline u32 __raw_readl(const volatile void __iomem *addr) 118 114 { 119 115 u32 val; ··· 273 267 #define insl(p,d,l) __raw_readsl(__io(p),d,l) 274 268 #endif 275 269 276 - #define outb_p(val,port) outb((val),(port)) 277 - #define outw_p(val,port) outw((val),(port)) 278 - #define outl_p(val,port) outl((val),(port)) 279 - #define inb_p(port) inb((port)) 280 - #define inw_p(port) inw((port)) 281 - #define inl_p(port) inl((port)) 282 - 283 - #define outsb_p(port,from,len) outsb(port,from,len) 284 - #define outsw_p(port,from,len) outsw(port,from,len) 285 - #define outsl_p(port,from,len) outsl(port,from,len) 286 - #define insb_p(port,to,len) insb(port,to,len) 287 - #define insw_p(port,to,len) insw(port,to,len) 288 - #define insl_p(port,to,len) insl(port,to,len) 289 - 290 270 /* 291 271 * String version of IO memory access ops: 292 272 */ ··· 339 347 #define iounmap __arm_iounmap 340 348 341 349 /* 342 - * io{read,write}{8,16,32} macros 350 + * io{read,write}{16,32}be() macros 343 351 */ 344 - #ifndef 
ioread8 345 - #define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; }) 346 - #define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) 347 - #define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) 352 + #define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) 353 + #define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) 348 354 349 - #define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) 350 - #define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) 355 + #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) 356 + #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) 351 357 352 - #define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); }) 353 - #define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); }) 354 - #define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); }) 355 - 356 - #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) 357 - #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) 358 - 359 - #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) 360 - #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) 361 - #define ioread32_rep(p,d,c) __raw_readsl(p,d,c) 362 - 363 - #define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) 364 - #define iowrite16_rep(p,s,c) __raw_writesw(p,s,c) 365 - #define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) 366 - 358 + #ifndef ioport_map 359 + #define ioport_map ioport_map 367 360 extern void __iomem *ioport_map(unsigned long port, unsigned int nr); 361 + #endif 362 + #ifndef ioport_unmap 363 + #define ioport_unmap ioport_unmap 368 364 extern void 
ioport_unmap(void __iomem *addr); 369 365 #endif 370 366 371 367 struct pci_dev; 372 368 369 + #define pci_iounmap pci_iounmap 373 370 extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); 371 + 372 + /* 373 + * Convert a physical pointer to a virtual kernel pointer for /dev/mem 374 + * access 375 + */ 376 + #define xlate_dev_mem_ptr(p) __va(p) 377 + 378 + /* 379 + * Convert a virtual cached pointer to an uncached pointer 380 + */ 381 + #define xlate_dev_kmem_ptr(p) p 382 + 383 + #include <asm-generic/io.h> 374 384 375 385 /* 376 386 * can the hardware map this into one segment or not, given no other ··· 394 400 extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); 395 401 extern int devmem_is_allowed(unsigned long pfn); 396 402 #endif 397 - 398 - /* 399 - * Convert a physical pointer to a virtual kernel pointer for /dev/mem 400 - * access 401 - */ 402 - #define xlate_dev_mem_ptr(p) __va(p) 403 - 404 - /* 405 - * Convert a virtual cached pointer to an uncached pointer 406 - */ 407 - #define xlate_dev_kmem_ptr(p) p 408 403 409 404 /* 410 405 * Register ISA memory and port locations for glibc iopl/inb/outb
+4
arch/arm/include/asm/memory.h
··· 274 274 * translation for translating DMA addresses. Use the driver 275 275 * DMA support - see dma-mapping.h. 276 276 */ 277 + #define virt_to_phys virt_to_phys 277 278 static inline phys_addr_t virt_to_phys(const volatile void *x) 278 279 { 279 280 return __virt_to_phys((unsigned long)(x)); 280 281 } 281 282 283 + #define phys_to_virt phys_to_virt 282 284 static inline void *phys_to_virt(phys_addr_t x) 283 285 { 284 286 return (void *)__phys_to_virt(x); ··· 324 322 #endif 325 323 326 324 #ifdef CONFIG_VIRT_TO_BUS 325 + #define virt_to_bus virt_to_bus 327 326 static inline __deprecated unsigned long virt_to_bus(void *x) 328 327 { 329 328 return __virt_to_bus((unsigned long)x); 330 329 } 331 330 331 + #define bus_to_virt bus_to_virt 332 332 static inline __deprecated void *bus_to_virt(unsigned long x) 333 333 { 334 334 return (void *)__bus_to_virt(x);
+18 -7
arch/arm/mach-ebsa110/include/mach/io.h
··· 29 29 u16 __readw(const volatile void __iomem *addr); 30 30 u32 __readl(const volatile void __iomem *addr); 31 31 32 - void __writeb(u8 val, void __iomem *addr); 33 - void __writew(u16 val, void __iomem *addr); 34 - void __writel(u32 val, void __iomem *addr); 32 + void __writeb(u8 val, volatile void __iomem *addr); 33 + void __writew(u16 val, volatile void __iomem *addr); 34 + void __writel(u32 val, volatile void __iomem *addr); 35 35 36 36 /* 37 37 * Argh, someone forgot the IOCS16 line. We therefore have to handle ··· 62 62 #define writew(v,b) __writew(v,b) 63 63 #define writel(v,b) __writel(v,b) 64 64 65 + #define insb insb 65 66 extern void insb(unsigned int port, void *buf, int sz); 67 + #define insw insw 66 68 extern void insw(unsigned int port, void *buf, int sz); 69 + #define insl insl 67 70 extern void insl(unsigned int port, void *buf, int sz); 68 71 72 + #define outsb outsb 69 73 extern void outsb(unsigned int port, const void *buf, int sz); 74 + #define outsw outsw 70 75 extern void outsw(unsigned int port, const void *buf, int sz); 76 + #define outsl outsl 71 77 extern void outsl(unsigned int port, const void *buf, int sz); 72 78 73 79 /* can't support writesb atm */ 74 - extern void writesw(void __iomem *addr, const void *data, int wordlen); 75 - extern void writesl(void __iomem *addr, const void *data, int longlen); 80 + #define writesw writesw 81 + extern void writesw(volatile void __iomem *addr, const void *data, int wordlen); 82 + #define writesl writesl 83 + extern void writesl(volatile void __iomem *addr, const void *data, int longlen); 76 84 77 85 /* can't support readsb atm */ 78 - extern void readsw(const void __iomem *addr, void *data, int wordlen); 79 - extern void readsl(const void __iomem *addr, void *data, int longlen); 86 + #define readsw readsw 87 + extern void readsw(const volatile void __iomem *addr, void *data, int wordlen); 88 + 89 + #define readsl readsl 90 + extern void readsl(const volatile void __iomem *addr, void *data, 
int longlen); 80 91 81 92 #endif
+7 -7
arch/arm/mach-ebsa110/io.c
··· 102 102 EXPORT_SYMBOL(__readw); 103 103 EXPORT_SYMBOL(__readl); 104 104 105 - void readsw(const void __iomem *addr, void *data, int len) 105 + void readsw(const volatile void __iomem *addr, void *data, int len) 106 106 { 107 107 void __iomem *a = __isamem_convert_addr(addr); 108 108 ··· 112 112 } 113 113 EXPORT_SYMBOL(readsw); 114 114 115 - void readsl(const void __iomem *addr, void *data, int len) 115 + void readsl(const volatile void __iomem *addr, void *data, int len) 116 116 { 117 117 void __iomem *a = __isamem_convert_addr(addr); 118 118 ··· 122 122 } 123 123 EXPORT_SYMBOL(readsl); 124 124 125 - void __writeb(u8 val, void __iomem *addr) 125 + void __writeb(u8 val, volatile void __iomem *addr) 126 126 { 127 127 void __iomem *a = __isamem_convert_addr(addr); 128 128 ··· 132 132 __raw_writeb(val, a); 133 133 } 134 134 135 - void __writew(u16 val, void __iomem *addr) 135 + void __writew(u16 val, volatile void __iomem *addr) 136 136 { 137 137 void __iomem *a = __isamem_convert_addr(addr); 138 138 ··· 142 142 __raw_writew(val, a); 143 143 } 144 144 145 - void __writel(u32 val, void __iomem *addr) 145 + void __writel(u32 val, volatile void __iomem *addr) 146 146 { 147 147 void __iomem *a = __isamem_convert_addr(addr); 148 148 ··· 157 157 EXPORT_SYMBOL(__writew); 158 158 EXPORT_SYMBOL(__writel); 159 159 160 - void writesw(void __iomem *addr, const void *data, int len) 160 + void writesw(volatile void __iomem *addr, const void *data, int len) 161 161 { 162 162 void __iomem *a = __isamem_convert_addr(addr); 163 163 ··· 167 167 } 168 168 EXPORT_SYMBOL(writesw); 169 169 170 - void writesl(void __iomem *addr, const void *data, int len) 170 + void writesl(volatile void __iomem *addr, const void *data, int len) 171 171 { 172 172 void __iomem *a = __isamem_convert_addr(addr); 173 173
+1 -1
arch/arm/mach-ixp4xx/common.c
··· 652 652 return (void __iomem *)addr; 653 653 } 654 654 655 - static void ixp4xx_iounmap(void __iomem *addr) 655 + static void ixp4xx_iounmap(volatile void __iomem *addr) 656 656 { 657 657 if (!is_pci_memory((__force u32)addr)) 658 658 __iounmap(addr);
+22 -2
arch/arm/mach-ixp4xx/include/mach/io.h
··· 58 58 #define writew(v, p) __indirect_writew(v, p) 59 59 #define writel(v, p) __indirect_writel(v, p) 60 60 61 + #define writeb_relaxed(v, p) __indirect_writeb(v, p) 62 + #define writew_relaxed(v, p) __indirect_writew(v, p) 63 + #define writel_relaxed(v, p) __indirect_writel(v, p) 64 + 61 65 #define writesb(p, v, l) __indirect_writesb(p, v, l) 62 66 #define writesw(p, v, l) __indirect_writesw(p, v, l) 63 67 #define writesl(p, v, l) __indirect_writesl(p, v, l) ··· 69 65 #define readb(p) __indirect_readb(p) 70 66 #define readw(p) __indirect_readw(p) 71 67 #define readl(p) __indirect_readl(p) 68 + 69 + #define readb_relaxed(p) __indirect_readb(p) 70 + #define readw_relaxed(p) __indirect_readw(p) 71 + #define readl_relaxed(p) __indirect_readl(p) 72 72 73 73 #define readsb(p, v, l) __indirect_readsb(p, v, l) 74 74 #define readsw(p, v, l) __indirect_readsw(p, v, l) ··· 107 99 u32 n, byte_enables, data; 108 100 109 101 if (!is_pci_memory(addr)) { 110 - __raw_writew(value, addr); 102 + __raw_writew(value, p); 111 103 return; 112 104 } 113 105 ··· 172 164 u32 n, byte_enables, data; 173 165 174 166 if (!is_pci_memory(addr)) 175 - return __raw_readw(addr); 167 + return __raw_readw(p); 176 168 177 169 n = addr % 4; 178 170 byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL; ··· 234 226 * I/O functions. 
235 227 */ 236 228 229 + #define outb outb 237 230 static inline void outb(u8 value, u32 addr) 238 231 { 239 232 u32 n, byte_enables, data; ··· 244 235 ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data); 245 236 } 246 237 238 + #define outsb outsb 247 239 static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count) 248 240 { 249 241 while (count--) 250 242 outb(*vaddr++, io_addr); 251 243 } 252 244 245 + #define outw outw 253 246 static inline void outw(u16 value, u32 addr) 254 247 { 255 248 u32 n, byte_enables, data; ··· 261 250 ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data); 262 251 } 263 252 253 + #define outsw outsw 264 254 static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count) 265 255 { 266 256 while (count--) 267 257 outw(cpu_to_le16(*vaddr++), io_addr); 268 258 } 269 259 260 + #define outl outl 270 261 static inline void outl(u32 value, u32 addr) 271 262 { 272 263 ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value); 273 264 } 274 265 266 + #define outsl outsl 275 267 static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count) 276 268 { 277 269 while (count--) 278 270 outl(cpu_to_le32(*vaddr++), io_addr); 279 271 } 280 272 273 + #define inb inb 281 274 static inline u8 inb(u32 addr) 282 275 { 283 276 u32 n, byte_enables, data; ··· 293 278 return data >> (8*n); 294 279 } 295 280 281 + #define insb insb 296 282 static inline void insb(u32 io_addr, u8 *vaddr, u32 count) 297 283 { 298 284 while (count--) 299 285 *vaddr++ = inb(io_addr); 300 286 } 301 287 288 + #define inw inw 302 289 static inline u16 inw(u32 addr) 303 290 { 304 291 u32 n, byte_enables, data; ··· 312 295 return data>>(8*n); 313 296 } 314 297 298 + #define insw insw 315 299 static inline void insw(u32 io_addr, u16 *vaddr, u32 count) 316 300 { 317 301 while (count--) 318 302 *vaddr++ = le16_to_cpu(inw(io_addr)); 319 303 } 320 304 305 + #define inl inl 321 306 static inline u32 inl(u32 addr) 322 307 { 323 308 u32 data; ··· 329 310 return data; 330 311 } 331 
312 313 + #define insl insl 332 314 static inline void insl(u32 io_addr, u32 *vaddr, u32 count) 333 315 { 334 316 while (count--)
+4 -4
arch/arm/mach-sa1100/pci-nanoengine.c
··· 33 33 static DEFINE_SPINLOCK(nano_lock); 34 34 35 35 static int nanoengine_get_pci_address(struct pci_bus *bus, 36 - unsigned int devfn, int where, unsigned long *address) 36 + unsigned int devfn, int where, void __iomem **address) 37 37 { 38 38 int ret = PCIBIOS_DEVICE_NOT_FOUND; 39 39 unsigned int busnr = bus->number; 40 40 41 - *address = NANO_PCI_CONFIG_SPACE_VIRT + 41 + *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT + 42 42 ((bus->number << 16) | (devfn << 8) | (where & ~3)); 43 43 44 44 ret = (busnr > 255 || devfn > 255 || where > 255) ? ··· 51 51 int size, u32 *val) 52 52 { 53 53 int ret; 54 - unsigned long address; 54 + void __iomem *address; 55 55 unsigned long flags; 56 56 u32 v; 57 57 ··· 85 85 int size, u32 val) 86 86 { 87 87 int ret; 88 - unsigned long address; 88 + void __iomem *address; 89 89 unsigned long flags; 90 90 unsigned shift; 91 91 u32 v;
+1 -1
arch/arm64/Kconfig
··· 24 24 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 25 25 select GENERIC_CPU_AUTOPROBE 26 26 select GENERIC_EARLY_IOREMAP 27 - select GENERIC_IOMAP 28 27 select GENERIC_IRQ_PROBE 29 28 select GENERIC_IRQ_SHOW 29 + select GENERIC_PCI_IOMAP 30 30 select GENERIC_SCHED_CLOCK 31 31 select GENERIC_SMP_IDLE_THREAD 32 32 select GENERIC_STRNCPY_FROM_USER
+25 -97
arch/arm64/include/asm/io.h
··· 36 36 /* 37 37 * Generic IO read/write. These perform native-endian accesses. 38 38 */ 39 + #define __raw_writeb __raw_writeb 39 40 static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 40 41 { 41 42 asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); 42 43 } 43 44 45 + #define __raw_writew __raw_writew 44 46 static inline void __raw_writew(u16 val, volatile void __iomem *addr) 45 47 { 46 48 asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); 47 49 } 48 50 51 + #define __raw_writel __raw_writel 49 52 static inline void __raw_writel(u32 val, volatile void __iomem *addr) 50 53 { 51 54 asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); 52 55 } 53 56 57 + #define __raw_writeq __raw_writeq 54 58 static inline void __raw_writeq(u64 val, volatile void __iomem *addr) 55 59 { 56 60 asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); 57 61 } 58 62 63 + #define __raw_readb __raw_readb 59 64 static inline u8 __raw_readb(const volatile void __iomem *addr) 60 65 { 61 66 u8 val; ··· 71 66 return val; 72 67 } 73 68 69 + #define __raw_readw __raw_readw 74 70 static inline u16 __raw_readw(const volatile void __iomem *addr) 75 71 { 76 72 u16 val; ··· 83 77 return val; 84 78 } 85 79 80 + #define __raw_readl __raw_readl 86 81 static inline u32 __raw_readl(const volatile void __iomem *addr) 87 82 { 88 83 u32 val; ··· 94 87 return val; 95 88 } 96 89 90 + #define __raw_readq __raw_readq 97 91 static inline u64 __raw_readq(const volatile void __iomem *addr) 98 92 { 99 93 u64 val; ··· 148 140 #define IO_SPACE_LIMIT (SZ_32M - 1) 149 141 #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) 150 142 151 - static inline u8 inb(unsigned long addr) 152 - { 153 - return readb(addr + PCI_IOBASE); 154 - } 155 - 156 - static inline u16 inw(unsigned long addr) 157 - { 158 - return readw(addr + PCI_IOBASE); 159 - } 160 - 161 - static inline u32 inl(unsigned long addr) 162 - { 163 - return readl(addr + PCI_IOBASE); 164 - } 165 - 166 - static inline void 
outb(u8 b, unsigned long addr) 167 - { 168 - writeb(b, addr + PCI_IOBASE); 169 - } 170 - 171 - static inline void outw(u16 b, unsigned long addr) 172 - { 173 - writew(b, addr + PCI_IOBASE); 174 - } 175 - 176 - static inline void outl(u32 b, unsigned long addr) 177 - { 178 - writel(b, addr + PCI_IOBASE); 179 - } 180 - 181 - #define inb_p(addr) inb(addr) 182 - #define inw_p(addr) inw(addr) 183 - #define inl_p(addr) inl(addr) 184 - 185 - #define outb_p(x, addr) outb((x), (addr)) 186 - #define outw_p(x, addr) outw((x), (addr)) 187 - #define outl_p(x, addr) outl((x), (addr)) 188 - 189 - static inline void insb(unsigned long addr, void *buffer, int count) 190 - { 191 - u8 *buf = buffer; 192 - while (count--) 193 - *buf++ = __raw_readb(addr + PCI_IOBASE); 194 - } 195 - 196 - static inline void insw(unsigned long addr, void *buffer, int count) 197 - { 198 - u16 *buf = buffer; 199 - while (count--) 200 - *buf++ = __raw_readw(addr + PCI_IOBASE); 201 - } 202 - 203 - static inline void insl(unsigned long addr, void *buffer, int count) 204 - { 205 - u32 *buf = buffer; 206 - while (count--) 207 - *buf++ = __raw_readl(addr + PCI_IOBASE); 208 - } 209 - 210 - static inline void outsb(unsigned long addr, const void *buffer, int count) 211 - { 212 - const u8 *buf = buffer; 213 - while (count--) 214 - __raw_writeb(*buf++, addr + PCI_IOBASE); 215 - } 216 - 217 - static inline void outsw(unsigned long addr, const void *buffer, int count) 218 - { 219 - const u16 *buf = buffer; 220 - while (count--) 221 - __raw_writew(*buf++, addr + PCI_IOBASE); 222 - } 223 - 224 - static inline void outsl(unsigned long addr, const void *buffer, int count) 225 - { 226 - const u32 *buf = buffer; 227 - while (count--) 228 - __raw_writel(*buf++, addr + PCI_IOBASE); 229 - } 230 - 231 - #define insb_p(port,to,len) insb(port,to,len) 232 - #define insw_p(port,to,len) insw(port,to,len) 233 - #define insl_p(port,to,len) insl(port,to,len) 234 - 235 - #define outsb_p(port,from,len) outsb(port,from,len) 236 - #define 
outsw_p(port,from,len) outsw(port,from,len) 237 - #define outsl_p(port,from,len) outsl(port,from,len) 238 - 239 143 /* 240 144 * String version of I/O memory access operations. 241 145 */ ··· 171 251 #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) 172 252 #define iounmap __iounmap 173 253 174 - #define ARCH_HAS_IOREMAP_WC 175 - #include <asm-generic/iomap.h> 176 - 177 254 /* 178 - * More restrictive address range checking than the default implementation 179 - * (PHYS_OFFSET and PHYS_MASK taken into account). 255 + * io{read,write}{16,32}be() macros 180 256 */ 181 - #define ARCH_HAS_VALID_PHYS_ADDR_RANGE 182 - extern int valid_phys_addr_range(phys_addr_t addr, size_t size); 183 - extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); 257 + #define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) 258 + #define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) 184 259 185 - extern int devmem_is_allowed(unsigned long pfn); 260 + #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) 261 + #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) 186 262 187 263 /* 188 264 * Convert a physical pointer to a virtual kernel pointer for /dev/mem ··· 190 274 * Convert a virtual cached pointer to an uncached pointer 191 275 */ 192 276 #define xlate_dev_kmem_ptr(p) p 277 + 278 + #include <asm-generic/io.h> 279 + 280 + /* 281 + * More restrictive address range checking than the default implementation 282 + * (PHYS_OFFSET and PHYS_MASK taken into account). 
283 + */ 284 + #define ARCH_HAS_VALID_PHYS_ADDR_RANGE 285 + extern int valid_phys_addr_range(phys_addr_t addr, size_t size); 286 + extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); 287 + 288 + extern int devmem_is_allowed(unsigned long pfn); 193 289 194 290 struct bio_vec; 195 291 extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+2
arch/arm64/include/asm/memory.h
··· 120 120 * translation for translating DMA addresses. Use the driver 121 121 * DMA support - see dma-mapping.h. 122 122 */ 123 + #define virt_to_phys virt_to_phys 123 124 static inline phys_addr_t virt_to_phys(const volatile void *x) 124 125 { 125 126 return __virt_to_phys((unsigned long)(x)); 126 127 } 127 128 129 + #define phys_to_virt phys_to_virt 128 130 static inline void *phys_to_virt(phys_addr_t x) 129 131 { 130 132 return (void *)(__phys_to_virt(x));
+3
arch/cris/include/asm/io.h
··· 112 112 else 113 113 *(volatile unsigned int __force *) addr = b; 114 114 } 115 + #define writeb_relaxed(b, addr) writeb(b, addr) 116 + #define writew_relaxed(b, addr) writew(b, addr) 117 + #define writel_relaxed(b, addr) writel(b, addr) 115 118 #define __raw_writeb writeb 116 119 #define __raw_writew writew 117 120 #define __raw_writel writel
+3
arch/frv/include/asm/io.h
··· 243 243 __flush_PCI_writes(); 244 244 } 245 245 246 + #define writeb_relaxed writeb 247 + #define writew_relaxed writew 248 + #define writel_relaxed writel 246 249 247 250 /* Values for nocacheflag and cmode */ 248 251 #define IOMAP_FULL_CACHING 0
+4
arch/ia64/include/asm/io.h
··· 393 393 #define writew(v,a) __writew((v), (a)) 394 394 #define writel(v,a) __writel((v), (a)) 395 395 #define writeq(v,a) __writeq((v), (a)) 396 + #define writeb_relaxed(v,a) __writeb((v), (a)) 397 + #define writew_relaxed(v,a) __writew((v), (a)) 398 + #define writel_relaxed(v,a) __writel((v), (a)) 399 + #define writeq_relaxed(v,a) __writeq((v), (a)) 396 400 #define __raw_writeb writeb 397 401 #define __raw_writew writew 398 402 #define __raw_writel writel
+8 -8
arch/ia64/include/asm/uaccess.h
··· 365 365 } 366 366 367 367 #define ARCH_HAS_TRANSLATE_MEM_PTR 1 368 - static __inline__ char * 369 - xlate_dev_mem_ptr (unsigned long p) 368 + static __inline__ void * 369 + xlate_dev_mem_ptr(phys_addr_t p) 370 370 { 371 371 struct page *page; 372 - char * ptr; 372 + void *ptr; 373 373 374 374 page = pfn_to_page(p >> PAGE_SHIFT); 375 375 if (PageUncached(page)) 376 - ptr = (char *)p + __IA64_UNCACHED_OFFSET; 376 + ptr = (void *)p + __IA64_UNCACHED_OFFSET; 377 377 else 378 378 ptr = __va(p); 379 379 ··· 383 383 /* 384 384 * Convert a virtual cached kernel memory pointer to an uncached pointer 385 385 */ 386 - static __inline__ char * 387 - xlate_dev_kmem_ptr (char * p) 386 + static __inline__ void * 387 + xlate_dev_kmem_ptr(void *p) 388 388 { 389 389 struct page *page; 390 - char * ptr; 390 + void *ptr; 391 391 392 392 page = virt_to_page((unsigned long)p); 393 393 if (PageUncached(page)) 394 - ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET; 394 + ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET; 395 395 else 396 396 ptr = p; 397 397
+3
arch/m32r/include/asm/io.h
··· 161 161 #define __raw_writeb writeb 162 162 #define __raw_writew writew 163 163 #define __raw_writel writel 164 + #define writeb_relaxed writeb 165 + #define writew_relaxed writew 166 + #define writel_relaxed writel 164 167 165 168 #define ioread8 read 166 169 #define ioread16 readw
+8
arch/m68k/include/asm/io.h
··· 3 3 #else 4 4 #include <asm/io_mm.h> 5 5 #endif 6 + 7 + #define readb_relaxed(addr) readb(addr) 8 + #define readw_relaxed(addr) readw(addr) 9 + #define readl_relaxed(addr) readl(addr) 10 + 11 + #define writeb_relaxed(b, addr) writeb(b, addr) 12 + #define writew_relaxed(b, addr) writew(b, addr) 13 + #define writel_relaxed(b, addr) writel(b, addr)
-4
arch/m68k/include/asm/io_no.h
··· 40 40 #define readl(addr) \ 41 41 ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; }) 42 42 43 - #define readb_relaxed(addr) readb(addr) 44 - #define readw_relaxed(addr) readw(addr) 45 - #define readl_relaxed(addr) readl(addr) 46 - 47 43 #define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b)) 48 44 #define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) 49 45 #define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
-8
arch/microblaze/include/asm/io.h
··· 69 69 70 70 #include <asm-generic/io.h> 71 71 72 - #define readb_relaxed readb 73 - #define readw_relaxed readw 74 - #define readl_relaxed readl 75 - 76 - #define writeb_relaxed writeb 77 - #define writew_relaxed writew 78 - #define writel_relaxed writel 79 - 80 72 #endif /* _ASM_MICROBLAZE_IO_H */
+4
arch/mn10300/include/asm/io.h
··· 67 67 #define __raw_writew writew 68 68 #define __raw_writel writel 69 69 70 + #define writeb_relaxed writeb 71 + #define writew_relaxed writew 72 + #define writel_relaxed writel 73 + 70 74 /*****************************************************************************/ 71 75 /* 72 76 * traditional input/output functions
+8 -4
arch/parisc/include/asm/io.h
··· 217 217 #define writel writel 218 218 #define writeq writeq 219 219 220 - #define readb_relaxed(addr) readb(addr) 221 - #define readw_relaxed(addr) readw(addr) 222 - #define readl_relaxed(addr) readl(addr) 223 - #define readq_relaxed(addr) readq(addr) 220 + #define readb_relaxed(addr) readb(addr) 221 + #define readw_relaxed(addr) readw(addr) 222 + #define readl_relaxed(addr) readl(addr) 223 + #define readq_relaxed(addr) readq(addr) 224 + #define writeb_relaxed(b, addr) writeb(b, addr) 225 + #define writew_relaxed(w, addr) writew(w, addr) 226 + #define writel_relaxed(l, addr) writel(l, addr) 227 + #define writeq_relaxed(q, addr) writeq(q, addr) 224 228 225 229 #define mmiowb() do { } while (0) 226 230
+8 -4
arch/powerpc/include/asm/io.h
··· 617 617 /* 618 618 * We don't do relaxed operations yet, at least not with this semantic 619 619 */ 620 - #define readb_relaxed(addr) readb(addr) 621 - #define readw_relaxed(addr) readw(addr) 622 - #define readl_relaxed(addr) readl(addr) 623 - #define readq_relaxed(addr) readq(addr) 620 + #define readb_relaxed(addr) readb(addr) 621 + #define readw_relaxed(addr) readw(addr) 622 + #define readl_relaxed(addr) readl(addr) 623 + #define readq_relaxed(addr) readq(addr) 624 + #define writeb_relaxed(v, addr) writeb(v, addr) 625 + #define writew_relaxed(v, addr) writew(v, addr) 626 + #define writel_relaxed(v, addr) writel(v, addr) 627 + #define writeq_relaxed(v, addr) writeq(v, addr) 624 628 625 629 #ifdef CONFIG_PPC32 626 630 #define mmiowb()
+3 -7
arch/s390/include/asm/io.h
··· 13 13 #include <asm/page.h> 14 14 #include <asm/pci_io.h> 15 15 16 - void *xlate_dev_mem_ptr(unsigned long phys); 17 16 #define xlate_dev_mem_ptr xlate_dev_mem_ptr 18 - void unxlate_dev_mem_ptr(unsigned long phys, void *addr); 17 + void *xlate_dev_mem_ptr(phys_addr_t phys); 18 + #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr 19 + void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 19 20 20 21 /* 21 22 * Convert a virtual cached pointer to an uncached pointer ··· 60 59 #define __raw_writew zpci_write_u16 61 60 #define __raw_writel zpci_write_u32 62 61 #define __raw_writeq zpci_write_u64 63 - 64 - #define readb_relaxed readb 65 - #define readw_relaxed readw 66 - #define readl_relaxed readl 67 - #define readq_relaxed readq 68 62 69 63 #endif /* CONFIG_PCI */ 70 64
+2 -2
arch/s390/mm/maccess.c
··· 176 176 * For swapped prefix pages a new buffer is returned that contains a copy of 177 177 * the absolute memory. The buffer size is maximum one page large. 178 178 */ 179 - void *xlate_dev_mem_ptr(unsigned long addr) 179 + void *xlate_dev_mem_ptr(phys_addr_t addr) 180 180 { 181 181 void *bounce = (void *) addr; 182 182 unsigned long size; ··· 197 197 /* 198 198 * Free converted buffer for /dev/mem access (if necessary) 199 199 */ 200 - void unxlate_dev_mem_ptr(unsigned long addr, void *buf) 200 + void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf) 201 201 { 202 202 if ((void *) addr != buf) 203 203 free_page((unsigned long) buf);
-4
arch/sparc/include/asm/io_32.h
··· 4 4 #include <linux/kernel.h> 5 5 #include <linux/ioport.h> /* struct resource */ 6 6 7 - #define readb_relaxed(__addr) readb(__addr) 8 - #define readw_relaxed(__addr) readw(__addr) 9 - #define readl_relaxed(__addr) readl(__addr) 10 - 11 7 #define IO_SPACE_LIMIT 0xffffffff 12 8 13 9 #define memset_io(d,c,sz) _memset_io(d,c,sz)
+8 -6
arch/sparc/include/asm/io_64.h
··· 101 101 * the cache by using ASI_PHYS_BYPASS_EC_E_L 102 102 */ 103 103 #define readb readb 104 + #define readb_relaxed readb 104 105 static inline u8 readb(const volatile void __iomem *addr) 105 106 { u8 ret; 106 107 ··· 113 112 } 114 113 115 114 #define readw readw 115 + #define readw_relaxed readw 116 116 static inline u16 readw(const volatile void __iomem *addr) 117 117 { u16 ret; 118 118 ··· 126 124 } 127 125 128 126 #define readl readl 127 + #define readl_relaxed readl 129 128 static inline u32 readl(const volatile void __iomem *addr) 130 129 { u32 ret; 131 130 ··· 139 136 } 140 137 141 138 #define readq readq 139 + #define readq_relaxed readq 142 140 static inline u64 readq(const volatile void __iomem *addr) 143 141 { u64 ret; 144 142 ··· 152 148 } 153 149 154 150 #define writeb writeb 151 + #define writeb_relaxed writeb 155 152 static inline void writeb(u8 b, volatile void __iomem *addr) 156 153 { 157 154 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */" ··· 162 157 } 163 158 164 159 #define writew writew 160 + #define writew_relaxed writew 165 161 static inline void writew(u16 w, volatile void __iomem *addr) 166 162 { 167 163 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */" ··· 172 166 } 173 167 174 168 #define writel writel 169 + #define writel_relaxed writel 175 170 static inline void writel(u32 l, volatile void __iomem *addr) 176 171 { 177 172 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */" ··· 182 175 } 183 176 184 177 #define writeq writeq 178 + #define writeq_relaxed writeq 185 179 static inline void writeq(u64 q, volatile void __iomem *addr) 186 180 { 187 181 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */" ··· 190 182 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 191 183 : "memory"); 192 184 } 193 - 194 185 195 186 #define inb inb 196 187 static inline u8 inb(unsigned long addr) ··· 270 263 { 271 264 outsl((unsigned long __force)port, buf, count); 272 265 } 273 - 274 - #define 
readb_relaxed(__addr) readb(__addr) 275 - #define readw_relaxed(__addr) readw(__addr) 276 - #define readl_relaxed(__addr) readl(__addr) 277 - #define readq_relaxed(__addr) readq(__addr) 278 266 279 267 /* Valid I/O Space regions are anywhere, because each PCI bus supported 280 268 * can live in an arbitrary area of the physical address range.
+4
arch/tile/include/asm/io.h
··· 241 241 #define readw_relaxed readw 242 242 #define readl_relaxed readl 243 243 #define readq_relaxed readq 244 + #define writeb_relaxed writeb 245 + #define writew_relaxed writew 246 + #define writel_relaxed writel 247 + #define writeq_relaxed writeq 244 248 245 249 #define ioread8 readb 246 250 #define ioread16 readw
+6 -2
arch/x86/include/asm/io.h
··· 74 74 #define __raw_readw __readw 75 75 #define __raw_readl __readl 76 76 77 + #define writeb_relaxed(v, a) __writeb(v, a) 78 + #define writew_relaxed(v, a) __writew(v, a) 79 + #define writel_relaxed(v, a) __writel(v, a) 77 80 #define __raw_writeb __writeb 78 81 #define __raw_writew __writew 79 82 #define __raw_writel __writel ··· 89 86 build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 90 87 91 88 #define readq_relaxed(a) readq(a) 89 + #define writeq_relaxed(v, a) writeq(v, a) 92 90 93 91 #define __raw_readq(a) readq(a) 94 92 #define __raw_writeq(val, addr) writeq(val, addr) ··· 314 310 BUILDIO(w, w, short) 315 311 BUILDIO(l, , int) 316 312 317 - extern void *xlate_dev_mem_ptr(unsigned long phys); 318 - extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); 313 + extern void *xlate_dev_mem_ptr(phys_addr_t phys); 314 + extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); 319 315 320 316 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, 321 317 unsigned long prot_val);
+2 -2
arch/x86/mm/ioremap.c
··· 327 327 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 328 328 * access 329 329 */ 330 - void *xlate_dev_mem_ptr(unsigned long phys) 330 + void *xlate_dev_mem_ptr(phys_addr_t phys) 331 331 { 332 332 void *addr; 333 333 unsigned long start = phys & PAGE_MASK; ··· 343 343 return addr; 344 344 } 345 345 346 - void unxlate_dev_mem_ptr(unsigned long phys, void *addr) 346 + void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) 347 347 { 348 348 if (page_is_ram(phys >> PAGE_SHIFT)) 349 349 return;
-7
arch/xtensa/include/asm/io.h
··· 74 74 75 75 #endif /* CONFIG_MMU */ 76 76 77 - /* 78 - * Generic I/O 79 - */ 80 - #define readb_relaxed readb 81 - #define readw_relaxed readw 82 - #define readl_relaxed readl 83 - 84 77 #endif /* __KERNEL__ */ 85 78 86 79 #include <asm-generic/io.h>
+8 -5
drivers/char/mem.c
··· 84 84 } 85 85 #endif 86 86 87 - void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) 87 + #ifndef unxlate_dev_mem_ptr 88 + #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr 89 + void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) 88 90 { 89 91 } 92 + #endif 90 93 91 94 /* 92 95 * This funcion reads the *physical* memory. The f_pos points directly to the ··· 100 97 { 101 98 phys_addr_t p = *ppos; 102 99 ssize_t read, sz; 103 - char *ptr; 100 + void *ptr; 104 101 105 102 if (p != *ppos) 106 103 return 0; ··· 403 400 * uncached, then it must also be accessed uncached 404 401 * by the kernel or data corruption may occur 405 402 */ 406 - kbuf = xlate_dev_kmem_ptr((char *)p); 403 + kbuf = xlate_dev_kmem_ptr((void *)p); 407 404 408 405 if (copy_to_user(buf, kbuf, sz)) 409 406 return -EFAULT; ··· 464 461 #endif 465 462 466 463 while (count > 0) { 467 - char *ptr; 464 + void *ptr; 468 465 469 466 sz = size_inside_page(p, count); 470 467 ··· 473 470 * it must also be accessed uncached by the kernel or data 474 471 * corruption may occur. 475 472 */ 476 - ptr = xlate_dev_kmem_ptr((char *)p); 473 + ptr = xlate_dev_kmem_ptr((void *)p); 477 474 478 475 copied = copy_from_user(ptr, buf, sz); 479 476 if (copied) {
+639 -142
include/asm-generic/io.h
··· 12 12 #define __ASM_GENERIC_IO_H 13 13 14 14 #include <asm/page.h> /* I/O is all done through memory accesses */ 15 + #include <linux/string.h> /* for memset() and memcpy() */ 15 16 #include <linux/types.h> 16 17 17 18 #ifdef CONFIG_GENERIC_IOMAP ··· 25 24 #define mmiowb() do {} while (0) 26 25 #endif 27 26 28 - /*****************************************************************************/ 29 27 /* 30 - * readX/writeX() are used to access memory mapped devices. On some 31 - * architectures the memory mapped IO stuff needs to be accessed 32 - * differently. On the simple architectures, we just read/write the 33 - * memory location directly. 28 + * __raw_{read,write}{b,w,l,q}() access memory in native endianness. 29 + * 30 + * On some architectures memory mapped IO needs to be accessed differently. 31 + * On the simple architectures, we just read/write the memory location 32 + * directly. 34 33 */ 34 + 35 35 #ifndef __raw_readb 36 + #define __raw_readb __raw_readb 36 37 static inline u8 __raw_readb(const volatile void __iomem *addr) 37 38 { 38 - return *(const volatile u8 __force *) addr; 39 + return *(const volatile u8 __force *)addr; 39 40 } 40 41 #endif 41 42 42 43 #ifndef __raw_readw 44 + #define __raw_readw __raw_readw 43 45 static inline u16 __raw_readw(const volatile void __iomem *addr) 44 46 { 45 - return *(const volatile u16 __force *) addr; 47 + return *(const volatile u16 __force *)addr; 46 48 } 47 49 #endif 48 50 49 51 #ifndef __raw_readl 52 + #define __raw_readl __raw_readl 50 53 static inline u32 __raw_readl(const volatile void __iomem *addr) 51 54 { 52 - return *(const volatile u32 __force *) addr; 55 + return *(const volatile u32 __force *)addr; 53 56 } 54 57 #endif 55 58 56 - #define readb __raw_readb 59 + #ifdef CONFIG_64BIT 60 + #ifndef __raw_readq 61 + #define __raw_readq __raw_readq 62 + static inline u64 __raw_readq(const volatile void __iomem *addr) 63 + { 64 + return *(const volatile u64 __force *)addr; 65 + } 66 + #endif 67 + #endif /* 
CONFIG_64BIT */ 57 68 69 + #ifndef __raw_writeb 70 + #define __raw_writeb __raw_writeb 71 + static inline void __raw_writeb(u8 value, volatile void __iomem *addr) 72 + { 73 + *(volatile u8 __force *)addr = value; 74 + } 75 + #endif 76 + 77 + #ifndef __raw_writew 78 + #define __raw_writew __raw_writew 79 + static inline void __raw_writew(u16 value, volatile void __iomem *addr) 80 + { 81 + *(volatile u16 __force *)addr = value; 82 + } 83 + #endif 84 + 85 + #ifndef __raw_writel 86 + #define __raw_writel __raw_writel 87 + static inline void __raw_writel(u32 value, volatile void __iomem *addr) 88 + { 89 + *(volatile u32 __force *)addr = value; 90 + } 91 + #endif 92 + 93 + #ifdef CONFIG_64BIT 94 + #ifndef __raw_writeq 95 + #define __raw_writeq __raw_writeq 96 + static inline void __raw_writeq(u64 value, volatile void __iomem *addr) 97 + { 98 + *(volatile u64 __force *)addr = value; 99 + } 100 + #endif 101 + #endif /* CONFIG_64BIT */ 102 + 103 + /* 104 + * {read,write}{b,w,l,q}() access little endian memory and return result in 105 + * native endianness. 
106 + */ 107 + 108 + #ifndef readb 109 + #define readb readb 110 + static inline u8 readb(const volatile void __iomem *addr) 111 + { 112 + return __raw_readb(addr); 113 + } 114 + #endif 115 + 116 + #ifndef readw 58 117 #define readw readw 59 118 static inline u16 readw(const volatile void __iomem *addr) 60 119 { 61 120 return __le16_to_cpu(__raw_readw(addr)); 62 121 } 122 + #endif 63 123 124 + #ifndef readl 64 125 #define readl readl 65 126 static inline u32 readl(const volatile void __iomem *addr) 66 127 { 67 128 return __le32_to_cpu(__raw_readl(addr)); 68 129 } 69 - 70 - #ifndef __raw_writeb 71 - static inline void __raw_writeb(u8 b, volatile void __iomem *addr) 72 - { 73 - *(volatile u8 __force *) addr = b; 74 - } 75 130 #endif 76 - 77 - #ifndef __raw_writew 78 - static inline void __raw_writew(u16 b, volatile void __iomem *addr) 79 - { 80 - *(volatile u16 __force *) addr = b; 81 - } 82 - #endif 83 - 84 - #ifndef __raw_writel 85 - static inline void __raw_writel(u32 b, volatile void __iomem *addr) 86 - { 87 - *(volatile u32 __force *) addr = b; 88 - } 89 - #endif 90 - 91 - #define writeb __raw_writeb 92 - #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) 93 - #define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) 94 131 95 132 #ifdef CONFIG_64BIT 96 - #ifndef __raw_readq 97 - static inline u64 __raw_readq(const volatile void __iomem *addr) 98 - { 99 - return *(const volatile u64 __force *) addr; 100 - } 101 - #endif 102 - 133 + #ifndef readq 103 134 #define readq readq 104 135 static inline u64 readq(const volatile void __iomem *addr) 105 136 { 106 137 return __le64_to_cpu(__raw_readq(addr)); 107 138 } 108 - 109 - #ifndef __raw_writeq 110 - static inline void __raw_writeq(u64 b, volatile void __iomem *addr) 111 - { 112 - *(volatile u64 __force *) addr = b; 113 - } 114 139 #endif 115 - 116 - #define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr) 117 140 #endif /* CONFIG_64BIT */ 118 141 119 - #ifndef PCI_IOBASE 120 - #define PCI_IOBASE ((void 
__iomem *) 0) 142 + #ifndef writeb 143 + #define writeb writeb 144 + static inline void writeb(u8 value, volatile void __iomem *addr) 145 + { 146 + __raw_writeb(value, addr); 147 + } 121 148 #endif 122 149 123 - /*****************************************************************************/ 150 + #ifndef writew 151 + #define writew writew 152 + static inline void writew(u16 value, volatile void __iomem *addr) 153 + { 154 + __raw_writew(cpu_to_le16(value), addr); 155 + } 156 + #endif 157 + 158 + #ifndef writel 159 + #define writel writel 160 + static inline void writel(u32 value, volatile void __iomem *addr) 161 + { 162 + __raw_writel(__cpu_to_le32(value), addr); 163 + } 164 + #endif 165 + 166 + #ifdef CONFIG_64BIT 167 + #ifndef writeq 168 + #define writeq writeq 169 + static inline void writeq(u64 value, volatile void __iomem *addr) 170 + { 171 + __raw_writeq(__cpu_to_le64(value), addr); 172 + } 173 + #endif 174 + #endif /* CONFIG_64BIT */ 175 + 124 176 /* 125 - * traditional input/output functions 177 + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but 178 + * are not guaranteed to provide ordering against spinlocks or memory 179 + * accesses. 
126 180 */ 181 + #ifndef readb_relaxed 182 + #define readb_relaxed readb 183 + #endif 127 184 128 - static inline u8 inb(unsigned long addr) 129 - { 130 - return readb(addr + PCI_IOBASE); 131 - } 185 + #ifndef readw_relaxed 186 + #define readw_relaxed readw 187 + #endif 132 188 133 - static inline u16 inw(unsigned long addr) 134 - { 135 - return readw(addr + PCI_IOBASE); 136 - } 189 + #ifndef readl_relaxed 190 + #define readl_relaxed readl 191 + #endif 137 192 138 - static inline u32 inl(unsigned long addr) 139 - { 140 - return readl(addr + PCI_IOBASE); 141 - } 193 + #ifndef readq_relaxed 194 + #define readq_relaxed readq 195 + #endif 142 196 143 - static inline void outb(u8 b, unsigned long addr) 144 - { 145 - writeb(b, addr + PCI_IOBASE); 146 - } 197 + #ifndef writeb_relaxed 198 + #define writeb_relaxed writeb 199 + #endif 147 200 148 - static inline void outw(u16 b, unsigned long addr) 149 - { 150 - writew(b, addr + PCI_IOBASE); 151 - } 201 + #ifndef writew_relaxed 202 + #define writew_relaxed writew 203 + #endif 152 204 153 - static inline void outl(u32 b, unsigned long addr) 154 - { 155 - writel(b, addr + PCI_IOBASE); 156 - } 205 + #ifndef writel_relaxed 206 + #define writel_relaxed writel 207 + #endif 157 208 158 - #define inb_p(addr) inb(addr) 159 - #define inw_p(addr) inw(addr) 160 - #define inl_p(addr) inl(addr) 161 - #define outb_p(x, addr) outb((x), (addr)) 162 - #define outw_p(x, addr) outw((x), (addr)) 163 - #define outl_p(x, addr) outl((x), (addr)) 209 + #ifndef writeq_relaxed 210 + #define writeq_relaxed writeq 211 + #endif 164 212 165 - #ifndef insb 166 - static inline void insb(unsigned long addr, void *buffer, int count) 213 + /* 214 + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in 215 + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). 
216 + */ 217 + #ifndef readsb 218 + #define readsb readsb 219 + static inline void readsb(const volatile void __iomem *addr, void *buffer, 220 + unsigned int count) 167 221 { 168 222 if (count) { 169 223 u8 *buf = buffer; 224 + 170 225 do { 171 - u8 x = __raw_readb(addr + PCI_IOBASE); 226 + u8 x = __raw_readb(addr); 172 227 *buf++ = x; 173 228 } while (--count); 174 229 } 175 230 } 176 231 #endif 177 232 178 - #ifndef insw 179 - static inline void insw(unsigned long addr, void *buffer, int count) 233 + #ifndef readsw 234 + #define readsw readsw 235 + static inline void readsw(const volatile void __iomem *addr, void *buffer, 236 + unsigned int count) 180 237 { 181 238 if (count) { 182 239 u16 *buf = buffer; 240 + 183 241 do { 184 - u16 x = __raw_readw(addr + PCI_IOBASE); 242 + u16 x = __raw_readw(addr); 185 243 *buf++ = x; 186 244 } while (--count); 187 245 } 188 246 } 189 247 #endif 190 248 191 - #ifndef insl 192 - static inline void insl(unsigned long addr, void *buffer, int count) 249 + #ifndef readsl 250 + #define readsl readsl 251 + static inline void readsl(const volatile void __iomem *addr, void *buffer, 252 + unsigned int count) 193 253 { 194 254 if (count) { 195 255 u32 *buf = buffer; 256 + 196 257 do { 197 - u32 x = __raw_readl(addr + PCI_IOBASE); 258 + u32 x = __raw_readl(addr); 198 259 *buf++ = x; 199 260 } while (--count); 200 261 } 201 262 } 202 263 #endif 203 264 204 - #ifndef outsb 205 - static inline void outsb(unsigned long addr, const void *buffer, int count) 265 + #ifdef CONFIG_64BIT 266 + #ifndef readsq 267 + #define readsq readsq 268 + static inline void readsq(const volatile void __iomem *addr, void *buffer, 269 + unsigned int count) 270 + { 271 + if (count) { 272 + u64 *buf = buffer; 273 + 274 + do { 275 + u64 x = __raw_readq(addr); 276 + *buf++ = x; 277 + } while (--count); 278 + } 279 + } 280 + #endif 281 + #endif /* CONFIG_64BIT */ 282 + 283 + #ifndef writesb 284 + #define writesb writesb 285 + static inline void writesb(volatile void 
__iomem *addr, const void *buffer, 286 + unsigned int count) 206 287 { 207 288 if (count) { 208 289 const u8 *buf = buffer; 290 + 209 291 do { 210 - __raw_writeb(*buf++, addr + PCI_IOBASE); 292 + __raw_writeb(*buf++, addr); 211 293 } while (--count); 212 294 } 213 295 } 214 296 #endif 215 297 216 - #ifndef outsw 217 - static inline void outsw(unsigned long addr, const void *buffer, int count) 298 + #ifndef writesw 299 + #define writesw writesw 300 + static inline void writesw(volatile void __iomem *addr, const void *buffer, 301 + unsigned int count) 218 302 { 219 303 if (count) { 220 304 const u16 *buf = buffer; 305 + 221 306 do { 222 - __raw_writew(*buf++, addr + PCI_IOBASE); 307 + __raw_writew(*buf++, addr); 223 308 } while (--count); 224 309 } 225 310 } 226 311 #endif 227 312 228 - #ifndef outsl 229 - static inline void outsl(unsigned long addr, const void *buffer, int count) 313 + #ifndef writesl 314 + #define writesl writesl 315 + static inline void writesl(volatile void __iomem *addr, const void *buffer, 316 + unsigned int count) 230 317 { 231 318 if (count) { 232 319 const u32 *buf = buffer; 320 + 233 321 do { 234 - __raw_writel(*buf++, addr + PCI_IOBASE); 322 + __raw_writel(*buf++, addr); 235 323 } while (--count); 236 324 } 237 325 } 238 326 #endif 239 327 240 - #ifndef CONFIG_GENERIC_IOMAP 241 - #define ioread8(addr) readb(addr) 242 - #define ioread16(addr) readw(addr) 243 - #define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) 244 - #define ioread32(addr) readl(addr) 245 - #define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) 328 + #ifdef CONFIG_64BIT 329 + #ifndef writesq 330 + #define writesq writesq 331 + static inline void writesq(volatile void __iomem *addr, const void *buffer, 332 + unsigned int count) 333 + { 334 + if (count) { 335 + const u64 *buf = buffer; 246 336 247 - #define iowrite8(v, addr) writeb((v), (addr)) 248 - #define iowrite16(v, addr) writew((v), (addr)) 249 - #define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) 
250 - #define iowrite32(v, addr) writel((v), (addr)) 251 - #define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) 337 + do { 338 + __raw_writeq(*buf++, addr); 339 + } while (--count); 340 + } 341 + } 342 + #endif 343 + #endif /* CONFIG_64BIT */ 252 344 253 - #define ioread8_rep(p, dst, count) \ 254 - insb((unsigned long) (p), (dst), (count)) 255 - #define ioread16_rep(p, dst, count) \ 256 - insw((unsigned long) (p), (dst), (count)) 257 - #define ioread32_rep(p, dst, count) \ 258 - insl((unsigned long) (p), (dst), (count)) 259 - 260 - #define iowrite8_rep(p, src, count) \ 261 - outsb((unsigned long) (p), (src), (count)) 262 - #define iowrite16_rep(p, src, count) \ 263 - outsw((unsigned long) (p), (src), (count)) 264 - #define iowrite32_rep(p, src, count) \ 265 - outsl((unsigned long) (p), (src), (count)) 266 - #endif /* CONFIG_GENERIC_IOMAP */ 345 + #ifndef PCI_IOBASE 346 + #define PCI_IOBASE ((void __iomem *)0) 347 + #endif 267 348 268 349 #ifndef IO_SPACE_LIMIT 269 350 #define IO_SPACE_LIMIT 0xffff 270 351 #endif 271 352 353 + /* 354 + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be 355 + * implemented on hardware that needs an additional delay for I/O accesses to 356 + * take effect. 
357 + */ 358 + 359 + #ifndef inb 360 + #define inb inb 361 + static inline u8 inb(unsigned long addr) 362 + { 363 + return readb(PCI_IOBASE + addr); 364 + } 365 + #endif 366 + 367 + #ifndef inw 368 + #define inw inw 369 + static inline u16 inw(unsigned long addr) 370 + { 371 + return readw(PCI_IOBASE + addr); 372 + } 373 + #endif 374 + 375 + #ifndef inl 376 + #define inl inl 377 + static inline u32 inl(unsigned long addr) 378 + { 379 + return readl(PCI_IOBASE + addr); 380 + } 381 + #endif 382 + 383 + #ifndef outb 384 + #define outb outb 385 + static inline void outb(u8 value, unsigned long addr) 386 + { 387 + writeb(value, PCI_IOBASE + addr); 388 + } 389 + #endif 390 + 391 + #ifndef outw 392 + #define outw outw 393 + static inline void outw(u16 value, unsigned long addr) 394 + { 395 + writew(value, PCI_IOBASE + addr); 396 + } 397 + #endif 398 + 399 + #ifndef outl 400 + #define outl outl 401 + static inline void outl(u32 value, unsigned long addr) 402 + { 403 + writel(value, PCI_IOBASE + addr); 404 + } 405 + #endif 406 + 407 + #ifndef inb_p 408 + #define inb_p inb_p 409 + static inline u8 inb_p(unsigned long addr) 410 + { 411 + return inb(addr); 412 + } 413 + #endif 414 + 415 + #ifndef inw_p 416 + #define inw_p inw_p 417 + static inline u16 inw_p(unsigned long addr) 418 + { 419 + return inw(addr); 420 + } 421 + #endif 422 + 423 + #ifndef inl_p 424 + #define inl_p inl_p 425 + static inline u32 inl_p(unsigned long addr) 426 + { 427 + return inl(addr); 428 + } 429 + #endif 430 + 431 + #ifndef outb_p 432 + #define outb_p outb_p 433 + static inline void outb_p(u8 value, unsigned long addr) 434 + { 435 + outb(value, addr); 436 + } 437 + #endif 438 + 439 + #ifndef outw_p 440 + #define outw_p outw_p 441 + static inline void outw_p(u16 value, unsigned long addr) 442 + { 443 + outw(value, addr); 444 + } 445 + #endif 446 + 447 + #ifndef outl_p 448 + #define outl_p outl_p 449 + static inline void outl_p(u32 value, unsigned long addr) 450 + { 451 + outl(value, addr); 452 + } 453 
+ #endif 454 + 455 + /* 456 + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a 457 + * single I/O port multiple times. 458 + */ 459 + 460 + #ifndef insb 461 + #define insb insb 462 + static inline void insb(unsigned long addr, void *buffer, unsigned int count) 463 + { 464 + readsb(PCI_IOBASE + addr, buffer, count); 465 + } 466 + #endif 467 + 468 + #ifndef insw 469 + #define insw insw 470 + static inline void insw(unsigned long addr, void *buffer, unsigned int count) 471 + { 472 + readsw(PCI_IOBASE + addr, buffer, count); 473 + } 474 + #endif 475 + 476 + #ifndef insl 477 + #define insl insl 478 + static inline void insl(unsigned long addr, void *buffer, unsigned int count) 479 + { 480 + readsl(PCI_IOBASE + addr, buffer, count); 481 + } 482 + #endif 483 + 484 + #ifndef outsb 485 + #define outsb outsb 486 + static inline void outsb(unsigned long addr, const void *buffer, 487 + unsigned int count) 488 + { 489 + writesb(PCI_IOBASE + addr, buffer, count); 490 + } 491 + #endif 492 + 493 + #ifndef outsw 494 + #define outsw outsw 495 + static inline void outsw(unsigned long addr, const void *buffer, 496 + unsigned int count) 497 + { 498 + writesw(PCI_IOBASE + addr, buffer, count); 499 + } 500 + #endif 501 + 502 + #ifndef outsl 503 + #define outsl outsl 504 + static inline void outsl(unsigned long addr, const void *buffer, 505 + unsigned int count) 506 + { 507 + writesl(PCI_IOBASE + addr, buffer, count); 508 + } 509 + #endif 510 + 511 + #ifndef insb_p 512 + #define insb_p insb_p 513 + static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) 514 + { 515 + insb(addr, buffer, count); 516 + } 517 + #endif 518 + 519 + #ifndef insw_p 520 + #define insw_p insw_p 521 + static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) 522 + { 523 + insw(addr, buffer, count); 524 + } 525 + #endif 526 + 527 + #ifndef insl_p 528 + #define insl_p insl_p 529 + static inline void insl_p(unsigned long addr, void *buffer, 
unsigned int count) 530 + { 531 + insl(addr, buffer, count); 532 + } 533 + #endif 534 + 535 + #ifndef outsb_p 536 + #define outsb_p outsb_p 537 + static inline void outsb_p(unsigned long addr, const void *buffer, 538 + unsigned int count) 539 + { 540 + outsb(addr, buffer, count); 541 + } 542 + #endif 543 + 544 + #ifndef outsw_p 545 + #define outsw_p outsw_p 546 + static inline void outsw_p(unsigned long addr, const void *buffer, 547 + unsigned int count) 548 + { 549 + outsw(addr, buffer, count); 550 + } 551 + #endif 552 + 553 + #ifndef outsl_p 554 + #define outsl_p outsl_p 555 + static inline void outsl_p(unsigned long addr, const void *buffer, 556 + unsigned int count) 557 + { 558 + outsl(addr, buffer, count); 559 + } 560 + #endif 561 + 562 + #ifndef CONFIG_GENERIC_IOMAP 563 + #ifndef ioread8 564 + #define ioread8 ioread8 565 + static inline u8 ioread8(const volatile void __iomem *addr) 566 + { 567 + return readb(addr); 568 + } 569 + #endif 570 + 571 + #ifndef ioread16 572 + #define ioread16 ioread16 573 + static inline u16 ioread16(const volatile void __iomem *addr) 574 + { 575 + return readw(addr); 576 + } 577 + #endif 578 + 579 + #ifndef ioread32 580 + #define ioread32 ioread32 581 + static inline u32 ioread32(const volatile void __iomem *addr) 582 + { 583 + return readl(addr); 584 + } 585 + #endif 586 + 587 + #ifndef iowrite8 588 + #define iowrite8 iowrite8 589 + static inline void iowrite8(u8 value, volatile void __iomem *addr) 590 + { 591 + writeb(value, addr); 592 + } 593 + #endif 594 + 595 + #ifndef iowrite16 596 + #define iowrite16 iowrite16 597 + static inline void iowrite16(u16 value, volatile void __iomem *addr) 598 + { 599 + writew(value, addr); 600 + } 601 + #endif 602 + 603 + #ifndef iowrite32 604 + #define iowrite32 iowrite32 605 + static inline void iowrite32(u32 value, volatile void __iomem *addr) 606 + { 607 + writel(value, addr); 608 + } 609 + #endif 610 + 611 + #ifndef ioread16be 612 + #define ioread16be ioread16be 613 + static inline u16 
ioread16be(const volatile void __iomem *addr) 614 + { 615 + return __be16_to_cpu(__raw_readw(addr)); 616 + } 617 + #endif 618 + 619 + #ifndef ioread32be 620 + #define ioread32be ioread32be 621 + static inline u32 ioread32be(const volatile void __iomem *addr) 622 + { 623 + return __be32_to_cpu(__raw_readl(addr)); 624 + } 625 + #endif 626 + 627 + #ifndef iowrite16be 628 + #define iowrite16be iowrite16be 629 + static inline void iowrite16be(u16 value, void volatile __iomem *addr) 630 + { 631 + __raw_writew(__cpu_to_be16(value), addr); 632 + } 633 + #endif 634 + 635 + #ifndef iowrite32be 636 + #define iowrite32be iowrite32be 637 + static inline void iowrite32be(u32 value, volatile void __iomem *addr) 638 + { 639 + __raw_writel(__cpu_to_be32(value), addr); 640 + } 641 + #endif 642 + 643 + #ifndef ioread8_rep 644 + #define ioread8_rep ioread8_rep 645 + static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, 646 + unsigned int count) 647 + { 648 + readsb(addr, buffer, count); 649 + } 650 + #endif 651 + 652 + #ifndef ioread16_rep 653 + #define ioread16_rep ioread16_rep 654 + static inline void ioread16_rep(const volatile void __iomem *addr, 655 + void *buffer, unsigned int count) 656 + { 657 + readsw(addr, buffer, count); 658 + } 659 + #endif 660 + 661 + #ifndef ioread32_rep 662 + #define ioread32_rep ioread32_rep 663 + static inline void ioread32_rep(const volatile void __iomem *addr, 664 + void *buffer, unsigned int count) 665 + { 666 + readsl(addr, buffer, count); 667 + } 668 + #endif 669 + 670 + #ifndef iowrite8_rep 671 + #define iowrite8_rep iowrite8_rep 672 + static inline void iowrite8_rep(volatile void __iomem *addr, 673 + const void *buffer, 674 + unsigned int count) 675 + { 676 + writesb(addr, buffer, count); 677 + } 678 + #endif 679 + 680 + #ifndef iowrite16_rep 681 + #define iowrite16_rep iowrite16_rep 682 + static inline void iowrite16_rep(volatile void __iomem *addr, 683 + const void *buffer, 684 + unsigned int count) 685 + { 686 + 
writesw(addr, buffer, count); 687 + } 688 + #endif 689 + 690 + #ifndef iowrite32_rep 691 + #define iowrite32_rep iowrite32_rep 692 + static inline void iowrite32_rep(volatile void __iomem *addr, 693 + const void *buffer, 694 + unsigned int count) 695 + { 696 + writesl(addr, buffer, count); 697 + } 698 + #endif 699 + #endif /* CONFIG_GENERIC_IOMAP */ 700 + 272 701 #ifdef __KERNEL__ 273 702 274 703 #include <linux/vmalloc.h> 275 - #define __io_virt(x) ((void __force *) (x)) 704 + #define __io_virt(x) ((void __force *)(x)) 276 705 277 706 #ifndef CONFIG_GENERIC_IOMAP 278 707 struct pci_dev; 279 708 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 280 709 281 710 #ifndef pci_iounmap 711 + #define pci_iounmap pci_iounmap 282 712 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) 283 713 { 284 714 } ··· 721 289 * These are pretty trivial 722 290 */ 723 291 #ifndef virt_to_phys 292 + #define virt_to_phys virt_to_phys 724 293 static inline unsigned long virt_to_phys(volatile void *address) 725 294 { 726 295 return __pa((unsigned long)address); 727 296 } 297 + #endif 728 298 299 + #ifndef phys_to_virt 300 + #define phys_to_virt phys_to_virt 729 301 static inline void *phys_to_virt(unsigned long address) 730 302 { 731 303 return __va(address); ··· 742 306 * This implementation is for the no-MMU case only... if you have an MMU 743 307 * you'll need to provide your own definitions. 
744 308 */ 745 - #ifndef CONFIG_MMU 746 - static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) 747 - { 748 - return (void __iomem*) (unsigned long)offset; 749 - } 750 309 751 - #define __ioremap(offset, size, flags) ioremap(offset, size) 310 + #ifndef CONFIG_MMU 311 + #ifndef ioremap 312 + #define ioremap ioremap 313 + static inline void __iomem *ioremap(phys_addr_t offset, size_t size) 314 + { 315 + return (void __iomem *)(unsigned long)offset; 316 + } 317 + #endif 318 + 319 + #ifndef __ioremap 320 + #define __ioremap __ioremap 321 + static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, 322 + unsigned long flags) 323 + { 324 + return ioremap(offset, size); 325 + } 326 + #endif 752 327 753 328 #ifndef ioremap_nocache 754 - #define ioremap_nocache ioremap 329 + #define ioremap_nocache ioremap_nocache 330 + static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) 331 + { 332 + return ioremap(offset, size); 333 + } 755 334 #endif 756 335 757 336 #ifndef ioremap_wc 758 - #define ioremap_wc ioremap_nocache 337 + #define ioremap_wc ioremap_wc 338 + static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) 339 + { 340 + return ioremap_nocache(offset, size); 341 + } 759 342 #endif 760 343 344 + #ifndef iounmap 345 + #define iounmap iounmap 761 346 static inline void iounmap(void __iomem *addr) 762 347 { 763 348 } 349 + #endif 764 350 #endif /* CONFIG_MMU */ 765 351 766 352 #ifdef CONFIG_HAS_IOPORT_MAP 767 353 #ifndef CONFIG_GENERIC_IOMAP 354 + #ifndef ioport_map 355 + #define ioport_map ioport_map 768 356 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 769 357 { 770 358 return PCI_IOBASE + (port & IO_SPACE_LIMIT); 771 359 } 360 + #endif 772 361 362 + #ifndef ioport_unmap 363 + #define ioport_unmap ioport_unmap 773 364 static inline void ioport_unmap(void __iomem *p) 774 365 { 775 366 } 367 + #endif 776 368 #else /* CONFIG_GENERIC_IOMAP */ 777 369 extern void __iomem 
*ioport_map(unsigned long port, unsigned int nr); 778 370 extern void ioport_unmap(void __iomem *p); ··· 808 344 #endif /* CONFIG_HAS_IOPORT_MAP */ 809 345 810 346 #ifndef xlate_dev_kmem_ptr 811 - #define xlate_dev_kmem_ptr(p) p 347 + #define xlate_dev_kmem_ptr xlate_dev_kmem_ptr 348 + static inline void *xlate_dev_kmem_ptr(void *addr) 349 + { 350 + return addr; 351 + } 812 352 #endif 353 + 813 354 #ifndef xlate_dev_mem_ptr 814 - #define xlate_dev_mem_ptr(p) __va(p) 355 + #define xlate_dev_mem_ptr xlate_dev_mem_ptr 356 + static inline void *xlate_dev_mem_ptr(phys_addr_t addr) 357 + { 358 + return __va(addr); 359 + } 360 + #endif 361 + 362 + #ifndef unxlate_dev_mem_ptr 363 + #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr 364 + static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) 365 + { 366 + } 815 367 #endif 816 368 817 369 #ifdef CONFIG_VIRT_TO_BUS 818 370 #ifndef virt_to_bus 819 - static inline unsigned long virt_to_bus(volatile void *address) 371 + static inline unsigned long virt_to_bus(void *address) 820 372 { 821 - return ((unsigned long) address); 373 + return (unsigned long)address; 822 374 } 823 375 824 376 static inline void *bus_to_virt(unsigned long address) 825 377 { 826 - return (void *) address; 378 + return (void *)address; 827 379 } 828 380 #endif 829 381 #endif 830 382 831 383 #ifndef memset_io 832 - #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) 384 + #define memset_io memset_io 385 + static inline void memset_io(volatile void __iomem *addr, int value, 386 + size_t size) 387 + { 388 + memset(__io_virt(addr), value, size); 389 + } 833 390 #endif 834 391 835 392 #ifndef memcpy_fromio 836 - #define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) 393 + #define memcpy_fromio memcpy_fromio 394 + static inline void memcpy_fromio(void *buffer, 395 + const volatile void __iomem *addr, 396 + size_t size) 397 + { 398 + memcpy(buffer, __io_virt(addr), size); 399 + } 837 400 #endif 401 + 838 402 #ifndef memcpy_toio 839 - 
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) 403 + #define memcpy_toio memcpy_toio 404 + static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, 405 + size_t size) 406 + { 407 + memcpy(__io_virt(addr), buffer, size); 408 + } 840 409 #endif 841 410 842 411 #endif /* __KERNEL__ */