Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'asm-generic-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic updates from Arnd Bergmann:
"This contains a series from Linus Walleij to unify the linux/io.h
interface by making the ia64, alpha, parisc and sparc include
asm-generic/io.h.

All functions provided by the generic header are now available to all
drivers, but the architectures can still override this.

For the moment, mips and sh still don't include asm-generic/io.h but
provide a full set of functions themselves.

There are also a few minor cleanups unrelated to this"

* tag 'asm-generic-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
alpha: add full ioread64/iowrite64 implementation
parisc: Drop homebrewn io[read|write]64_[lo_hi|hi_lo]
parisc: hide ioread64 declaration on 32-bit
ia64: export memory_add_physaddr_to_nid to fix cxl build error
asm-generic: Remove empty #ifdef SA_RESTORER
parisc: Use the generic IO helpers
parisc: Remove 64bit access on 32bit machines
sparc: Fix the generic IO helpers
alpha: Use generic <asm-generic/io.h>

+341 -162
+19 -3
arch/alpha/include/asm/core_apecs.h
··· 384 384 } \ 385 385 } while (0) 386 386 387 - __EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr) 387 + __EXTERN_INLINE u8 apecs_ioread8(const void __iomem *xaddr) 388 388 { 389 389 unsigned long addr = (unsigned long) xaddr; 390 390 unsigned long result, base_and_type; ··· 420 420 *(vuip) ((addr << 5) + base_and_type) = w; 421 421 } 422 422 423 - __EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr) 423 + __EXTERN_INLINE u16 apecs_ioread16(const void __iomem *xaddr) 424 424 { 425 425 unsigned long addr = (unsigned long) xaddr; 426 426 unsigned long result, base_and_type; ··· 456 456 *(vuip) ((addr << 5) + base_and_type) = w; 457 457 } 458 458 459 - __EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr) 459 + __EXTERN_INLINE u32 apecs_ioread32(const void __iomem *xaddr) 460 460 { 461 461 unsigned long addr = (unsigned long) xaddr; 462 462 if (addr < APECS_DENSE_MEM) ··· 470 470 if (addr < APECS_DENSE_MEM) 471 471 addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; 472 472 *(vuip)addr = b; 473 + } 474 + 475 + __EXTERN_INLINE u64 apecs_ioread64(const void __iomem *xaddr) 476 + { 477 + unsigned long addr = (unsigned long) xaddr; 478 + if (addr < APECS_DENSE_MEM) 479 + addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; 480 + return *(vulp)addr; 481 + } 482 + 483 + __EXTERN_INLINE void apecs_iowrite64(u64 b, void __iomem *xaddr) 484 + { 485 + unsigned long addr = (unsigned long) xaddr; 486 + if (addr < APECS_DENSE_MEM) 487 + addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; 488 + *(vulp)addr = b; 473 489 } 474 490 475 491 __EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
+19 -3
arch/alpha/include/asm/core_cia.h
··· 342 342 #define vuip volatile unsigned int __force * 343 343 #define vulp volatile unsigned long __force * 344 344 345 - __EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr) 345 + __EXTERN_INLINE u8 cia_ioread8(const void __iomem *xaddr) 346 346 { 347 347 unsigned long addr = (unsigned long) xaddr; 348 348 unsigned long result, base_and_type; ··· 374 374 *(vuip) ((addr << 5) + base_and_type) = w; 375 375 } 376 376 377 - __EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr) 377 + __EXTERN_INLINE u16 cia_ioread16(const void __iomem *xaddr) 378 378 { 379 379 unsigned long addr = (unsigned long) xaddr; 380 380 unsigned long result, base_and_type; ··· 404 404 *(vuip) ((addr << 5) + base_and_type) = w; 405 405 } 406 406 407 - __EXTERN_INLINE unsigned int cia_ioread32(const void __iomem *xaddr) 407 + __EXTERN_INLINE u32 cia_ioread32(const void __iomem *xaddr) 408 408 { 409 409 unsigned long addr = (unsigned long) xaddr; 410 410 if (addr < CIA_DENSE_MEM) ··· 418 418 if (addr < CIA_DENSE_MEM) 419 419 addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; 420 420 *(vuip)addr = b; 421 + } 422 + 423 + __EXTERN_INLINE u64 cia_ioread64(const void __iomem *xaddr) 424 + { 425 + unsigned long addr = (unsigned long) xaddr; 426 + if (addr < CIA_DENSE_MEM) 427 + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; 428 + return *(vulp)addr; 429 + } 430 + 431 + __EXTERN_INLINE void cia_iowrite64(u64 b, void __iomem *xaddr) 432 + { 433 + unsigned long addr = (unsigned long) xaddr; 434 + if (addr < CIA_DENSE_MEM) 435 + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; 436 + *(vulp)addr = b; 421 437 } 422 438 423 439 __EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)
+19 -3
arch/alpha/include/asm/core_lca.h
··· 230 230 } while (0) 231 231 232 232 233 - __EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr) 233 + __EXTERN_INLINE u8 lca_ioread8(const void __iomem *xaddr) 234 234 { 235 235 unsigned long addr = (unsigned long) xaddr; 236 236 unsigned long result, base_and_type; ··· 266 266 *(vuip) ((addr << 5) + base_and_type) = w; 267 267 } 268 268 269 - __EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr) 269 + __EXTERN_INLINE u16 lca_ioread16(const void __iomem *xaddr) 270 270 { 271 271 unsigned long addr = (unsigned long) xaddr; 272 272 unsigned long result, base_and_type; ··· 302 302 *(vuip) ((addr << 5) + base_and_type) = w; 303 303 } 304 304 305 - __EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr) 305 + __EXTERN_INLINE u32 lca_ioread32(const void __iomem *xaddr) 306 306 { 307 307 unsigned long addr = (unsigned long) xaddr; 308 308 if (addr < LCA_DENSE_MEM) ··· 316 316 if (addr < LCA_DENSE_MEM) 317 317 addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; 318 318 *(vuip)addr = b; 319 + } 320 + 321 + __EXTERN_INLINE u64 lca_ioread64(const void __iomem *xaddr) 322 + { 323 + unsigned long addr = (unsigned long) xaddr; 324 + if (addr < LCA_DENSE_MEM) 325 + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; 326 + return *(vulp)addr; 327 + } 328 + 329 + __EXTERN_INLINE void lca_iowrite64(u64 b, void __iomem *xaddr) 330 + { 331 + unsigned long addr = (unsigned long) xaddr; 332 + if (addr < LCA_DENSE_MEM) 333 + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; 334 + *(vulp)addr = b; 319 335 } 320 336 321 337 __EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
+2 -2
arch/alpha/include/asm/core_marvel.h
··· 332 332 #define vucp volatile unsigned char __force * 333 333 #define vusp volatile unsigned short __force * 334 334 335 - extern unsigned int marvel_ioread8(const void __iomem *); 335 + extern u8 marvel_ioread8(const void __iomem *); 336 336 extern void marvel_iowrite8(u8 b, void __iomem *); 337 337 338 - __EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr) 338 + __EXTERN_INLINE u16 marvel_ioread16(const void __iomem *addr) 339 339 { 340 340 return __kernel_ldwu(*(vusp)addr); 341 341 }
+25 -3
arch/alpha/include/asm/core_mcpcia.h
··· 248 248 249 249 #define vip volatile int __force * 250 250 #define vuip volatile unsigned int __force * 251 + #define vulp volatile unsigned long __force * 251 252 252 253 #ifndef MCPCIA_ONE_HAE_WINDOW 253 254 #define MCPCIA_FROB_MMIO \ ··· 268 267 return (addr & 0x80000000UL) == 0; 269 268 } 270 269 271 - __EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr) 270 + __EXTERN_INLINE u8 mcpcia_ioread8(const void __iomem *xaddr) 272 271 { 273 272 unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; 274 273 unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; ··· 292 291 *(vuip) ((addr << 5) + hose + 0x00) = w; 293 292 } 294 293 295 - __EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr) 294 + __EXTERN_INLINE u16 mcpcia_ioread16(const void __iomem *xaddr) 296 295 { 297 296 unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; 298 297 unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; ··· 316 315 *(vuip) ((addr << 5) + hose + 0x08) = w; 317 316 } 318 317 319 - __EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr) 318 + __EXTERN_INLINE u32 mcpcia_ioread32(const void __iomem *xaddr) 320 319 { 321 320 unsigned long addr = (unsigned long)xaddr; 322 321 ··· 334 333 addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; 335 334 336 335 *(vuip)addr = b; 336 + } 337 + 338 + __EXTERN_INLINE u64 mcpcia_ioread64(const void __iomem *xaddr) 339 + { 340 + unsigned long addr = (unsigned long)xaddr; 341 + 342 + if (!__mcpcia_is_mmio(addr)) 343 + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; 344 + 345 + return *(vulp)addr; 346 + } 347 + 348 + __EXTERN_INLINE void mcpcia_iowrite64(u64 b, void __iomem *xaddr) 349 + { 350 + unsigned long addr = (unsigned long)xaddr; 351 + 352 + if (!__mcpcia_is_mmio(addr)) 353 + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; 354 + 355 + *(vulp)addr = b; 337 356 } 338 357 339 358 ··· 383 362 384 363 #undef vip 385 364 #undef vuip 365 + 
#undef vulp 386 366 387 367 #undef __IO_PREFIX 388 368 #define __IO_PREFIX mcpcia
+15 -1
arch/alpha/include/asm/core_t2.h
··· 360 360 361 361 #define vip volatile int * 362 362 #define vuip volatile unsigned int * 363 + #define vulp volatile unsigned long * 363 364 364 365 extern inline u8 t2_inb(unsigned long addr) 365 366 { ··· 400 399 extern inline void t2_outl(u32 b, unsigned long addr) 401 400 { 402 401 *(vuip) ((addr << 5) + T2_IO + 0x18) = b; 402 + mb(); 403 + } 404 + 405 + extern inline u64 t2_inq(unsigned long addr) 406 + { 407 + return *(vulp) ((addr << 5) + T2_IO + 0x18); 408 + } 409 + 410 + extern inline void t2_outq(u64 b, unsigned long addr) 411 + { 412 + *(vulp) ((addr << 5) + T2_IO + 0x18) = b; 403 413 mb(); 404 414 } 405 415 ··· 584 572 it doesn't make sense to merge the pio and mmio routines. */ 585 573 586 574 #define IOPORT(OS, NS) \ 587 - __EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr) \ 575 + __EXTERN_INLINE u##NS t2_ioread##NS(const void __iomem *xaddr) \ 588 576 { \ 589 577 if (t2_is_mmio(xaddr)) \ 590 578 return t2_read##OS(xaddr); \ ··· 602 590 IOPORT(b, 8) 603 591 IOPORT(w, 16) 604 592 IOPORT(l, 32) 593 + IOPORT(q, 64) 605 594 606 595 #undef IOPORT 607 596 608 597 #undef vip 609 598 #undef vuip 599 + #undef vulp 610 600 611 601 #undef __IO_PREFIX 612 602 #define __IO_PREFIX t2
+87 -10
arch/alpha/include/asm/io.h
··· 90 90 } 91 91 #endif 92 92 93 + #define virt_to_phys virt_to_phys 94 + #define phys_to_virt phys_to_virt 93 95 #define page_to_phys(page) page_to_pa(page) 94 96 95 97 /* Maximum PIO space address supported? */ ··· 155 153 REMAP1(unsigned int, ioread8, const) 156 154 REMAP1(unsigned int, ioread16, const) 157 155 REMAP1(unsigned int, ioread32, const) 156 + REMAP1(u64, ioread64, const) 158 157 REMAP1(u8, readb, const volatile) 159 158 REMAP1(u16, readw, const volatile) 160 159 REMAP1(u32, readl, const volatile) ··· 164 161 REMAP2(u8, iowrite8, /**/) 165 162 REMAP2(u16, iowrite16, /**/) 166 163 REMAP2(u32, iowrite32, /**/) 164 + REMAP2(u64, iowrite64, /**/) 167 165 REMAP2(u8, writeb, volatile) 168 166 REMAP2(u16, writew, volatile) 169 167 REMAP2(u32, writel, volatile) ··· 246 242 extern void outb(u8 b, unsigned long port); 247 243 extern void outw(u16 b, unsigned long port); 248 244 extern void outl(u32 b, unsigned long port); 245 + #define inb inb 246 + #define inw inw 247 + #define inl inl 248 + #define outb outb 249 + #define outw outw 250 + #define outl outl 249 251 250 252 extern u8 readb(const volatile void __iomem *addr); 251 253 extern u16 readw(const volatile void __iomem *addr); ··· 261 251 extern void writew(u16 b, volatile void __iomem *addr); 262 252 extern void writel(u32 b, volatile void __iomem *addr); 263 253 extern void writeq(u64 b, volatile void __iomem *addr); 254 + #define readb readb 255 + #define readw readw 256 + #define readl readl 257 + #define readq readq 258 + #define writeb writeb 259 + #define writew writew 260 + #define writel writel 261 + #define writeq writeq 264 262 265 263 extern u8 __raw_readb(const volatile void __iomem *addr); 266 264 extern u16 __raw_readw(const volatile void __iomem *addr); ··· 278 260 extern void __raw_writew(u16 b, volatile void __iomem *addr); 279 261 extern void __raw_writel(u32 b, volatile void __iomem *addr); 280 262 extern void __raw_writeq(u64 b, volatile void __iomem *addr); 263 + #define 
__raw_readb __raw_readb 264 + #define __raw_readw __raw_readw 265 + #define __raw_readl __raw_readl 266 + #define __raw_readq __raw_readq 267 + #define __raw_writeb __raw_writeb 268 + #define __raw_writew __raw_writew 269 + #define __raw_writel __raw_writel 270 + #define __raw_writeq __raw_writeq 281 271 282 272 /* 283 273 * Mapping from port numbers to __iomem space is pretty easy. ··· 302 276 extern inline void ioport_unmap(void __iomem *addr) 303 277 { 304 278 } 279 + 280 + #define ioport_map ioport_map 281 + #define ioport_unmap ioport_unmap 305 282 306 283 static inline void __iomem *ioremap(unsigned long port, unsigned long size) 307 284 { ··· 387 358 } 388 359 #endif 389 360 361 + #define ioread8 ioread8 362 + #define ioread16 ioread16 363 + #define iowrite8 iowrite8 364 + #define iowrite16 iowrite16 365 + 390 366 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) 391 367 extern inline unsigned int ioread32(const void __iomem *addr) 392 368 { ··· 402 368 return ret; 403 369 } 404 370 371 + extern inline u64 ioread64(const void __iomem *addr) 372 + { 373 + u64 ret; 374 + mb(); 375 + ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr); 376 + mb(); 377 + return ret; 378 + } 379 + 405 380 extern inline void iowrite32(u32 b, void __iomem *addr) 406 381 { 407 382 mb(); 408 383 IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr); 384 + } 385 + 386 + extern inline void iowrite64(u64 b, void __iomem *addr) 387 + { 388 + mb(); 389 + IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr); 409 390 } 410 391 411 392 extern inline u32 inl(unsigned long port) ··· 433 384 iowrite32(b, ioport_map(port, 4)); 434 385 } 435 386 #endif 387 + 388 + #define ioread32 ioread32 389 + #define ioread64 ioread64 390 + #define iowrite32 iowrite32 391 + #define iowrite64 iowrite64 436 392 437 393 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 438 394 extern inline u8 __raw_readb(const volatile void __iomem *addr) ··· 559 505 extern u16 readw_relaxed(const volatile void __iomem *addr); 560 506 extern u32
readl_relaxed(const volatile void __iomem *addr); 561 507 extern u64 readq_relaxed(const volatile void __iomem *addr); 508 + #define readb_relaxed readb_relaxed 509 + #define readw_relaxed readw_relaxed 510 + #define readl_relaxed readl_relaxed 511 + #define readq_relaxed readq_relaxed 562 512 563 513 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) 564 514 extern inline u8 readb_relaxed(const volatile void __iomem *addr) ··· 615 557 _memset_c_io(addr, 0x0001000100010001UL * c, len); 616 558 } 617 559 560 + #define memset_io memset_io 561 + #define memcpy_fromio memcpy_fromio 562 + #define memcpy_toio memcpy_toio 563 + 618 564 /* 619 565 * String versions of in/out ops: 620 566 */ ··· 628 566 extern void outsb (unsigned long port, const void *src, unsigned long count); 629 567 extern void outsw (unsigned long port, const void *src, unsigned long count); 630 568 extern void outsl (unsigned long port, const void *src, unsigned long count); 569 + 570 + #define insb insb 571 + #define insw insw 572 + #define insl insl 573 + #define outsb outsb 574 + #define outsw outsw 575 + #define outsl outsl 631 576 632 577 /* 633 578 * The Alpha Jensen hardware for some rather strange reason puts ··· 656 587 #define RTC_ALWAYS_BCD 0 657 588 658 589 /* 659 - * Some mucking forons use if[n]def writeq to check if platform has it. 660 - * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them 661 - * to play with; for now just use cpp anti-recursion logics and make sure 662 - * that damn thing is defined and expands to itself. 663 - */ 664 - 665 - #define writeq writeq 666 - #define readq readq 667 - 668 - /* 669 590 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 670 591 * access 671 592 */ 672 593 #define xlate_dev_mem_ptr(p) __va(p) 594 + 595 + /* 596 + * These get provided from <asm-generic/iomap.h> since alpha does not 597 + * select GENERIC_IOMAP. 
598 + */ 599 + #define ioread64 ioread64 600 + #define iowrite64 iowrite64 601 + #define ioread64be ioread64be 602 + #define iowrite64be iowrite64be 603 + #define ioread8_rep ioread8_rep 604 + #define ioread16_rep ioread16_rep 605 + #define ioread32_rep ioread32_rep 606 + #define iowrite8_rep iowrite8_rep 607 + #define iowrite16_rep iowrite16_rep 608 + #define iowrite32_rep iowrite32_rep 609 + #define pci_iounmap pci_iounmap 610 + 611 + #include <asm-generic/io.h> 673 612 674 613 #endif /* __KERNEL__ */ 675 614
+15 -3
arch/alpha/include/asm/io_trivial.h
··· 6 6 /* This file may be included multiple times. */ 7 7 8 8 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) 9 - __EXTERN_INLINE unsigned int 9 + __EXTERN_INLINE u8 10 10 IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a) 11 11 { 12 12 return __kernel_ldbu(*(const volatile u8 __force *)a); 13 13 } 14 14 15 - __EXTERN_INLINE unsigned int 15 + __EXTERN_INLINE u16 16 16 IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a) 17 17 { 18 18 return __kernel_ldwu(*(const volatile u16 __force *)a); ··· 32 32 #endif 33 33 34 34 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) 35 - __EXTERN_INLINE unsigned int 35 + __EXTERN_INLINE u32 36 36 IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a) 37 37 { 38 38 return *(const volatile u32 __force *)a; ··· 42 42 IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a) 43 43 { 44 44 *(volatile u32 __force *)a = b; 45 + } 46 + 47 + __EXTERN_INLINE u64 48 + IO_CONCAT(__IO_PREFIX,ioread64)(const void __iomem *a) 49 + { 50 + return *(const volatile u64 __force *)a; 51 + } 52 + 53 + __EXTERN_INLINE void 54 + IO_CONCAT(__IO_PREFIX,iowrite64)(u64 b, void __iomem *a) 55 + { 56 + *(volatile u64 __force *)a = b; 45 57 } 46 58 #endif 47 59
+17 -1
arch/alpha/include/asm/jensen.h
··· 98 98 } 99 99 100 100 #define vuip volatile unsigned int * 101 + #define vulp volatile unsigned long * 101 102 102 103 /* 103 104 * IO functions ··· 184 183 return *(vuip) ((addr << 7) + EISA_IO + 0x60); 185 184 } 186 185 186 + __EXTERN_INLINE u64 jensen_inq(unsigned long addr) 187 + { 188 + jensen_set_hae(0); 189 + return *(vulp) ((addr << 7) + EISA_IO + 0x60); 190 + } 191 + 187 192 __EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr) 188 193 { 189 194 jensen_set_hae(0); ··· 201 194 { 202 195 jensen_set_hae(0); 203 196 *(vuip) ((addr << 7) + EISA_IO + 0x60) = b; 197 + mb(); 198 + } 199 + 200 + __EXTERN_INLINE void jensen_outq(u64 b, unsigned long addr) 201 + { 202 + jensen_set_hae(0); 203 + *(vulp) ((addr << 7) + EISA_IO + 0x60) = b; 204 204 mb(); 205 205 } 206 206 ··· 319 305 that it doesn't make sense to merge them. */ 320 306 321 307 #define IOPORT(OS, NS) \ 322 - __EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr) \ 308 + __EXTERN_INLINE u##NS jensen_ioread##NS(const void __iomem *xaddr) \ 323 309 { \ 324 310 if (jensen_is_mmio(xaddr)) \ 325 311 return jensen_read##OS(xaddr - 0x100000000ul); \ ··· 337 323 IOPORT(b, 8) 338 324 IOPORT(w, 16) 339 325 IOPORT(l, 32) 326 + IOPORT(q, 64) 340 327 341 328 #undef IOPORT 342 329 343 330 #undef vuip 331 + #undef vulp 344 332 345 333 #undef __IO_PREFIX 346 334 #define __IO_PREFIX jensen
+5 -3
arch/alpha/include/asm/machvec.h
··· 46 46 void (*mv_pci_tbi)(struct pci_controller *hose, 47 47 dma_addr_t start, dma_addr_t end); 48 48 49 - unsigned int (*mv_ioread8)(const void __iomem *); 50 - unsigned int (*mv_ioread16)(const void __iomem *); 51 - unsigned int (*mv_ioread32)(const void __iomem *); 49 + u8 (*mv_ioread8)(const void __iomem *); 50 + u16 (*mv_ioread16)(const void __iomem *); 51 + u32 (*mv_ioread32)(const void __iomem *); 52 + u64 (*mv_ioread64)(const void __iomem *); 52 53 53 54 void (*mv_iowrite8)(u8, void __iomem *); 54 55 void (*mv_iowrite16)(u16, void __iomem *); 55 56 void (*mv_iowrite32)(u32, void __iomem *); 57 + void (*mv_iowrite64)(u64, void __iomem *); 56 58 57 59 u8 (*mv_readb)(const volatile void __iomem *); 58 60 u16 (*mv_readw)(const volatile void __iomem *);
+1 -1
arch/alpha/kernel/core_marvel.c
··· 803 803 return (void __iomem *)addr; 804 804 } 805 805 806 - unsigned int 806 + u8 807 807 marvel_ioread8(const void __iomem *xaddr) 808 808 { 809 809 unsigned long addr = (unsigned long) xaddr;
+17
arch/alpha/kernel/io.c
··· 41 41 return ret; 42 42 } 43 43 44 + u64 ioread64(const void __iomem *addr) 45 + { 46 + u64 ret; 47 + mb(); 48 + ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr); 49 + mb(); 50 + return ret; 51 + } 52 + 44 53 void iowrite8(u8 b, void __iomem *addr) 45 54 { 46 55 mb(); ··· 68 59 IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); 69 60 } 70 61 62 + void iowrite64(u64 b, void __iomem *addr) 63 + { 64 + mb(); 65 + IO_CONCAT(__IO_PREFIX,iowrite64)(b, addr); 66 + } 67 + 71 68 EXPORT_SYMBOL(ioread8); 72 69 EXPORT_SYMBOL(ioread16); 73 70 EXPORT_SYMBOL(ioread32); 71 + EXPORT_SYMBOL(ioread64); 74 72 EXPORT_SYMBOL(iowrite8); 75 73 EXPORT_SYMBOL(iowrite16); 76 74 EXPORT_SYMBOL(iowrite32); 75 + EXPORT_SYMBOL(iowrite64); 77 76 78 77 u8 inb(unsigned long port) 79 78 {
+2
arch/alpha/kernel/machvec_impl.h
··· 78 78 .mv_ioread8 = CAT(low,_ioread8), \ 79 79 .mv_ioread16 = CAT(low,_ioread16), \ 80 80 .mv_ioread32 = CAT(low,_ioread32), \ 81 + .mv_ioread64 = CAT(low,_ioread64), \ 81 82 .mv_iowrite8 = CAT(low,_iowrite8), \ 82 83 .mv_iowrite16 = CAT(low,_iowrite16), \ 83 84 .mv_iowrite32 = CAT(low,_iowrite32), \ 85 + .mv_iowrite64 = CAT(low,_iowrite64), \ 84 86 .mv_readb = CAT(low,_readb), \ 85 87 .mv_readw = CAT(low,_readw), \ 86 88 .mv_readl = CAT(low,_readl), \
+1
arch/ia64/mm/numa.c
··· 75 75 return 0; 76 76 return nid; 77 77 } 78 + EXPORT_SYMBOL(memory_add_physaddr_to_nid); 78 79 #endif 79 80 #endif
+45 -89
arch/parisc/include/asm/io.h
··· 128 128 void __iomem *ioremap(unsigned long offset, unsigned long size); 129 129 #define ioremap_wc ioremap 130 130 #define ioremap_uc ioremap 131 + #define pci_iounmap pci_iounmap 131 132 132 133 extern void iounmap(const volatile void __iomem *addr); 133 - 134 - static inline unsigned char __raw_readb(const volatile void __iomem *addr) 135 - { 136 - return (*(volatile unsigned char __force *) (addr)); 137 - } 138 - static inline unsigned short __raw_readw(const volatile void __iomem *addr) 139 - { 140 - return *(volatile unsigned short __force *) addr; 141 - } 142 - static inline unsigned int __raw_readl(const volatile void __iomem *addr) 143 - { 144 - return *(volatile unsigned int __force *) addr; 145 - } 146 - static inline unsigned long long __raw_readq(const volatile void __iomem *addr) 147 - { 148 - return *(volatile unsigned long long __force *) addr; 149 - } 150 - 151 - static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr) 152 - { 153 - *(volatile unsigned char __force *) addr = b; 154 - } 155 - static inline void __raw_writew(unsigned short b, volatile void __iomem *addr) 156 - { 157 - *(volatile unsigned short __force *) addr = b; 158 - } 159 - static inline void __raw_writel(unsigned int b, volatile void __iomem *addr) 160 - { 161 - *(volatile unsigned int __force *) addr = b; 162 - } 163 - static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr) 164 - { 165 - *(volatile unsigned long long __force *) addr = b; 166 - } 167 - 168 - static inline unsigned char readb(const volatile void __iomem *addr) 169 - { 170 - return __raw_readb(addr); 171 - } 172 - static inline unsigned short readw(const volatile void __iomem *addr) 173 - { 174 - return le16_to_cpu((__le16 __force) __raw_readw(addr)); 175 - } 176 - static inline unsigned int readl(const volatile void __iomem *addr) 177 - { 178 - return le32_to_cpu((__le32 __force) __raw_readl(addr)); 179 - } 180 - static inline unsigned long long readq(const 
volatile void __iomem *addr) 181 - { 182 - return le64_to_cpu((__le64 __force) __raw_readq(addr)); 183 - } 184 - 185 - static inline void writeb(unsigned char b, volatile void __iomem *addr) 186 - { 187 - __raw_writeb(b, addr); 188 - } 189 - static inline void writew(unsigned short w, volatile void __iomem *addr) 190 - { 191 - __raw_writew((__u16 __force) cpu_to_le16(w), addr); 192 - } 193 - static inline void writel(unsigned int l, volatile void __iomem *addr) 194 - { 195 - __raw_writel((__u32 __force) cpu_to_le32(l), addr); 196 - } 197 - static inline void writeq(unsigned long long q, volatile void __iomem *addr) 198 - { 199 - __raw_writeq((__u64 __force) cpu_to_le64(q), addr); 200 - } 201 - 202 - #define readb readb 203 - #define readw readw 204 - #define readl readl 205 - #define readq readq 206 - #define writeb writeb 207 - #define writew writew 208 - #define writel writel 209 - #define writeq writeq 210 - 211 - #define readb_relaxed(addr) readb(addr) 212 - #define readw_relaxed(addr) readw(addr) 213 - #define readl_relaxed(addr) readl(addr) 214 - #define readq_relaxed(addr) readq(addr) 215 - #define writeb_relaxed(b, addr) writeb(b, addr) 216 - #define writew_relaxed(w, addr) writew(w, addr) 217 - #define writel_relaxed(l, addr) writel(l, addr) 218 - #define writeq_relaxed(q, addr) writeq(q, addr) 219 134 220 135 void memset_io(volatile void __iomem *addr, unsigned char val, int count); 221 136 void memcpy_fromio(void *dst, const volatile void __iomem *src, int count); 222 137 void memcpy_toio(volatile void __iomem *dst, const void *src, int count); 138 + #define memset_io memset_io 139 + #define memcpy_fromio memcpy_fromio 140 + #define memcpy_toio memcpy_toio 223 141 224 142 /* Port-space IO */ 225 143 ··· 159 241 extern unsigned char inb(int addr); 160 242 extern unsigned short inw(int addr); 161 243 extern unsigned int inl(int addr); 162 - 163 244 extern void outb(unsigned char b, int addr); 164 245 extern void outw(unsigned short b, int addr); 165 246 
extern void outl(unsigned int b, int addr); 247 + #define inb inb 248 + #define inw inw 249 + #define inl inl 250 + #define outb outb 251 + #define outw outw 252 + #define outl outl 166 253 #elif defined(CONFIG_EISA) 167 254 #define inb eisa_in8 168 255 #define inw eisa_in16 ··· 193 270 BUG(); 194 271 return -1; 195 272 } 196 - 273 + #define inb inb 274 + #define inw inw 275 + #define inl inl 197 276 #define outb(x, y) ({(void)(x); (void)(y); BUG(); 0;}) 198 277 #define outw(x, y) ({(void)(x); (void)(y); BUG(); 0;}) 199 278 #define outl(x, y) ({(void)(x); (void)(y); BUG(); 0;}) ··· 210 285 extern void outsb (unsigned long port, const void *src, unsigned long count); 211 286 extern void outsw (unsigned long port, const void *src, unsigned long count); 212 287 extern void outsl (unsigned long port, const void *src, unsigned long count); 213 - 288 + #define insb insb 289 + #define insw insw 290 + #define insl insl 291 + #define outsb outsb 292 + #define outsw outsw 293 + #define outsl outsl 214 294 215 295 /* IO Port space is : BBiiii where BB is HBA number. */ 216 296 #define IO_SPACE_LIMIT 0x00ffffff ··· 227 297 * value for either 32 or 64 bit mode */ 228 298 #define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) 229 299 300 + #ifdef CONFIG_64BIT 230 301 #define ioread64 ioread64 231 302 #define ioread64be ioread64be 232 303 #define iowrite64 iowrite64 ··· 236 305 extern u64 ioread64be(const void __iomem *addr); 237 306 extern void iowrite64(u64 val, void __iomem *addr); 238 307 extern void iowrite64be(u64 val, void __iomem *addr); 308 + #endif 239 309 240 310 #include <asm-generic/iomap.h> 311 + /* 312 + * These get provided from <asm-generic/iomap.h> since parisc does not 313 + * select GENERIC_IOMAP. 
314 + */ 315 + #define ioport_map ioport_map 316 + #define ioport_unmap ioport_unmap 317 + #define ioread8 ioread8 318 + #define ioread16 ioread16 319 + #define ioread32 ioread32 320 + #define ioread16be ioread16be 321 + #define ioread32be ioread32be 322 + #define iowrite8 iowrite8 323 + #define iowrite16 iowrite16 324 + #define iowrite32 iowrite32 325 + #define iowrite16be iowrite16be 326 + #define iowrite32be iowrite32be 327 + #define ioread8_rep ioread8_rep 328 + #define ioread16_rep ioread16_rep 329 + #define ioread32_rep ioread32_rep 330 + #define iowrite8_rep iowrite8_rep 331 + #define iowrite16_rep iowrite16_rep 332 + #define iowrite32_rep iowrite32_rep 241 333 242 334 /* 243 335 * Convert a physical pointer to a virtual kernel pointer for /dev/mem ··· 269 315 #define xlate_dev_mem_ptr(p) __va(p) 270 316 271 317 extern int devmem_is_allowed(unsigned long pfn); 318 + 319 + #include <asm-generic/io.h> 272 320 273 321 #endif
+22 -38
arch/parisc/lib/iomap.c
··· 48 48 unsigned int (*read16be)(const void __iomem *); 49 49 unsigned int (*read32)(const void __iomem *); 50 50 unsigned int (*read32be)(const void __iomem *); 51 + #ifdef CONFIG_64BIT 51 52 u64 (*read64)(const void __iomem *); 52 53 u64 (*read64be)(const void __iomem *); 54 + #endif 53 55 void (*write8)(u8, void __iomem *); 54 56 void (*write16)(u16, void __iomem *); 55 57 void (*write16be)(u16, void __iomem *); 56 58 void (*write32)(u32, void __iomem *); 57 59 void (*write32be)(u32, void __iomem *); 60 + #ifdef CONFIG_64BIT 58 61 void (*write64)(u64, void __iomem *); 59 62 void (*write64be)(u64, void __iomem *); 63 + #endif 60 64 void (*read8r)(const void __iomem *, void *, unsigned long); 61 65 void (*read16r)(const void __iomem *, void *, unsigned long); 62 66 void (*read32r)(const void __iomem *, void *, unsigned long); ··· 179 175 return __raw_readl(addr); 180 176 } 181 177 178 + #ifdef CONFIG_64BIT 182 179 static u64 iomem_read64(const void __iomem *addr) 183 180 { 184 181 return readq(addr); ··· 189 184 { 190 185 return __raw_readq(addr); 191 186 } 187 + #endif 192 188 193 189 static void iomem_write8(u8 datum, void __iomem *addr) 194 190 { ··· 216 210 __raw_writel(datum, addr); 217 211 } 218 212 213 + #ifdef CONFIG_64BIT 219 214 static void iomem_write64(u64 datum, void __iomem *addr) 220 215 { 221 - writel(datum, addr); 216 + writeq(datum, addr); 222 217 } 223 218 224 219 static void iomem_write64be(u64 datum, void __iomem *addr) 225 220 { 226 - __raw_writel(datum, addr); 221 + __raw_writeq(datum, addr); 227 222 } 223 + #endif 228 224 229 225 static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count) 230 226 { ··· 282 274 .read16be = iomem_read16be, 283 275 .read32 = iomem_read32, 284 276 .read32be = iomem_read32be, 277 + #ifdef CONFIG_64BIT 285 278 .read64 = iomem_read64, 286 279 .read64be = iomem_read64be, 280 + #endif 287 281 .write8 = iomem_write8, 288 282 .write16 = iomem_write16, 289 283 .write16be = iomem_write16be, 290 
284 .write32 = iomem_write32, 291 285 .write32be = iomem_write32be, 286 + #ifdef CONFIG_64BIT 292 287 .write64 = iomem_write64, 293 288 .write64be = iomem_write64be, 289 + #endif 294 290 .read8r = iomem_read8r, 295 291 .read16r = iomem_read16r, 296 292 .read32r = iomem_read32r, ··· 344 332 return *((u32 *)addr); 345 333 } 346 334 335 + #ifdef CONFIG_64BIT 347 336 u64 ioread64(const void __iomem *addr) 348 337 { 349 338 if (unlikely(INDIRECT_ADDR(addr))) ··· 358 345 return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); 359 346 return *((u64 *)addr); 360 347 } 361 - 362 - u64 ioread64_lo_hi(const void __iomem *addr) 363 - { 364 - u32 low, high; 365 - 366 - low = ioread32(addr); 367 - high = ioread32(addr + sizeof(u32)); 368 - 369 - return low + ((u64)high << 32); 370 - } 371 - 372 - u64 ioread64_hi_lo(const void __iomem *addr) 373 - { 374 - u32 low, high; 375 - 376 - high = ioread32(addr + sizeof(u32)); 377 - low = ioread32(addr); 378 - 379 - return low + ((u64)high << 32); 380 - } 348 + #endif 381 349 382 350 void iowrite8(u8 datum, void __iomem *addr) 383 351 { ··· 405 411 } 406 412 } 407 413 414 + #ifdef CONFIG_64BIT 408 415 void iowrite64(u64 datum, void __iomem *addr) 409 416 { 410 417 if (unlikely(INDIRECT_ADDR(addr))) { ··· 423 428 *((u64 *)addr) = datum; 424 429 } 425 430 } 426 - 427 - void iowrite64_lo_hi(u64 val, void __iomem *addr) 428 - { 429 - iowrite32(val, addr); 430 - iowrite32(val >> 32, addr + sizeof(u32)); 431 - } 432 - 433 - void iowrite64_hi_lo(u64 val, void __iomem *addr) 434 - { 435 - iowrite32(val >> 32, addr + sizeof(u32)); 436 - iowrite32(val, addr); 437 - } 431 + #endif 438 432 439 433 /* Repeating interfaces */ 440 434 ··· 528 544 EXPORT_SYMBOL(ioread16be); 529 545 EXPORT_SYMBOL(ioread32); 530 546 EXPORT_SYMBOL(ioread32be); 547 + #ifdef CONFIG_64BIT 531 548 EXPORT_SYMBOL(ioread64); 532 549 EXPORT_SYMBOL(ioread64be); 533 - EXPORT_SYMBOL(ioread64_lo_hi); 534 - EXPORT_SYMBOL(ioread64_hi_lo); 550 + #endif 535 551 EXPORT_SYMBOL(iowrite8); 536 
552 EXPORT_SYMBOL(iowrite16); 537 553 EXPORT_SYMBOL(iowrite16be); 538 554 EXPORT_SYMBOL(iowrite32); 539 555 EXPORT_SYMBOL(iowrite32be); 556 + #ifdef CONFIG_64BIT 540 557 EXPORT_SYMBOL(iowrite64); 541 558 EXPORT_SYMBOL(iowrite64be); 542 - EXPORT_SYMBOL(iowrite64_lo_hi); 543 - EXPORT_SYMBOL(iowrite64_hi_lo); 559 + #endif 544 560 EXPORT_SYMBOL(ioread8_rep); 545 561 EXPORT_SYMBOL(ioread16_rep); 546 562 EXPORT_SYMBOL(ioread32_rep);
+2
arch/sparc/include/asm/io.h
··· 19 19 #define writel_be(__w, __addr) __raw_writel(__w, __addr) 20 20 #define writew_be(__l, __addr) __raw_writew(__l, __addr) 21 21 22 + #include <asm-generic/io.h> 23 + 22 24 #endif
+22
arch/sparc/include/asm/io_64.h
··· 9 9 #include <asm/page.h> /* IO address mapping routines need this */ 10 10 #include <asm/asi.h> 11 11 #include <asm-generic/pci_iomap.h> 12 + #define pci_iomap pci_iomap 12 13 13 14 /* BIO layer definitions. */ 14 15 extern unsigned long kern_base, kern_size; ··· 240 239 void outsb(unsigned long, const void *, unsigned long); 241 240 void outsw(unsigned long, const void *, unsigned long); 242 241 void outsl(unsigned long, const void *, unsigned long); 242 + #define outsb outsb 243 + #define outsw outsw 244 + #define outsl outsl 243 245 void insb(unsigned long, void *, unsigned long); 244 246 void insw(unsigned long, void *, unsigned long); 245 247 void insl(unsigned long, void *, unsigned long); 248 + #define insb insb 249 + #define insw insw 250 + #define insl insl 246 251 247 252 static inline void readsb(void __iomem *port, void *buf, unsigned long count) 248 253 { 249 254 insb((unsigned long __force)port, buf, count); 250 255 } 256 + #define readsb readsb 257 + 251 258 static inline void readsw(void __iomem *port, void *buf, unsigned long count) 252 259 { 253 260 insw((unsigned long __force)port, buf, count); 254 261 } 262 + #define readsw readsw 255 263 256 264 static inline void readsl(void __iomem *port, void *buf, unsigned long count) 257 265 { 258 266 insl((unsigned long __force)port, buf, count); 259 267 } 268 + #define readsl readsl 260 269 261 270 static inline void writesb(void __iomem *port, const void *buf, unsigned long count) 262 271 { 263 272 outsb((unsigned long __force)port, buf, count); 264 273 } 274 + #define writesb writesb 265 275 266 276 static inline void writesw(void __iomem *port, const void *buf, unsigned long count) 267 277 { 268 278 outsw((unsigned long __force)port, buf, count); 269 279 } 280 + #define writesw writesw 270 281 271 282 static inline void writesl(void __iomem *port, const void *buf, unsigned long count) 272 283 { 273 284 outsl((unsigned long __force)port, buf, count); 274 285 } 286 + #define writesl writesl 275 287 
276 288 #define ioread8_rep(p,d,l) readsb(p,d,l) 277 289 #define ioread16_rep(p,d,l) readsw(p,d,l) ··· 358 344 d++; 359 345 } 360 346 } 347 + #define memset_io memset_io 361 348 362 349 static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src, 363 350 __kernel_size_t n) ··· 384 369 src++; 385 370 } 386 371 } 372 + #define memcpy_fromio memcpy_fromio 387 373 388 374 static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src, 389 375 __kernel_size_t n) ··· 411 395 d++; 412 396 } 413 397 } 398 + #define memcpy_toio memcpy_toio 414 399 415 400 #ifdef __KERNEL__ 416 401 ··· 429 412 static inline void __iomem *ioremap_np(unsigned long offset, unsigned long size) 430 413 { 431 414 return NULL; 415 + 432 416 } 417 + #define ioremap_np ioremap_np 433 418 434 419 static inline void iounmap(volatile void __iomem *addr) 435 420 { ··· 451 432 /* Create a virtual mapping cookie for an IO port range */ 452 433 void __iomem *ioport_map(unsigned long port, unsigned int nr); 453 434 void ioport_unmap(void __iomem *); 435 + #define ioport_map ioport_map 436 + #define ioport_unmap ioport_unmap 454 437 455 438 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ 456 439 struct pci_dev; 457 440 void pci_iounmap(struct pci_dev *dev, void __iomem *); 441 + #define pci_iounmap pci_iounmap 458 442 459 443 static inline int sbus_can_dma_64bit(void) 460 444 {
+6
drivers/parisc/sba_iommu.c
··· 28 28 #include <linux/dma-map-ops.h> 29 29 #include <linux/scatterlist.h> 30 30 #include <linux/iommu-helper.h> 31 + /* 32 + * The semantics of 64 register access on 32bit systems can't be guaranteed 33 + * by the C standard, we hope the _lo_hi() macros defining readq and writeq 34 + * here will behave as expected. 35 + */ 36 + #include <linux/io-64-nonatomic-lo-hi.h> 31 37 32 38 #include <asm/byteorder.h> 33 39 #include <asm/io.h>
-2
include/asm-generic/signal.h
··· 5 5 #include <uapi/asm-generic/signal.h> 6 6 7 7 #ifndef __ASSEMBLY__ 8 - #ifdef SA_RESTORER 9 - #endif 10 8 11 9 #include <asm/sigcontext.h> 12 10 #undef __HAVE_ARCH_SIG_BITOPS