/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(x)	(x)
# define __raw_ioswabw(x)	(x)
# define __raw_ioswabl(x)	(x)
# define __raw_ioswabq(x)	(x)
# define ____raw_ioswabq(x)	(x)

/*
 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
 * less sane hardware forces software to fiddle with this...
 *
 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
 * you can't have the numerical value of data and byte addresses within
 * multibyte quantities both preserved at the same time.  Hence two
 * variations of functions: non-prefixed ones that preserve the value
 * and prefixed ones that preserve byte addresses.  The latter are
 * typically used for moving raw data between a peripheral and memory (cf.
 * string I/O functions), hence the "mem_" prefix.
 */
#if defined(CONFIG_SWAP_IO_SPACE)

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16bit values in hardware, but
 * not 32bit.  Go figure... Can't tell without documentation.
 */
#  define ioswabw(x)		(x)
#  define mem_ioswabw(x)	le16_to_cpu(x)
# else
#  define ioswabw(x)		le16_to_cpu(x)
#  define mem_ioswabw(x)	(x)
# endif
# define ioswabl(x)		le32_to_cpu(x)
# define mem_ioswabl(x)		(x)
# define ioswabq(x)		le64_to_cpu(x)
# define mem_ioswabq(x)		(x)

#else

# define ioswabb(x)		(x)
# define mem_ioswabb(x)		(x)
# define ioswabw(x)		(x)
# define mem_ioswabw(x)		cpu_to_le16(x)
# define ioswabl(x)		(x)
# define mem_ioswabl(x)		cpu_to_le32(x)
# define ioswabq(x)		(x)
# define mem_ioswabq(x)		cpu_to_le64(x)

#endif

#define IO_SPACE_LIMIT 0xffff
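/*
 * Editor's illustration (not part of the original header), assuming a
 * big-endian CPU with CONFIG_SWAP_IO_SPACE and a little-endian device
 * register that holds 0x12345678 on the bus:
 *
 *	u32 val = readl(ioaddr);	// ioswabl(): numerical value
 *					// preserved, val == 0x12345678
 *	u32 raw;
 *	readsl(ioaddr, &raw, 1);	// mem_ioswabl(): byte addresses
 *					// preserved, raw's bytes stay in
 *					// bus order (78 56 34 12)
 *
 * As the comment above explains, the two properties cannot both hold
 * when the endiannesses mismatch, hence the two accessor families.
 */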
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base) \
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these buses.
 */
extern unsigned long isa_slot_offset;
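/*
 * Editor's sketch (not part of the original header): virt_to_phys()
 * merely undoes the direct PAGE_OFFSET mapping, so it is only valid for
 * directly-mapped memory such as kmalloc() results, never for vmalloc()
 * areas or ioremap() cookies:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);	// hypothetical buffer
 *	unsigned long pa = virt_to_phys(buf);	// i.e. buf - PAGE_OFFSET
 *
 *	BUG_ON(buf && phys_to_virt(pa) != buf);	// round-trips by definition
 */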
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
	    __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * buses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
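/*
 * Editor's sketch of typical usage (hypothetical device; DEV_BASE and
 * DEV_SIZE are assumptions, not defined in this header):
 *
 *	void __iomem *regs = ioremap(DEV_BASE, DEV_SIZE);
 *	u32 id;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	id = readl(regs);	// always go through the mmio helpers;
 *				// the cookie need not be a plain
 *				// dereferenceable virtual address
 *	iounmap(regs);
 */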
/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cacheable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size) \
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(volatile void __iomem *addr)
{
#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
 \
static inline void pfx##write##bwlq(type val, \
				    volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
		type __tmp; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set	mips3"		"\t\t# __writeq""\n\t" \
			"dsll32	%L0, %L0, 0"	"\n\t" \
			"dsrl32	%L0, %L0, 0"	"\n\t" \
			"dsll32	%M0, %M0, 0"	"\n\t" \
			"or	%L0, %L0, %M0"	"\n\t" \
			"sd	%L0, %2"	"\n\t" \
			".set	mips0"		"\n" \
			: "=r" (__tmp) \
			: "0" (__val), "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else \
		BUG(); \
} \
 \
static inline type pfx##read##bwlq(volatile void __iomem *mem) \
{ \
	volatile type *__mem; \
	type __val; \
 \
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
 \
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem; \
	else if (cpu_has_64bits) { \
		unsigned long __flags; \
 \
		if (irq) \
			local_irq_save(__flags); \
		__asm__ __volatile__( \
			".set	mips3"		"\t\t# __readq" "\n\t" \
			"ld	%L0, %1"	"\n\t" \
			"dsra32	%M0, %L0, 0"	"\n\t" \
			"sll	%L0, %L0, 0"	"\n\t" \
			".set	mips0"		"\n" \
			: "=r" (__val) \
			: "m" (*__mem)); \
		if (irq) \
			local_irq_restore(__flags); \
	} else { \
		__val = 0; \
		BUG(); \
	} \
 \
	return pfx##ioswab##bwlq(__val); \
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
 \
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	__val = pfx##ioswab##bwlq(val); \
 \
	/* Really, we want this to be atomic */ \
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
 \
	*__addr = __val; \
	slow; \
} \
 \
static inline type pfx##in##bwlq##p(unsigned long port) \
{ \
	volatile type *__addr; \
	type __val; \
 \
	port = __swizzle_addr_##bwlq(port); \
	__addr = (void *)(mips_io_port_base + port); \
 \
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
 \
	__val = *__addr; \
	slow; \
 \
	return pfx##ioswab##bwlq(__val); \
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type) \
 \
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(mem_, bwlq, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
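/*
 * Editor's note: each BUILDIO_MEM() line above stamps out three accessor
 * families per width via __BUILD_MEMORY_SINGLE: __raw_readX/__raw_writeX
 * (no software swapping at all), readX/writeX (value-preserving via
 * ioswabX) and mem_readX/mem_writeX (byte-address-preserving via
 * mem_ioswabX).  For instance, after BUILDIO_MEM(w, u16):
 *
 *	u16 v = readw(mem);		// swapped per ioswabw()
 *	u16 r = __raw_readw(mem);	// bits exactly as seen on the bus
 */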
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type) \
	__BUILD_IOPORT_PFX(, bwlq, type) \
	__BUILD_IOPORT_PFX(mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type) \
 \
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type) \
 \
static inline void writes##bwlq(volatile void __iomem *mem, \
				const void *addr, unsigned int count) \
{ \
	const volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_write##bwlq(*__addr, mem); \
		__addr++; \
	} \
} \
 \
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
			       unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_read##bwlq(mem); \
		__addr++; \
	} \
}

#define __BUILD_IOPORT_STRING(bwlq, type) \
 \
static inline void outs##bwlq(unsigned long port, const void *addr, \
			      unsigned int count) \
{ \
	const volatile type *__addr = addr; \
 \
	while (count--) { \
		mem_out##bwlq(*__addr, port); \
		__addr++; \
	} \
} \
 \
static inline void ins##bwlq(unsigned long port, void *addr, \
			     unsigned int count) \
{ \
	volatile type *__addr = addr; \
 \
	while (count--) { \
		*__addr = mem_in##bwlq(port); \
		__addr++; \
	} \
}

#define BUILDSTRING(bwlq, type) \
 \
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif


/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

/*
 * Memory Mapped I/O
 */
#define ioread8(addr)		readb(addr)
#define ioread16(addr)		readw(addr)
#define ioread32(addr)		readl(addr)

#define iowrite8(b,addr)	writeb(b,addr)
#define iowrite16(w,addr)	writew(w,addr)
#define iowrite32(l,addr)	writel(l,addr)

#define ioread8_rep(a,b,c)	readsb(a,b,c)
#define ioread16_rep(a,b,c)	readsw(a,b,c)
#define ioread32_rep(a,b,c)	readsl(a,b,c)

#define iowrite8_rep(a,b,c)	writesb(a,b,c)
#define iowrite16_rep(a,b,c)	writesw(a,b,c)
#define iowrite32_rep(a,b,c)	writesl(a,b,c)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
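/*
 * Editor's sketch (hypothetical legacy UART; the port number is an
 * assumption, not taken from this header): ioport_map() turns a port
 * number into an __iomem cookie so a driver can use the single
 * ioreadX/iowriteX family for port and memory space alike:
 *
 *	void __iomem *p = ioport_map(0x3f8, 8);
 *	u8 lsr = ioread8(p + 5);	// equivalent to inb(0x3f8 + 5)
 *	ioport_unmap(p);
 */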
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a)		readb(__ISA_IO_base + (a))
#define isa_readw(a)		readw(__ISA_IO_base + (a))
#define isa_readl(a)		readl(__ISA_IO_base + (a))
#define isa_readq(a)		readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)		writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)		writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)		writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)		writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)	memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)	memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

/*
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr.  This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
static inline int check_signature(char __iomem *io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
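/*
 * Editor's sketch of check_signature() usage (hypothetical ROM probe;
 * the address and signature string are assumptions, not taken from
 * this header):
 *
 *	static const unsigned char sig[] = "QRY";
 *	char __iomem *rom = ioremap(0x1fc00000, 0x100);
 *
 *	if (rom && check_signature(rom, sig, 3))
 *		printk(KERN_INFO "device present\n");	// all 3 bytes matched
 */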
/*
 * The caches on some architectures aren't dma-coherent, so coherency
 * has to be handled in software.  There are three types of operations
 * that can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    This is necessary before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start,size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */