/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(x)	(x)
# define __raw_ioswabw(x)	(x)
# define __raw_ioswabl(x)	(x)
# define __raw_ioswabq(x)	(x)
# define ____raw_ioswabq(x)	(x)

/*
 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
 * less sane hardware forces software to fiddle with this...
 *
 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
 * you can't have the numerical value of data and byte addresses within
 * multibyte quantities both preserved at the same time.  Hence two
 * variations of functions: non-prefixed ones that preserve the value
 * and prefixed ones that preserve byte addresses.  The latter are
 * typically used for moving raw data between a peripheral and memory (cf.
 * string I/O functions), hence the "__mem_" prefix.
 */
#if defined(CONFIG_SWAP_IO_SPACE)

# define ioswabb(x)		(x)
# define __mem_ioswabb(x)	(x)
# ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16-bit values in hardware, but
 * not 32-bit ones.  Go figure... Can't tell without documentation.
 */
# define ioswabw(x)		(x)
# define __mem_ioswabw(x)	le16_to_cpu(x)
# else
# define ioswabw(x)		le16_to_cpu(x)
# define __mem_ioswabw(x)	(x)
# endif
# define ioswabl(x)		le32_to_cpu(x)
# define __mem_ioswabl(x)	(x)
# define ioswabq(x)		le64_to_cpu(x)
# define __mem_ioswabq(x)	(x)

#else

# define ioswabb(x)		(x)
# define __mem_ioswabb(x)	(x)
# define ioswabw(x)		(x)
# define __mem_ioswabw(x)	cpu_to_le16(x)
# define ioswabl(x)		(x)
# define __mem_ioswabl(x)	cpu_to_le32(x)
# define ioswabq(x)		(x)
# define __mem_ioswabq(x)	cpu_to_le64(x)

#endif
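
/*
 * Example (illustrative only): the two swab flavours differ in what they
 * preserve.  Assuming CONFIG_SWAP_IO_SPACE on a big-endian host in front
 * of a little-endian bus (and no IP22 quirk), the value-preserving
 * flavour swaps so arithmetic on the result is correct, while the
 * "__mem_" flavour keeps the bus byte order so byte addresses survive a
 * raw copy.  The register value below is made up.
 */
#if 0	/* example only */
static void ioswab_example(void)
{
	u16 raw = 0x3412;		/* bytes as seen on the LE bus */

	u16 value = ioswabw(raw);	/* 0x1234: numeric value preserved */
	u16 bytes = __mem_ioswabw(raw);	/* 0x3412: byte order preserved */

	(void) value;
	(void) bytes;
}
#endif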
#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base)	\
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 *
 */

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
 *
 *     The returned physical address is the physical (CPU) mapping for
 *     the memory address given.  It is only valid to use this function on
 *     addresses directly mapped or allocated via kmalloc.
 *
 *     This function does not give bus mappings for DMA transfers.  In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

/*
 *     phys_to_virt    -       map physical address to virtual
 *     @address: address to remap
 *
 *     The returned virtual address is a current CPU mapping for
 *     the memory address given.  It is only valid to use this function on
 *     addresses that have a kernel mapping.
 *
 *     This function does not handle bus mappings for DMA transfers.  In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these buses.
 */
extern unsigned long isa_slot_offset;
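
/*
 * Example (illustrative only): virt_to_phys() and phys_to_virt() only
 * make sense for directly mapped kernel memory such as static data or a
 * kmalloc() buffer; a minimal sketch, assuming a hypothetical buffer.
 * Drivers wanting device-visible addresses should use the DMA API.
 */
#if 0	/* example only */
static char example_buf[64];	/* directly mapped kernel memory */

static void phys_virt_example(void)
{
	unsigned long pa = virt_to_phys(example_buf);

	/* Round-trips for directly mapped addresses. */
	BUG_ON(phys_to_virt(pa) != (void *) example_buf);
}
#endif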
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
	unsigned long flags)
{
#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
	    __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)

/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * buses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
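
/*
 * Example (illustrative only): a minimal sketch of mapping a device and
 * touching a register through the returned cookie.  MYDEV_PHYS,
 * MYDEV_SIZE and the register offsets are hypothetical.  Note that for
 * constant uncached mappings in the low 512MB, __ioremap_mode() above
 * short-circuits to a KSEG1 address and no page tables are involved.
 */
#if 0	/* example only */
#define MYDEV_PHYS	0x1f000000UL	/* hypothetical bus address */
#define MYDEV_SIZE	0x1000UL

static void mydev_example(void)
{
	void __iomem *regs = ioremap_nocache(MYDEV_PHYS, MYDEV_SIZE);
	u32 id;

	if (!regs)
		return;
	id = readl(regs + 0x00);	/* always via the mmio accessors, */
	writel(id | 1, regs + 0x04);	/* never by plain dereference     */
	iounmap(regs);
}
#endif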
/*
 * These two are MIPS-specific ioremap variants.  ioremap_cacheable_cow
 * requests a cacheable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)

static inline void iounmap(volatile void __iomem *addr)
{
#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(val);					\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __writeq""\n\t"	\
			"dsll32	%L0, %L0, 0"			"\n\t"	\
			"dsrl32	%L0, %L0, 0"			"\n\t"	\
			"dsll32	%M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	mips3"		"\t\t# __readq"	"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32	%M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	return pfx##ioswab##bwlq(__val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	port = __swizzle_addr_##bwlq(port);				\
	__addr = (void *)(mips_io_port_base + port);			\
									\
	__val = pfx##ioswab##bwlq(val);					\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	port = __swizzle_addr_##bwlq(port);				\
	__addr = (void *)(mips_io_port_base + port);			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwlq(__val);				\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
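
/*
 * Example (illustrative only): each BUILDIO_MEM() invocation above
 * expands __BUILD_MEMORY_PFX three times, generating three accessor
 * families per width: readb/writeb ... readq/writeq (value-preserving
 * swap), __raw_readb/__raw_writeb ... (no software swap at all) and
 * __mem_readb/__mem_writeb ... (byte-address-preserving swap, used by
 * the string helpers below).  A minimal sketch, assuming a hypothetical
 * mapping "regs":
 */
#if 0	/* example only */
static void accessor_example(void __iomem *regs)
{
	u32 host_order = readl(regs);		/* swapped if the bus needs it */
	u32 bus_order  = __raw_readl(regs);	/* exactly what is on the bus */

	writel(host_order + 1, regs);
	(void) bus_order;
}
#endif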
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif


/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

/*
 * Memory Mapped I/O
 */
#define ioread8(addr)		readb(addr)
#define ioread16(addr)		readw(addr)
#define ioread32(addr)		readl(addr)

#define iowrite8(b,addr)	writeb(b,addr)
#define iowrite16(w,addr)	writew(w,addr)
#define iowrite32(l,addr)	writel(l,addr)

#define ioread8_rep(a,b,c)	readsb(a,b,c)
#define ioread16_rep(a,b,c)	readsw(a,b,c)
#define ioread32_rep(a,b,c)	readsl(a,b,c)

#define iowrite8_rep(a,b,c)	writesb(a,b,c)
#define iowrite16_rep(a,b,c)	writesw(a,b,c)
#define iowrite32_rep(a,b,c)	writesl(a,b,c)

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
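
/*
 * Example (illustrative only): the port accessors generated above are
 * used like their x86 counterparts; on MIPS they compile down to loads
 * and stores relative to mips_io_port_base.  The UART-style port numbers
 * below are hypothetical.
 */
#if 0	/* example only */
static void port_example(void)
{
	u8 buf[16];
	u8 lsr;

	outb(0x00, 0x3f8 + 1);		/* single byte out          */
	lsr = inb(0x3f8 + 5);		/* single byte in           */
	insb(0x3f8, buf, sizeof(buf));	/* string (raw byte) input  */

	(void) lsr;
	(void) buf;
}
#endif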
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a)		readb(__ISA_IO_base + (a))
#define isa_readw(a)		readw(__ISA_IO_base + (a))
#define isa_readl(a)		readl(__ISA_IO_base + (a))
#define isa_readq(a)		readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)		writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)		writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)		writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)		writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

/*
 *     check_signature         -       find BIOS signatures
 *     @io_addr: mmio address to check
 *     @signature:  signature block
 *     @length: length of signature
 *
 *     Perform a signature comparison with the mmio address io_addr.  This
 *     address should have been obtained by ioremap.
 *     Returns 1 on a match.
 */
static inline int check_signature(char __iomem *io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
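
/*
 * Example (illustrative only): probing for a signature in a mapped ROM
 * window.  The window address, size and signature bytes are hypothetical.
 */
#if 0	/* example only */
static int rom_probe_example(void)
{
	static const unsigned char sig[] = { 0x55, 0xaa };
	char __iomem *rom = ioremap(0x1fc00000, 0x100);	/* hypothetical */
	int found = 0;

	if (rom) {
		found = check_signature(rom, sig, sizeof(sig));
		iounmap(rom);
	}
	return found;	/* 1 on a match */
}
#endif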
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) writes the content of the caches back
 *    to memory, if necessary, without invalidating the affected lines.
 *    This operation is necessary before DMA transfers from memory to a
 *    device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before DMA operations
 *    to the memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start,size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* _ASM_IO_H */
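
/*
 * Example (illustrative only): manual cache maintenance around a DMA
 * transfer with the dma_cache_* helpers above, for a noncoherent system.
 * The buffer and transfer are hypothetical; real drivers should normally
 * go through the DMA API, which does this internally.
 */
#if 0	/* example only */
static void dma_cache_example(void *buf, unsigned long len)
{
	/* CPU filled buf; write dirty lines back before the device reads. */
	dma_cache_wback((unsigned long) buf, len);

	/* ... device -> memory transfer into buf happens here ... */

	/* Drop stale lines before the CPU reads device-written data. */
	dma_cache_inv((unsigned long) buf, len);
}
#endif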