/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need barrier() to fight side effects of the aliasing trick.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
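
/*
 * An illustrative sketch (not part of this header): a hypothetical
 * platform whose ISA I/O window lives at physical address 0x14000000
 * might establish the port base from its setup code.  The address and
 * the use of plat_mem_setup() here are assumptions for the example only.
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		set_io_port_base((unsigned long) CKSEG1ADDR(0x14000000));
 *	}
 *
 * After this, outb()/inb() and friends dereference
 * mips_io_port_base + port directly.
 */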

/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
 * use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/* Some callers use this older API instead.  */
#define mmiowb() iobarrier_w()

/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}

/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
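
/*
 * An illustrative sketch (not part of this header): for a buffer in the
 * kernel's direct mapping the two helpers invert each other; the buffer
 * and variable names are assumptions for the example only.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(pa) != buf);
 *
 * For device DMA, use the DMA mapping API instead of these helpers.
 */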

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap_prot     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}

/*
 * ioremap     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
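
/*
 * An illustrative sketch (not part of this header): a driver might map a
 * device's register window and poke it through the mmio helpers.  The
 * physical address and the register offsets below are assumptions for
 * the example only.
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 */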

/*
 * ioremap_nocache     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable

/*
 * ioremap_wc     -	map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance.  If it was determined at boot time
 * that the CPU CCA doesn't support UCA, this method falls back to the
 * _CACHE_UNCACHED option (see the cpu_probe() method).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
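
/*
 * An illustrative sketch (not part of this header): mapping an (assumed)
 * prefetchable PCIe BAR write-combined, e.g. for a framebuffer.  The BAR
 * index and variable names are assumptions for the example only.
 *
 *	resource_size_t start = pci_resource_start(pdev, 0);
 *	resource_size_t len = pci_resource_len(pdev, 0);
 *	void __iomem *fb = ioremap_wc(start, len);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, len);
 *	iounmap(fb);
 */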

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __writeq""\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	pop"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __readq" "\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	pop"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
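
/*
 * A sketch of what the expansions above generate (illustrative): for
 * BUILDIO_MEM(l, u32) the empty-prefix variant yields, among others,
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem);
 *	static inline u32 readl(const volatile void __iomem *mem);
 *
 * with __raw_, __relaxed_ and __mem_ prefixed variants generated
 * alongside, differing only in byte swapping and barrier behaviour.
 */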

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
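
/*
 * An illustrative sketch (not part of this header): the expansions above
 * provide outb()/inb() and friends.  Probing the (assumed) scratch
 * register of a legacy 16550 UART at the conventional port 0x3f8 might
 * look like this; the port number is an assumption for the example only.
 *
 *	outb(0x5a, 0x3f8 + 7);
 *	if (inb(0x3f8 + 7) != 0x5a)
 *		pr_warn("no UART scratch register at 0x3f8\n");
 */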

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)

#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#define readq_relaxed			__relaxed_readq

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#define writeq_relaxed			__relaxed_writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
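
/*
 * An illustrative sketch (not part of this header): the *_be accessors
 * access registers that are big-endian regardless of CPU endianness.
 * The mapping and register offsets below are assumptions for the
 * example only.
 *
 *	void __iomem *regs = ioremap(0x1f200000, 0x100);
 *	u32 id;
 *
 *	id = readl_be(regs);
 *	writel_be(0x1, regs + 0x4);
 */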

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
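
/*
 * An illustrative sketch (not part of this header): the string accessors
 * repeatedly access a single device location, which suits hardware FIFOs.
 * Draining an (assumed) 32-bit data FIFO at offset 0x20 of a mapping
 * regs; offset and names are assumptions for the example only.
 *
 *	u32 buf[16];
 *
 *	readsl(regs + 0x20, buf, ARRAY_SIZE(buf));
 */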

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
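
/*
 * An illustrative sketch (not part of this header): copying an (assumed)
 * firmware image into a device's mapped SRAM window; sram, dev and the
 * firmware name are assumptions for the example only.
 *
 *	const struct firmware *fw;
 *
 *	if (request_firmware(&fw, "dev.fw", dev) == 0) {
 *		memcpy_toio(sram, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */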

/*
 * The caches on some architectures aren't dma-coherent, so the kernel
 * has to handle this in software.  There are three types of operations
 * that can be applied to dma buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary,
 *   before DMA transfers from memory to a device.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches.  Dirty lines of the caches may be written back or simply
 *   be discarded.  This operation is necessary before dma operations
 *   to the memory.
 *
 * This API used to be exported; it is now for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
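
/*
 * An illustrative sketch (not part of this header) of arch-internal use:
 * before a device writes into a buffer by DMA, the stale cache lines
 * covering it must be invalidated.  start_device_dma() is a hypothetical
 * helper invented for the example only.
 *
 *	dma_cache_inv((unsigned long) buf, len);
 *	start_device_dma(buf, len);
 */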

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
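
/*
 * A worked example (illustrative): with an 8-byte-wide bus register at
 * address a, a big-endian kernel finds the low-order 32 bits at the
 * higher byte address, so csr_in32(a) dereferences a + 4; on a
 * little-endian kernel the adjustment is 0 and a is read directly.
 * csr_base below is an assumption for the example only.
 *
 *	u32 v = csr_in32(csr_base + 0x08);
 */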

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */