/* SuperH <asm/io.h>, as of v4.2-rc2 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)	({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)	({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)	({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)	({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)	({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)	({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)	({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)	({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)
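
/*
 * Usage sketch: how the accessors above differ in practice.  The device
 * base and register offsets here are hypothetical.  readl()/writel()
 * include the rmb()/wmb() barriers, the _relaxed variants only apply the
 * ioswab*() port mangling, and the __raw forms do neither.
 */
#if 0	/* illustrative example, never built */
static void example_mmio(void __iomem *base)
{
	u32 status;

	status = readl(base + 0x04);			/* ioswabl(), then rmb() */
	writel_relaxed(status | 1, base + 0x08);	/* ioswabl() only, no barrier */
	writel(1, base + 0x00);				/* wmb(), then ioswabl() */
}
#endif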

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
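
/*
 * Usage sketch for the port accessors generated above: on SuperH these
 * are plain memory accesses through __ioport_map(), not special I/O
 * instructions.  The port number and FIFO layout below are hypothetical.
 */
#if 0	/* illustrative example, never built */
static void example_portio(unsigned long port, u16 *buf, unsigned int words)
{
	outb(0x01, port);		/* hypothetical command register */
	insw(port + 2, buf, words);	/* drain a 16-bit data FIFO */
}
#endif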

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif


#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb() wmb()

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap
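
/*
 * Usage sketch for ioremap()/iounmap(): the physical address and register
 * offset are hypothetical.  On 29-bit parts the P1/P2 shortcut in
 * __ioremap_29bit() may satisfy this without touching page tables.
 */
#if 0	/* illustrative example, never built */
static int example_map_regs(phys_addr_t regs_phys)
{
	void __iomem *regs = ioremap(regs_phys, 0x100);

	if (!regs)
		return -ENOMEM;

	writel(0, regs);	/* hypothetical reset register */
	iounmap(regs);

	return 0;
}
#endif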

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */