Merge branch 'io-mappings-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'io-mappings-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
io mapping: clean up #ifdefs
io mapping: improve documentation
i915: use io-mapping interfaces instead of a variety of mapping kludges
resources: add io-mapping functions to dynamically map large device apertures
x86: add iomap_atomic*()/iounmap_atomic() on 32-bit using fixmaps

+390 -105
+82
Documentation/io-mapping.txt
··· 1 + The io_mapping functions in linux/io-mapping.h provide an abstraction for 2 + efficiently mapping small regions of an I/O device to the CPU. The initial 3 + usage is to support the large graphics aperture on 32-bit processors where 4 + ioremap_wc cannot be used to statically map the entire aperture to the CPU 5 + as it would consume too much of the kernel address space. 6 + 7 + A mapping object is created during driver initialization using 8 + 9 + struct io_mapping *io_mapping_create_wc(unsigned long base, 10 + unsigned long size) 11 + 12 + 'base' is the bus address of the region to be made 13 + mappable, while 'size' indicates how large a mapping region to 14 + enable. Both are in bytes. 15 + 16 + This _wc variant provides a mapping which may only be used 17 + with the io_mapping_map_atomic_wc or io_mapping_map_wc. 18 + 19 + With this mapping object, individual pages can be mapped either atomically 20 + or not, depending on the necessary scheduling environment. Of course, atomic 21 + maps are more efficient: 22 + 23 + void *io_mapping_map_atomic_wc(struct io_mapping *mapping, 24 + unsigned long offset) 25 + 26 + 'offset' is the offset within the defined mapping region. 27 + Accessing addresses beyond the region specified in the 28 + creation function yields undefined results. Using an offset 29 + which is not page aligned yields an undefined result. The 30 + return value points to a single page in CPU address space. 31 + 32 + This _wc variant returns a write-combining map to the 33 + page and may only be used with mappings created by 34 + io_mapping_create_wc 35 + 36 + Note that the task may not sleep while holding this page 37 + mapped. 38 + 39 + void io_mapping_unmap_atomic(void *vaddr) 40 + 41 + 'vaddr' must be the value returned by the last 42 + io_mapping_map_atomic_wc call. This unmaps the specified 43 + page and allows the task to sleep once again. 
44 + 45 + If you need to sleep while holding the lock, you can use the non-atomic 46 + variant, although it may be significantly slower. 47 + 48 + void *io_mapping_map_wc(struct io_mapping *mapping, 49 + unsigned long offset) 50 + 51 + This works like io_mapping_map_atomic_wc except it allows 52 + the task to sleep while holding the page mapped. 53 + 54 + void io_mapping_unmap(void *vaddr) 55 + 56 + This works like io_mapping_unmap_atomic, except it is used 57 + for pages mapped with io_mapping_map_wc. 58 + 59 + At driver close time, the io_mapping object must be freed: 60 + 61 + void io_mapping_free(struct io_mapping *mapping) 62 + 63 + Current Implementation: 64 + 65 + The initial implementation of these functions uses existing mapping 66 + mechanisms and so provides only an abstraction layer and no new 67 + functionality. 68 + 69 + On 64-bit processors, io_mapping_create_wc calls ioremap_wc for the whole 70 + range, creating a permanent kernel-visible mapping to the resource. The 71 + map_atomic and map functions add the requested offset to the base of the 72 + virtual address returned by ioremap_wc. 73 + 74 + On 32-bit processors with HIGHMEM defined, io_mapping_map_atomic_wc uses 75 + kmap_atomic_pfn to map the specified page in an atomic fashion; 76 + kmap_atomic_pfn isn't really supposed to be used with device pages, but it 77 + provides an efficient mapping for this usage. 78 + 79 + On 32-bit processors without HIGHMEM defined, io_mapping_map_atomic_wc and 80 + io_mapping_map_wc both use ioremap_wc, a terribly inefficient function which 81 + performs an IPI to inform all processors about the new mapping. This results 82 + in a significant performance penalty.
+4
arch/x86/Kconfig
··· 1894 1894 endmenu 1895 1895 1896 1896 1897 + config HAVE_ATOMIC_IOMAP 1898 + def_bool y 1899 + depends on X86_32 1900 + 1897 1901 source "net/Kconfig" 1898 1902 1899 1903 source "drivers/Kconfig"
+4
arch/x86/include/asm/fixmap.h
··· 9 9 10 10 extern int fixmaps_set; 11 11 12 + extern pte_t *kmap_pte; 13 + extern pgprot_t kmap_prot; 14 + extern pte_t *pkmap_page_table; 15 + 12 16 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); 13 17 void native_set_fixmap(enum fixed_addresses idx, 14 18 unsigned long phys, pgprot_t flags);
-4
arch/x86/include/asm/fixmap_32.h
··· 28 28 #include <asm/acpi.h> 29 29 #include <asm/apicdef.h> 30 30 #include <asm/page.h> 31 - #ifdef CONFIG_HIGHMEM 32 31 #include <linux/threads.h> 33 32 #include <asm/kmap_types.h> 34 - #endif 35 33 36 34 /* 37 35 * Here we define all the compile-time 'special' virtual ··· 73 75 #ifdef CONFIG_X86_CYCLONE_TIMER 74 76 FIX_CYCLONE_TIMER, /*cyclone timer register*/ 75 77 #endif 76 - #ifdef CONFIG_HIGHMEM 77 78 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 78 79 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 79 - #endif 80 80 #ifdef CONFIG_PCI_MMCONFIG 81 81 FIX_PCIE_MCFG, 82 82 #endif
+1 -4
arch/x86/include/asm/highmem.h
··· 25 25 #include <asm/kmap_types.h> 26 26 #include <asm/tlbflush.h> 27 27 #include <asm/paravirt.h> 28 + #include <asm/fixmap.h> 28 29 29 30 /* declarations for highmem.c */ 30 31 extern unsigned long highstart_pfn, highend_pfn; 31 - 32 - extern pte_t *kmap_pte; 33 - extern pgprot_t kmap_prot; 34 - extern pte_t *pkmap_page_table; 35 32 36 33 /* 37 34 * Right now we initialize only a single pte table. It can be extended
+1 -1
arch/x86/mm/Makefile
··· 1 1 obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 2 2 pat.o pgtable.o gup.o 3 3 4 - obj-$(CONFIG_X86_32) += pgtable_32.o 4 + obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o 5 5 6 6 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 7 7 obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
+1 -2
arch/x86/mm/init_32.c
··· 334 334 return 0; 335 335 } 336 336 337 - #ifdef CONFIG_HIGHMEM 338 337 pte_t *kmap_pte; 339 338 pgprot_t kmap_prot; 340 339 ··· 356 357 kmap_prot = PAGE_KERNEL; 357 358 } 358 359 360 + #ifdef CONFIG_HIGHMEM 359 361 static void __init permanent_kmaps_init(pgd_t *pgd_base) 360 362 { 361 363 unsigned long vaddr; ··· 436 436 #endif /* !CONFIG_NUMA */ 437 437 438 438 #else 439 - # define kmap_init() do { } while (0) 440 439 # define permanent_kmaps_init(pgd_base) do { } while (0) 441 440 # define set_highmem_pages_init() do { } while (0) 442 441 #endif /* CONFIG_HIGHMEM */
+59
arch/x86/mm/iomap_32.c
··· 1 + /* 2 + * Copyright © 2008 Ingo Molnar 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, but 10 + * WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 + * General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along 15 + * with this program; if not, write to the Free Software Foundation, Inc., 16 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 17 + */ 18 + 19 + #include <asm/iomap.h> 20 + #include <linux/module.h> 21 + 22 + /* Map 'pfn' using fixed map 'type' and protections 'prot' 23 + */ 24 + void * 25 + iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 26 + { 27 + enum fixed_addresses idx; 28 + unsigned long vaddr; 29 + 30 + pagefault_disable(); 31 + 32 + idx = type + KM_TYPE_NR*smp_processor_id(); 33 + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 34 + set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); 35 + arch_flush_lazy_mmu_mode(); 36 + 37 + return (void*) vaddr; 38 + } 39 + EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); 40 + 41 + void 42 + iounmap_atomic(void *kvaddr, enum km_type type) 43 + { 44 + unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 45 + enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); 46 + 47 + /* 48 + * Force other mappings to Oops if they'll try to access this pte 49 + * without first remap it. Keeping stale mappings around is a bad idea 50 + * also, in case the page changes cacheability attributes or becomes 51 + * a protected page in a hypervisor. 
52 + */ 53 + if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) 54 + kpte_clear_flush(kmap_pte-idx, vaddr); 55 + 56 + arch_flush_lazy_mmu_mode(); 57 + pagefault_enable(); 58 + } 59 + EXPORT_SYMBOL_GPL(iounmap_atomic);
+3
drivers/gpu/drm/i915/i915_drv.h
··· 31 31 #define _I915_DRV_H_ 32 32 33 33 #include "i915_reg.h" 34 + #include <linux/io-mapping.h> 34 35 35 36 /* General customization: 36 37 */ ··· 246 245 247 246 struct { 248 247 struct drm_mm gtt_space; 248 + 249 + struct io_mapping *gtt_mapping; 249 250 250 251 /** 251 252 * List of objects currently involved in rendering from the
+80 -94
drivers/gpu/drm/i915/i915_gem.c
··· 193 193 return 0; 194 194 } 195 195 196 - /* 197 - * Try to write quickly with an atomic kmap. Return true on success. 198 - * 199 - * If this fails (which includes a partial write), we'll redo the whole 200 - * thing with the slow version. 201 - * 202 - * This is a workaround for the low performance of iounmap (approximate 203 - * 10% cpu cost on normal 3D workloads). kmap_atomic on HIGHMEM kernels 204 - * happens to let us map card memory without taking IPIs. When the vmap 205 - * rework lands we should be able to dump this hack. 196 + /* This is the fast write path which cannot handle 197 + * page faults in the source data 206 198 */ 207 - static inline int fast_user_write(unsigned long pfn, char __user *user_data, 208 - int l, int o) 209 - { 210 - #ifdef CONFIG_HIGHMEM 211 - unsigned long unwritten; 212 - char *vaddr_atomic; 213 199 214 - vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0); 215 - #if WATCH_PWRITE 216 - DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", 217 - i, o, l, pfn, vaddr_atomic); 218 - #endif 219 - unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, user_data, l); 220 - kunmap_atomic(vaddr_atomic, KM_USER0); 221 - return !unwritten; 222 - #else 200 + static inline int 201 + fast_user_write(struct io_mapping *mapping, 202 + loff_t page_base, int page_offset, 203 + char __user *user_data, 204 + int length) 205 + { 206 + char *vaddr_atomic; 207 + unsigned long unwritten; 208 + 209 + vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 210 + unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, 211 + user_data, length); 212 + io_mapping_unmap_atomic(vaddr_atomic); 213 + if (unwritten) 214 + return -EFAULT; 223 215 return 0; 224 - #endif 216 + } 217 + 218 + /* Here's the write path which can sleep for 219 + * page faults 220 + */ 221 + 222 + static inline int 223 + slow_user_write(struct io_mapping *mapping, 224 + loff_t page_base, int page_offset, 225 + char __user *user_data, 226 + int length) 227 + { 
228 + char __iomem *vaddr; 229 + unsigned long unwritten; 230 + 231 + vaddr = io_mapping_map_wc(mapping, page_base); 232 + if (vaddr == NULL) 233 + return -EFAULT; 234 + unwritten = __copy_from_user(vaddr + page_offset, 235 + user_data, length); 236 + io_mapping_unmap(vaddr); 237 + if (unwritten) 238 + return -EFAULT; 239 + return 0; 225 240 } 226 241 227 242 static int ··· 245 230 struct drm_file *file_priv) 246 231 { 247 232 struct drm_i915_gem_object *obj_priv = obj->driver_private; 233 + drm_i915_private_t *dev_priv = dev->dev_private; 248 234 ssize_t remain; 249 - loff_t offset; 235 + loff_t offset, page_base; 250 236 char __user *user_data; 251 - int ret = 0; 237 + int page_offset, page_length; 238 + int ret; 252 239 253 240 user_data = (char __user *) (uintptr_t) args->data_ptr; 254 241 remain = args->size; ··· 274 257 obj_priv->dirty = 1; 275 258 276 259 while (remain > 0) { 277 - unsigned long pfn; 278 - int i, o, l; 279 - 280 260 /* Operation in this page 281 261 * 282 - * i = page number 283 - * o = offset within page 284 - * l = bytes to copy 262 + * page_base = page offset within aperture 263 + * page_offset = offset within page 264 + * page_length = bytes to copy for this page 285 265 */ 286 - i = offset >> PAGE_SHIFT; 287 - o = offset & (PAGE_SIZE-1); 288 - l = remain; 289 - if ((o + l) > PAGE_SIZE) 290 - l = PAGE_SIZE - o; 266 + page_base = (offset & ~(PAGE_SIZE-1)); 267 + page_offset = offset & (PAGE_SIZE-1); 268 + page_length = remain; 269 + if ((page_offset + remain) > PAGE_SIZE) 270 + page_length = PAGE_SIZE - page_offset; 291 271 292 - pfn = (dev->agp->base >> PAGE_SHIFT) + i; 272 + ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, 273 + page_offset, user_data, page_length); 293 274 294 - if (!fast_user_write(pfn, user_data, l, o)) { 295 - unsigned long unwritten; 296 - char __iomem *vaddr; 297 - 298 - vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); 299 - #if WATCH_PWRITE 300 - DRM_INFO("pwrite slow i %d o %d l %d " 301 - "pfn %ld 
vaddr %p\n", 302 - i, o, l, pfn, vaddr); 303 - #endif 304 - if (vaddr == NULL) { 305 - ret = -EFAULT; 275 + /* If we get a fault while copying data, then (presumably) our 276 + * source page isn't available. In this case, use the 277 + * non-atomic function 278 + */ 279 + if (ret) { 280 + ret = slow_user_write (dev_priv->mm.gtt_mapping, 281 + page_base, page_offset, 282 + user_data, page_length); 283 + if (ret) 306 284 goto fail; 307 - } 308 - unwritten = __copy_from_user(vaddr + o, user_data, l); 309 - #if WATCH_PWRITE 310 - DRM_INFO("unwritten %ld\n", unwritten); 311 - #endif 312 - iounmap(vaddr); 313 - if (unwritten) { 314 - ret = -EFAULT; 315 - goto fail; 316 - } 317 285 } 318 286 319 - remain -= l; 320 - user_data += l; 321 - offset += l; 287 + remain -= page_length; 288 + user_data += page_length; 289 + offset += page_length; 322 290 } 323 - #if WATCH_PWRITE && 1 324 - i915_gem_clflush_object(obj); 325 - i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); 326 - i915_gem_clflush_object(obj); 327 - #endif 328 291 329 292 fail: 330 293 i915_gem_object_unpin(obj); ··· 1522 1525 struct drm_i915_gem_exec_object *entry) 1523 1526 { 1524 1527 struct drm_device *dev = obj->dev; 1528 + drm_i915_private_t *dev_priv = dev->dev_private; 1525 1529 struct drm_i915_gem_relocation_entry reloc; 1526 1530 struct drm_i915_gem_relocation_entry __user *relocs; 1527 1531 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1528 1532 int i, ret; 1529 - uint32_t last_reloc_offset = -1; 1530 - void __iomem *reloc_page = NULL; 1533 + void __iomem *reloc_page; 1531 1534 1532 1535 /* Choose the GTT offset for our buffer and put it there. */ 1533 1536 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); ··· 1650 1653 * perform. 
1651 1654 */ 1652 1655 reloc_offset = obj_priv->gtt_offset + reloc.offset; 1653 - if (reloc_page == NULL || 1654 - (last_reloc_offset & ~(PAGE_SIZE - 1)) != 1655 - (reloc_offset & ~(PAGE_SIZE - 1))) { 1656 - if (reloc_page != NULL) 1657 - iounmap(reloc_page); 1658 - 1659 - reloc_page = ioremap_wc(dev->agp->base + 1660 - (reloc_offset & 1661 - ~(PAGE_SIZE - 1)), 1662 - PAGE_SIZE); 1663 - last_reloc_offset = reloc_offset; 1664 - if (reloc_page == NULL) { 1665 - drm_gem_object_unreference(target_obj); 1666 - i915_gem_object_unpin(obj); 1667 - return -ENOMEM; 1668 - } 1669 - } 1670 - 1656 + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1657 + (reloc_offset & 1658 + ~(PAGE_SIZE - 1))); 1671 1659 reloc_entry = (uint32_t __iomem *)(reloc_page + 1672 - (reloc_offset & (PAGE_SIZE - 1))); 1660 + (reloc_offset & (PAGE_SIZE - 1))); 1673 1661 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 1674 1662 1675 1663 #if WATCH_BUF ··· 1663 1681 readl(reloc_entry), reloc_val); 1664 1682 #endif 1665 1683 writel(reloc_val, reloc_entry); 1684 + io_mapping_unmap_atomic(reloc_page); 1666 1685 1667 1686 /* Write the updated presumed offset for this entry back out 1668 1687 * to the user. 
··· 1678 1695 1679 1696 drm_gem_object_unreference(target_obj); 1680 1697 } 1681 - 1682 - if (reloc_page != NULL) 1683 - iounmap(reloc_page); 1684 1698 1685 1699 #if WATCH_BUF 1686 1700 if (0) ··· 2520 2540 if (ret != 0) 2521 2541 return ret; 2522 2542 2543 + dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, 2544 + dev->agp->agp_info.aper_size 2545 + * 1024 * 1024); 2546 + 2523 2547 mutex_lock(&dev->struct_mutex); 2524 2548 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2525 2549 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ··· 2541 2557 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 2542 2558 struct drm_file *file_priv) 2543 2559 { 2560 + drm_i915_private_t *dev_priv = dev->dev_private; 2544 2561 int ret; 2545 2562 2546 2563 ret = i915_gem_idle(dev); 2547 2564 drm_irq_uninstall(dev); 2548 2565 2566 + io_mapping_free(dev_priv->mm.gtt_mapping); 2549 2567 return ret; 2550 2568 } 2551 2569
+30
include/asm-x86/iomap.h
··· 1 + /* 2 + * Copyright © 2008 Ingo Molnar 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, but 10 + * WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 + * General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along 15 + * with this program; if not, write to the Free Software Foundation, Inc., 16 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 17 + */ 18 + 19 + #include <linux/fs.h> 20 + #include <linux/mm.h> 21 + #include <linux/uaccess.h> 22 + #include <asm/cacheflush.h> 23 + #include <asm/pgtable.h> 24 + #include <asm/tlbflush.h> 25 + 26 + void * 27 + iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 28 + 29 + void 30 + iounmap_atomic(void *kvaddr, enum km_type type);
+125
include/linux/io-mapping.h
··· 1 + /* 2 + * Copyright © 2008 Keith Packard <keithp@keithp.com> 3 + * 4 + * This file is free software; you can redistribute it and/or modify 5 + * it under the terms of version 2 of the GNU General Public License 6 + * as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, write to the Free Software Foundation, 15 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 16 + */ 17 + 18 + #ifndef _LINUX_IO_MAPPING_H 19 + #define _LINUX_IO_MAPPING_H 20 + 21 + #include <linux/types.h> 22 + #include <asm/io.h> 23 + #include <asm/page.h> 24 + #include <asm/iomap.h> 25 + 26 + /* 27 + * The io_mapping mechanism provides an abstraction for mapping 28 + * individual pages from an io device to the CPU in an efficient fashion. 29 + * 30 + * See Documentation/io_mapping.txt 31 + */ 32 + 33 + /* this struct isn't actually defined anywhere */ 34 + struct io_mapping; 35 + 36 + #ifdef CONFIG_HAVE_ATOMIC_IOMAP 37 + 38 + /* 39 + * For small address space machines, mapping large objects 40 + * into the kernel virtual space isn't practical. Where 41 + * available, use fixmap support to dynamically map pages 42 + * of the object at run time. 
43 + */ 44 + 45 + static inline struct io_mapping * 46 + io_mapping_create_wc(unsigned long base, unsigned long size) 47 + { 48 + return (struct io_mapping *) base; 49 + } 50 + 51 + static inline void 52 + io_mapping_free(struct io_mapping *mapping) 53 + { 54 + } 55 + 56 + /* Atomic map/unmap */ 57 + static inline void * 58 + io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) 59 + { 60 + offset += (unsigned long) mapping; 61 + return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0, 62 + __pgprot(__PAGE_KERNEL_WC)); 63 + } 64 + 65 + static inline void 66 + io_mapping_unmap_atomic(void *vaddr) 67 + { 68 + iounmap_atomic(vaddr, KM_USER0); 69 + } 70 + 71 + static inline void * 72 + io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 73 + { 74 + offset += (unsigned long) mapping; 75 + return ioremap_wc(offset, PAGE_SIZE); 76 + } 77 + 78 + static inline void 79 + io_mapping_unmap(void *vaddr) 80 + { 81 + iounmap(vaddr); 82 + } 83 + 84 + #else 85 + 86 + /* Create the io_mapping object*/ 87 + static inline struct io_mapping * 88 + io_mapping_create_wc(unsigned long base, unsigned long size) 89 + { 90 + return (struct io_mapping *) ioremap_wc(base, size); 91 + } 92 + 93 + static inline void 94 + io_mapping_free(struct io_mapping *mapping) 95 + { 96 + iounmap(mapping); 97 + } 98 + 99 + /* Atomic map/unmap */ 100 + static inline void * 101 + io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) 102 + { 103 + return ((char *) mapping) + offset; 104 + } 105 + 106 + static inline void 107 + io_mapping_unmap_atomic(void *vaddr) 108 + { 109 + } 110 + 111 + /* Non-atomic map/unmap */ 112 + static inline void * 113 + io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 114 + { 115 + return ((char *) mapping) + offset; 116 + } 117 + 118 + static inline void 119 + io_mapping_unmap(void *vaddr) 120 + { 121 + } 122 + 123 + #endif /* HAVE_ATOMIC_IOMAP */ 124 + 125 + #endif /* _LINUX_IO_MAPPING_H */