include/linux/io-mapping.h, as of v4.15
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/io-mapping.txt
 */

struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */

static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;
	unsigned long pfn;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
	return iomap_atomic_prot_pfn(pfn, mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}

static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}

#else

#include <linux/uaccess.h>
#include <asm/pgtable.h>

/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->base = base;
	iomap->size = size;
	iomap->iomem = ioremap_wc(base, size);
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
#else
	iomap->prot = pgprot_noncached(PAGE_KERNEL);
#endif

	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}

/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}

#endif /* HAVE_ATOMIC_IOMAP */

static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
		     unsigned long size)
{
	struct io_mapping *iomap;

	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
	if (!iomap)
		return NULL;

	if (!io_mapping_init_wc(iomap, base, size)) {
		kfree(iomap);
		return NULL;
	}

	return iomap;
}

static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}

#endif /* _LINUX_IO_MAPPING_H */
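
Usage sketch (not part of the header): roughly how a driver might use the API above to write into a large write-combined aperture one page at a time, in the spirit of Documentation/io-mapping.txt. The aperture variable, the example_* function names, and the error handling are hypothetical.

/* Minimal, hypothetical driver-side use of the io_mapping API above. */
#include <linux/errno.h>
#include <linux/io-mapping.h>

static struct io_mapping *aperture;	/* hypothetical driver state */

/* Set up the mapping object once, e.g. at probe time. */
static int example_bind_aperture(resource_size_t base, unsigned long size)
{
	aperture = io_mapping_create_wc(base, size);
	return aperture ? 0 : -ENOMEM;
}

/* Write one 32-bit value at a byte offset inside the aperture. */
static void example_write_dword(unsigned long offset, u32 value)
{
	/*
	 * Map only the page containing @offset.  Pass a page-aligned
	 * offset and add the in-page offset to the returned address,
	 * which works on both the fixmap and the ioremap paths.
	 * Preemption and page faults are disabled between map and
	 * unmap, so this section must not sleep.
	 */
	void __iomem *vaddr = io_mapping_map_atomic_wc(aperture,
						       offset & PAGE_MASK);

	writel(value, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr);
}

/* Tear down at remove time. */
static void example_unbind_aperture(void)
{
	io_mapping_free(aperture);
}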