Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright © 2008 Keith Packard <keithp@keithp.com>
4 */
5
6#ifndef _LINUX_IO_MAPPING_H
7#define _LINUX_IO_MAPPING_H
8
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/bug.h>
12#include <linux/io.h>
13#include <linux/pgtable.h>
14#include <asm/page.h>
15
16/*
17 * The io_mapping mechanism provides an abstraction for mapping
18 * individual pages from an io device to the CPU in an efficient fashion.
19 *
20 * See Documentation/driver-api/io-mapping.rst
21 */
22
/*
 * struct io_mapping - a write-combining mapping of a device memory region.
 *
 * Either the whole region is mapped once (@iomem, non-fixmap build) or
 * individual pages are mapped on demand from @base using @prot
 * (CONFIG_HAVE_ATOMIC_IOMAP build).
 */
struct io_mapping {
	resource_size_t base;	/* physical/bus start address of the region */
	unsigned long size;	/* length of the region in bytes */
	pgprot_t prot;		/* protection used for per-page WC mappings */
	void __iomem *iomem;	/* full linear mapping; only set in the
				 * non-fixmap (ioremap_wc) implementation */
};
29
30#ifdef CONFIG_HAVE_ATOMIC_IOMAP
31
32#include <linux/pfn.h>
33#include <asm/iomap.h>
34/*
35 * For small address space machines, mapping large objects
36 * into the kernel virtual space isn't practical. Where
37 * available, use fixmap support to dynamically map pages
38 * of the object at run time.
39 */
40
/*
 * Initialize a caller-provided io_mapping for write-combined access to
 * [@base, @base + @size).  Returns @iomap on success, NULL on failure.
 */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	/*
	 * Arch hook: validates the region and hands back the pgprot to use
	 * for per-page mappings; nonzero return means failure.
	 */
	if (iomap_create_wc(base, size, &prot))
		return NULL;

	/* Only fill in the descriptor once the arch setup has succeeded. */
	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}
56
/* Release the arch resources taken by io_mapping_init_wc(). */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
62
/*
 * Atomic map/unmap: map a single page at @offset within the region.
 * The caller enters an atomic section (no sleeping, no faulting) that
 * lasts until the matching io_mapping_unmap_atomic().
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	/* @offset must fall inside the mapped region. */
	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	/*
	 * Disable preemption and pagefaults before taking the temporary
	 * mapping slot; io_mapping_unmap_atomic() re-enables them in
	 * reverse order.
	 */
	preempt_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
76
/*
 * Undo io_mapping_map_atomic_wc(): tear down the temporary mapping,
 * then leave the atomic section in the exact reverse order it was
 * entered (pagefaults first, then preemption).
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	preempt_enable();
}
84
/*
 * Map a single page at @offset for the current task only.  Unlike the
 * _atomic_ variant this does not disable preemption or pagefaults;
 * release with io_mapping_unmap_local().
 */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	resource_size_t phys_addr;

	/* @offset must fall inside the mapped region. */
	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
94
/* Undo io_mapping_map_local_wc(). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
99
/*
 * Map @size bytes starting at @offset with a fresh write-combined
 * ioremap; release with io_mapping_unmap().  May sleep.
 *
 * NOTE(review): only the start offset is range-checked here; an
 * @offset + @size overrun of the region is not caught by this BUG_ON.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}
112
/* Undo io_mapping_map_wc(): this variant owns a real ioremap. */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
118
119#else /* HAVE_ATOMIC_IOMAP */
120
121#include <linux/uaccess.h>
122
/*
 * Create the io_mapping object: map the entire region once with
 * write-combining; later map calls are plain pointer arithmetic into
 * this linear mapping.  Returns @iomap on success, NULL on failure.
 */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	/* Only fill in the descriptor once the ioremap has succeeded. */
	iomap->base = base;
	iomap->size = size;
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);

	return iomap;
}
139
/* Release the single linear mapping taken by io_mapping_init_wc(). */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
145
/*
 * Non-atomic map/unmap: the region is already fully mapped, so this is
 * pure pointer arithmetic — no new mapping is created and @size is
 * unused.  NOTE(review): neither @offset nor @offset + @size is
 * range-checked in this variant.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
154
/*
 * Deliberate no-op: the pointer came from the persistent linear mapping,
 * which lives until io_mapping_fini().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
159
/*
 * Atomic map/unmap: same pointer arithmetic as io_mapping_map_wc(), but
 * the caller enters an atomic section (preemption and pagefaults
 * disabled) to keep the contract identical to the fixmap implementation.
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
169
/*
 * Undo io_mapping_map_atomic_wc(); re-enables pagefaults and preemption
 * in the exact reverse order they were disabled.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
177
/*
 * Task-local variant: no atomic section needed here since no temporary
 * per-CPU resource is taken — it is plain pointer arithmetic.
 */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
183
/* Undo io_mapping_map_local_wc() (a no-op in this implementation). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}
188
189#endif /* !HAVE_ATOMIC_IOMAP */
190
191static inline struct io_mapping *
192io_mapping_create_wc(resource_size_t base,
193 unsigned long size)
194{
195 struct io_mapping *iomap;
196
197 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
198 if (!iomap)
199 return NULL;
200
201 if (!io_mapping_init_wc(iomap, base, size)) {
202 kfree(iomap);
203 return NULL;
204 }
205
206 return iomap;
207}
208
/*
 * Counterpart of io_mapping_create_wc(): tear down the mapping, then
 * free the heap-allocated descriptor.
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
215
216#endif /* _LINUX_IO_MAPPING_H */
217
218int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
219 unsigned long addr, unsigned long pfn, unsigned long size);