#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

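/*
 * Ops selection: in a Xen initial domain all DMA goes through xen_dma_ops;
 * otherwise a device uses the ops installed in dev->archdata.dma_ops via
 * set_dma_ops(), falling back to the default arm_dma_ops.
 */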
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
        if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
        return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (xen_initial_domain())
                return xen_dma_ops;
        else
                return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
        BUG_ON(!dev);
        dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
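
/*
 * Typical use from a driver probe routine (the platform device is
 * hypothetical):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */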

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        if (dev)
                pfn -= dev->dma_pfn_offset;
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        unsigned long pfn = __bus_to_pfn(addr);

        if (dev)
                pfn += dev->dma_pfn_offset;

        return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        if (dev) {
                unsigned long pfn = dma_to_pfn(dev, addr);

                return phys_to_virt(__pfn_to_phys(pfn));
        }

        return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        if (dev)
                return pfn_to_dma(dev, virt_to_pfn(addr));

        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
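/*
 * Defining the macro with its own name lets <linux/dma-mapping.h> detect,
 * via #ifndef, that this architecture overrides the generic version. The
 * same idiom is used for set_arch_dma_coherent_ops() below.
 */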

static inline int set_arch_dma_coherent_ops(struct device *dev)
{
        set_dma_ops(dev, &arm_coherent_dma_ops);
        return 0;
}
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)

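/*
 * Offset-preserving conversions between a CPU physical address and the
 * device (bus) address, built on the pfn helpers above.
 */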
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        unsigned int offset = paddr & ~PAGE_MASK;
        return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
        unsigned int offset = dev_addr & ~PAGE_MASK;
        return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

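/*
 * A buffer is usable for DMA by this device when the device has a DMA
 * mask, the whole [addr, addr + size) range is addressable under that
 * mask, and the size does not exceed the transfer limit implied by the
 * mask (limit is zero, meaning unlimited, for an all-ones mask).
 */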
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        u64 limit, mask;

        if (!dev->dma_mask)
                return 0;

        mask = *dev->dma_mask;

        limit = (mask + 1) & ~mask;
        if (limit && size > limit)
                return 0;

        if ((addr | (addr + size - 1)) & ~mask)
                return 0;

        return 1;
}

static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);
        return dma_addr == DMA_ERROR_CODE;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages and returns the CPU-viewed address, setting @handle
 * to the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                           gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;
        BUG_ON(!ops);

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal once this call has begun executing.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                         dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_handle,
                                  struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

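/*
 * Illustrative allocate/use/free cycle (a sketch; the device pointer,
 * size, and error handling are hypothetical):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... hand "dma" to the device, access the buffer through "cpu" ...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */
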
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs);

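/*
 * Drivers normally reach this through the generic dma_mmap_coherent()
 * wrapper from <asm-generic/dma-mapping-common.h>. A sketch of a file
 * mmap handler (all driver names hypothetical):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *p = file->private_data;
 *
 *		return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
 *					 p->dma_handle, p->size);
 *	}
 */
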
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

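/*
 * Sketch of a caller (board and pool size hypothetical), run from a
 * machine's .init_early hook so that it precedes postcore_initcall:
 *
 *	static void __init myboard_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */
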
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long, int (*)(struct device *, dma_addr_t, size_t));

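/*
 * Registration sketch (device, pool sizes, and the 64MiB limit are
 * hypothetical):
 *
 *	static int mydev_needs_bounce(struct device *dev, dma_addr_t addr,
 *				      size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	...
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, mydev_needs_bounce);
 */
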
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev() is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                struct dma_attrs *attrs);
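
/*
 * Illustrative scatter-gather use via the generic dma_map_sg()/dma_unmap_sg()
 * wrappers (the sg table and direction are hypothetical):
 *
 *	int nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 *	if (!nents)
 *		return -ENOMEM;
 *	... perform the transfer ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */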

#endif /* __KERNEL__ */
#endif