Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

struct cma;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
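
/*
 * Illustrative sketch (not part of the upstream header): a bus or IOMMU
 * layer that cannot use dma-direct fills in the subset of callbacks it
 * supports and leaves the rest NULL.  All mybus_* symbols below are
 * hypothetical; note that a real .map_sg callback must return 0 on error
 * and a value > 0 on success, never a negative value.
 *
 *	static const struct dma_map_ops mybus_dma_ops = {
 *		.alloc		= mybus_alloc,
 *		.free		= mybus_free,
 *		.map_page	= mybus_map_page,
 *		.unmap_page	= mybus_unmap_page,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *		.dma_supported	= mybus_dma_supported,
 *	};
 */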

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
		const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
		const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
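
/*
 * Illustrative sketch (assumption, mirroring how the DMA API core
 * dispatches): bus code installs its table with set_dma_ops(), and the
 * mapping helpers then go through get_dma_ops(), falling back to
 * dma-direct when no ops are installed:
 *
 *	set_dma_ops(dev, &mybus_dma_ops);
 *	...
 *	const struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *	if (!ops)
 *		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 *	else
 *		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 */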

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
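
/*
 * Illustrative sketch (assumption, modeled on the dma-direct allocator):
 * callers try the CMA area first and fall back to the plain page
 * allocator when dma_alloc_contiguous() returns NULL, which is always the
 * case with CONFIG_DMA_CMA=n:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 */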

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
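
/*
 * Illustrative sketch (assumption): a platform carves out a device-private
 * coherent pool at probe time, and the core allocator then satisfies
 * requests from that pool before consulting any dma_map_ops.  phys_base,
 * device_addr and pool_size are hypothetical platform values:
 *
 *	ret = dma_declare_coherent_memory(dev, phys_base, device_addr,
 *					  pool_size);
 *	...
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;
 */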

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
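
/*
 * Illustrative sketch (assumption, following the non-coherent allocation
 * path in dma-direct): freshly allocated pages are flushed out of the CPU
 * caches and then remapped into the vmalloc area with an uncached
 * protection before being handed to the caller:
 *
 *	arch_dma_prep_coherent(page, size);
 *	vaddr = dma_common_contiguous_remap(page, size,
 *			dma_pgprot(dev, PAGE_KERNEL, attrs),
 *			__builtin_return_address(0));
 */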

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
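
/*
 * Illustrative sketch (assumption): allocations that cannot block, e.g.
 * non-coherent memory requested with GFP_ATOMIC, come from the
 * pre-populated atomic pool.  The addressability check is supplied as a
 * callback; my_phys_addr_ok() is hypothetical and deliberately simple:
 *
 *	static bool my_phys_addr_ok(struct device *dev, phys_addr_t phys,
 *			size_t size)
 *	{
 *		return phys + size - 1 <= dma_get_mask(dev);
 *	}
 *
 *	page = dma_alloc_from_pool(dev, size, &vaddr, GFP_ATOMIC,
 *				   my_phys_addr_ok);
 */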

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
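
/*
 * Illustrative sketch (assumption): a platform whose devices see system
 * RAM at a different bus address than the CPU registers a constant
 * offset, here CPU address 0x80000000 showing up at bus address 0 for a
 * 256 MiB window:
 *
 *	ret = dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_256M);
 */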

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
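
/*
 * Illustrative sketch (assumption, a simplified version of the generic
 * dma_pgprot() implementation): coherent devices keep the normal
 * cacheable protection, everything else gets the possibly
 * arch-overridden DMA-coherent one:
 *
 *	pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
 *			unsigned long attrs)
 *	{
 *		if (dev_is_dma_coherent(dev))
 *			return prot;
 *		return pgprot_dmacoherent(prot);
 *	}
 */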

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
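
/*
 * Illustrative sketch (assumption, following the dma-direct streaming
 * path): for a non-coherent device the CPU caches are written back before
 * the device reads the buffer, and invalidated again before the CPU reads
 * what the device wrote:
 *
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_device(paddr, size, dir);
 *	... device performs the transfer ...
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_cpu(paddr, size, dir);
 */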

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
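
/*
 * Illustrative sketch (assumption, modeled on the firmware glue in the OF
 * and ACPI code): bus probing code derives the DMA window and coherence
 * from the firmware description and hands the device to the architecture,
 * which may install IOMMU-backed dma_map_ops:
 *
 *	coherent = of_dma_is_coherent(np);
 *	arch_setup_dma_ops(dev, dma_start, dma_size, iommu, coherent);
 */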

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */