Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>

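/*
 * dev_is_dma_coherent() reports whether a device is cache-coherent with the
 * CPU.  Architectures can supply their own implementation through
 * <asm/dma-coherence.h>; otherwise, if any of the cache maintenance hooks
 * below are configured, the per-device dma_coherent flag is consulted, and
 * with none of them configured all devices are treated as coherent.
 */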
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

/*
 * Check if an allocation needs to be marked uncached to be coherent.
 */
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return false;
	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		return false;
	return true;
}

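/*
 * Illustrative sketch, not part of the upstream header: an architecture
 * allocator might use dma_alloc_need_uncached() to decide whether a buffer
 * it just allocated has to be remapped or retagged as uncached.
 * example_arch_alloc(), example_alloc_pages() and example_remap_uncached()
 * are hypothetical names used only for this sketch.
 */
#if 0
static void *example_arch_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret = example_alloc_pages(dev, size, dma_handle, gfp);

	/* Coherent devices and unmapped allocations can use the buffer as is. */
	if (ret && dma_alloc_need_uncached(dev, attrs))
		ret = example_remap_uncached(ret, size);
	return ret;
}
#endif
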
/*
 * Hooks for architectures that implement their own non-coherent DMA buffer
 * allocation instead of using the generic direct-mapping allocator.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif
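
/*
 * Illustrative sketch, not taken from any particular architecture: an
 * override with weaker semantics than pgprot_noncached(), assuming the
 * architecture can tolerate write-combined mappings for DMA buffers.  A
 * real override would come from the architecture's own headers.
 */
#if 0
#define pgprot_dmacoherent(prot)	pgprot_writecombine(prot)
#endif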

/*
 * dma_pgprot() computes the page protection used when a DMA buffer is
 * mapped into userspace or remapped into the kernel for a given device.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

/*
 * arch_dma_cache_sync() backs dma_cache_sync() and is used to flush or
 * invalidate caches for memory obtained with DMA_ATTR_NON_CONSISTENT.
 */
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
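
/*
 * Illustrative sketch, not part of the upstream header: roughly how a
 * streaming-mapping path might pair arch_sync_dma_for_device() and
 * arch_sync_dma_for_cpu() around a device transfer for a non-coherent
 * device.  example_map_single() and example_unmap_single() are hypothetical
 * names used only for this sketch.
 */
#if 0
static dma_addr_t example_map_single(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = virt_to_phys(ptr);

	/* Write dirty cache lines back before the device touches the buffer. */
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
	return paddr;		/* assumes a 1:1 phys-to-DMA translation */
}

static void example_unmap_single(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	/* Discard stale cache lines before the CPU reads device data. */
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(addr, size, dir);
}
#endif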

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

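/*
 * arch_dma_prep_coherent() lets an architecture flush the kernel mapping of
 * freshly allocated pages so they can safely be handed out, or remapped with
 * different cache attributes, as a DMA buffer.
 */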
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

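/*
 * Hooks for architectures that can provide an uncached alias of a kernel
 * mapping: arch_dma_set_uncached() is expected to return the uncached
 * address (or an ERR_PTR value on failure), and arch_dma_clear_uncached()
 * tears that alias down again when the buffer is freed.
 */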
void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#endif /* _LINUX_DMA_NONCOHERENT_H */