/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
 * coherent and it's not an SWIOTLB buffer.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)
#define DMA_F_CAN_SKIP_SYNC		(1 << 1)

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages_op)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
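
/*
 * Example (editorial sketch, not part of the upstream header): the rough
 * shape of a dma_map_ops instance as a bus or IOMMU layer might define one,
 * to be installed with set_dma_ops() (declared below). All foo_* callbacks
 * are hypothetical, and which members may be left NULL depends on the
 * fallbacks in kernel/dma/mapping.c, so a real implementation should be
 * checked against that file:
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.alloc			= foo_dma_alloc,
 *		.free			= foo_dma_free,
 *		.map_page		= foo_dma_map_page,
 *		.unmap_page		= foo_dma_unmap_page,
 *		.map_sg			= foo_dma_map_sg,
 *		.unmap_sg		= foo_dma_unmap_sg,
 *		.sync_single_for_cpu	= foo_dma_sync_single_for_cpu,
 *		.sync_single_for_device	= foo_dma_sync_single_for_device,
 *	};
 */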

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
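
/*
 * Example (editorial sketch): the intended pairing of the contiguous
 * helpers above. The CONFIG_DMA_CMA=n stubs are deliberately asymmetric:
 * dma_alloc_contiguous() returns NULL so the caller falls back to the
 * plain page allocator, while dma_free_contiguous() still frees the
 * pages, so both allocation paths can share a single release path:
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */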

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
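
/*
 * Example (editorial sketch): the container_of() pattern that sgt_handle()
 * encodes. A hypothetical ->free_noncontiguous implementation recovers the
 * dma_sgt_handle whose embedded sg_table its ->alloc_noncontiguous
 * counterpart handed out:
 *
 *	static void foo_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		...unmap the entries and free sh->pages...
 *		sg_free_table(&sh->sgt);
 *		kfree(sh);
 *	}
 */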

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);
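
/*
 * Example (editorial sketch): dma_direct_set_offset() is meant for bus or
 * firmware glue for devices that see system RAM at a constant offset. The
 * numbers below are made up; the call installs a single CPU-to-DMA offset
 * range and returns a negative errno if a range map is already installed
 * or cannot be allocated:
 *
 *	RAM at CPU address 0x40000000 appears to the device at bus address 0:
 *
 *	ret = dma_direct_set_offset(dev, 0x40000000, 0x00000000, SZ_1G);
 */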

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Reset it only once so that the function can be called on hot paths */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif
}

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
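
/*
 * Example (editorial sketch): how streaming-mapping code might use the
 * helper above to decide that a kmalloc()'ed buffer must be bounced
 * through SWIOTLB for a non-coherent device. This is a simplification of
 * what the dma-direct and IOMMU paths do at their real call sites:
 *
 *	if (dma_kmalloc_needs_bounce(dev, size, dir)) {
 *		...bounce via SWIOTLB and map the bounce copy instead...
 *	}
 */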

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
				  unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally to indicate that the
	 * mapping type hasn't been calculated yet. Functions that return
	 * this enum never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};
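
/*
 * Example (editorial sketch): how a ->map_sg implementation that sets
 * DMA_F_PCI_P2PDMA_SUPPORTED might dispatch on the map type per P2PDMA
 * segment, using pci_p2pdma_map_segment() declared below. Loosely modelled
 * on the dma-direct code, with the details simplified:
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (is_pci_p2pdma_page(sg_page(sg))) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev,
 *						       sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;	[dma address already set]
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;		[map normally below]
 *			default:
 *				return -EREMOTEIO;
 *			}
 *		}
 *		...normal mapping of sg...
 *	}
 */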

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */

#endif /* _LINUX_DMA_MAP_OPS_H */