/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)
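
/*
 * Illustrative sketch (not taken from this header): attributes are a bitmask
 * that is ORed together and passed to the *_attrs allocation and mapping
 * helpers declared below. "dev", "size" and "dma_handle" are placeholder
 * names.
 *
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, cpu_addr, dma_handle,
 *			DMA_ATTR_WRITE_COMBINE);
 */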

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
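
/*
 * Illustrative sketch (not taken from this header): a streaming mapping must
 * be checked with dma_mapping_error() before use and unmapped with the same
 * size and direction. "dev", "page" and "len" are placeholder names.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	(hand addr to the device and wait for the transfer to complete)
 *	dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
 */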
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */
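
/*
 * Illustrative sketch (not taken from this header): the noncontiguous API
 * returns an sg_table that is already mapped for the given direction and may
 * optionally be vmap'ed for CPU access. "dev" and "size" are placeholder
 * names.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *			GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (vaddr) {
 *		(fill the buffer through vaddr, let the device work on sgt)
 *		dma_vunmap_noncontiguous(dev, vaddr);
 *	}
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */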

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfers use the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */
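
/*
 * Illustrative sketch (not taken from this header): one possible calling
 * pattern for the IOVA API above, with "dev", "phys" and "size" as
 * placeholder names and error handling abbreviated. A driver would typically
 * fall back to the regular mapping API when dma_iova_try_alloc() fails.
 *
 *	struct dma_iova_state state = { };
 *	int ret;
 *
 *	if (!dma_iova_try_alloc(dev, &state, phys, size))
 *		return -EIO;	(or fall back to dma_map_page_attrs() etc.)
 *	ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
 *	if (ret) {
 *		dma_iova_free(dev, &state);
 *		return ret;
 *	}
 *	ret = dma_iova_sync(dev, &state, 0, size);
 *	if (ret) {
 *		dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
 *		return ret;
 *	}
 *	(the device may now DMA to/from state.addr)
 *	dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
 */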

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
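
/*
 * Illustrative sketch (not taken from this header): for a long-lived
 * streaming mapping, ownership has to be bounced between CPU and device
 * around every CPU access. "dev", "addr" and "len" are placeholder names.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	(the CPU may now look at the data the device wrote)
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	(the device may now DMA into the buffer again)
 */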

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
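
/*
 * Illustrative sketch (not taken from this header): mapping a scatter-gather
 * buffer with the sg_table helpers above. "dev" and "sgt" are placeholder
 * names; the sg_table is assumed to have been populated already, e.g. with
 * sg_alloc_table_from_pages().
 *
 *	int ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	(program the device with the DMA addresses now stored in sgt)
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */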

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
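
/*
 * Illustrative sketch (not taken from this header): a typical probe-time
 * pattern is to request the widest mask the hardware supports and treat
 * failure as fatal. "dev" is a placeholder name.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		return ret;
 */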

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */