linux/dma-mapping.h at v4.8
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
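
/*
 * Example (illustrative sketch, not part of this header): a driver might
 * combine attributes when allocating a buffer it never touches through a
 * kernel virtual address. "my_dev" and "MY_BUF_SIZE" are placeholders;
 * dma_alloc_attrs()/dma_free_attrs() are declared later in this file.
 *
 *	dma_addr_t handle;
 *	void *cookie;
 *
 *	cookie = dma_alloc_attrs(&my_dev->dev, MY_BUF_SIZE, &handle,
 *				 GFP_KERNEL,
 *				 DMA_ATTR_WRITE_COMBINE |
 *				 DMA_ATTR_NO_KERNEL_MAPPING);
 *	if (!cookie)
 *		return -ENOMEM;
 *	... hand "handle" to the device; "cookie" is only kept for freeing ...
 *	dma_free_attrs(&my_dev->dev, MY_BUF_SIZE, cookie, handle,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING);
 */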

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                       dma_addr_t *dma_handle, gfp_t gfp,
                       unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                     void *vaddr, dma_addr_t dma_handle,
                     unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                    void *, dma_addr_t, size_t,
                    unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                           dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction dir,
                         unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
#endif
        int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
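
/*
 * Example (illustrative sketch, not part of this header): typical streaming
 * use of dma_map_single_attrs()/dma_unmap_single_attrs() for a buffer the
 * device only reads. "dev", "buf" and "len" are placeholders supplied by the
 * caller; the error check uses dma_mapping_error(), declared further down in
 * this file.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	... hand "dma" to the device and wait for the transfer ...
 *
 *	dma_unmap_single_attrs(dev, dma, len, DMA_TO_DEVICE, 0);
 */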

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
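
/*
 * Example (illustrative sketch, not part of this header): mapping a
 * scatterlist and walking the resulting device addresses. "dev", "sgl",
 * "nents" and "program_hw_entry" are placeholders; the list is assumed to
 * have been set up with sg_init_table()/sg_set_buf(). The mapped count may
 * be smaller than "nents"; use the returned count for the device, but pass
 * the original "nents" back when unmapping.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *	if (count == 0)
 *		return -ENOMEM;
 *
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(i, sg_dma_address(sg), sg_dma_len(sg));
 *	... run the transfer ...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */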

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, 0);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, 0);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
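
/*
 * Example (illustrative sketch, not part of this header): a buffer that
 * stays mapped across several transfers must be handed back and forth
 * between CPU and device with the sync helpers. "dev", "dma", "buf", "len"
 * and "process_data" are placeholders; the buffer was mapped earlier with
 * dma_map_single(dev, buf, len, DMA_FROM_DEVICE).
 *
 *	... device has finished writing into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	process_data(buf, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... buffer may now be handed to the device again ...
 */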

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                                  unsigned long vm_flags,
                                  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                             unsigned long vm_flags, pgprot_t prot,
                             const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
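
/*
 * Example (illustrative sketch, not part of this header): exporting a
 * coherent buffer to user space from a driver's mmap() file operation.
 * "struct mydrv" and its fields are placeholders; "cpu_addr", "dma_handle"
 * and "size" come from an earlier dma_alloc_coherent() call.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->dma_handle, drv->size);
 *	}
 */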

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                      dma_addr_t dma_addr, size_t size,
                      unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_handle,
                                  unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
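
/*
 * Example (illustrative sketch, not part of this header): allocating a
 * small, long-lived descriptor ring that both the CPU and the device access.
 * "dev" and "RING_BYTES" are placeholders.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the hardware about "ring_dma", access "ring" from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */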

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                               DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                                        void *cpu_addr, dma_addr_t dma_handle)
{
        dma_free_attrs(dev, size, cpu_addr, dma_handle,
                       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (get_dma_ops(dev)->mapping_error)
                return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
        return dma_addr == DMA_ERROR_CODE;
#else
        return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev && dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}
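
/*
 * Example (illustrative sketch, not part of this header): configuring the
 * DMA masks at probe time, preferring 64-bit addressing and falling back to
 * 32-bit. "pdev" is a placeholder for the device being probed.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 *		dev_err(&pdev->dev, "no usable DMA addressing\n");
 *		return -ENODEV;
 *	}
 */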

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
                                      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
                                                unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret = dma_alloc_coherent(dev, size, dma_handle,
                                       flag | __GFP_ZERO);
        return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size, int flags)
{
        return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size)
{
        return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
                                        phys_addr_t phys_addr,
                                        dma_addr_t device_addr, size_t size,
                                        int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
                                phys_addr_t phys_addr, dma_addr_t device_addr,
                                size_t size, gfp_t gfp)
{
        return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_addr, gfp,
                               DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

#endif
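
/*
 * Example (illustrative sketch, not part of this header): using the
 * unmap-state macros above so that per-buffer bookkeeping compiles away
 * when unmapping is a no-op. "struct mydrv_tx_buf", "buf", "dev", "dma" and
 * "size" are placeholders.
 *
 *	struct mydrv_tx_buf {
 *		void *data;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */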