/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 *
 * DMA operations that map physical memory through IOMMU.
 */
#ifndef _LINUX_IOMMU_DMA_H
#define _LINUX_IOMMU_DMA_H

#include <linux/dma-direction.h>

#ifdef CONFIG_IOMMU_DMA
static inline bool use_dma_iommu(struct device *dev)
{
        return dev->dma_iommu;
}
#else
static inline bool use_dma_iommu(struct device *dev)
{
        return false;
}
#endif /* CONFIG_IOMMU_DMA */

dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs);
void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t gfp, unsigned long attrs);
int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
unsigned long iommu_dma_get_merge_boundary(struct device *dev);
size_t iommu_dma_opt_mapping_size(void);
size_t iommu_dma_max_mapping_size(struct device *dev);
void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t handle, unsigned long attrs);
struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir);
void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt);
#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
        vunmap(vaddr);
int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct sg_table *sgt);
void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir);
void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir);

#endif /* _LINUX_IOMMU_DMA_H */
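
For context, the sketch below shows the call pattern these declarations are built for. It is an illustrative fragment, not code from the kernel tree: the intended consumer is the generic DMA mapping core (kernel/dma/mapping.c), which tests use_dma_iommu() and, when the device is attached to an IOMMU-backed DMA domain, routes streaming map/unmap requests to the iommu_dma_*() helpers declared above; drivers keep using the ordinary DMA API and never call these directly. The function names example_stream_phys()/example_unstream_phys() are hypothetical, and the non-IOMMU branch is reduced to a comment.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-dma.h>

/*
 * Hypothetical helper, for illustration only: map one physical range for
 * streaming DMA the way the DMA mapping core is expected to use this header.
 */
static dma_addr_t example_stream_phys(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!use_dma_iommu(dev)) {
                /*
                 * No IOMMU-backed DMA domain for this device; the real core
                 * would fall back to dma-direct or the device's dma_map_ops
                 * here, which this sketch does not show.
                 */
                return DMA_MAPPING_ERROR;
        }

        /* Allocates an IOVA and maps [phys, phys + size) through the IOMMU. */
        return iommu_dma_map_phys(dev, phys, size, dir, attrs);
}

/* Hypothetical counterpart that releases a mapping made above. */
static void example_unstream_phys(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (use_dma_iommu(dev))
                /* Tears down the IOMMU mapping and frees the IOVA range. */
                iommu_dma_unmap_phys(dev, dma_addr, size, dir, attrs);
}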