// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
}

void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}
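/*
 * Illustrative note, not part of the upstream file: the zone selection in
 * dma_direct_alloc() above is plain integer arithmetic on the coherent mask.
 * With the default ARCH_ZONE_DMA_BITS of 24:
 *
 *	DMA_BIT_MASK(24) == 0x0000000000ffffff	(mask <= this forces GFP_DMA)
 *	DMA_BIT_MASK(32) == 0x00000000ffffffff	(mask <= this forces GFP_DMA32)
 *
 * A hypothetical device with a 30-bit coherent mask (0x3fffffff) therefore
 * starts with GFP_DMA32; if the page that comes back still fails
 * dma_coherent_ok(), the allocation is retried once from ZONE_DMA via the
 * "goto again" path.
 */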
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}

static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	return 1;
}

static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.is_phys		= 1,
};
EXPORT_SYMBOL(dma_direct_ops);
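/*
 * Illustrative usage sketch, not part of the upstream file: drivers do not
 * call the dma_direct_ops hooks directly.  They go through the generic DMA
 * API, which dispatches to this ops table when the architecture installs it
 * as the device's dma_map_ops.  Roughly (SZ_4K and the error handling here
 * are just for illustration):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// ... program dma_handle into the device and run the transfer ...
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
 */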