Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/dma: Remove dma_alloc_coherent_mask()

These days all devices (including the ISA fallback device) have a coherent
DMA mask set, so remove the workaround.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-3-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Christoph Hellwig; committed by Ingo Molnar.
038d07a2 3eb93ea3

+8 -40
+2 -16
arch/x86/include/asm/dma-mapping.h
··· 44 44 void *vaddr, dma_addr_t dma_addr, 45 45 unsigned long attrs); 46 46 47 - static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 48 - gfp_t gfp) 49 - { 50 - unsigned long dma_mask = 0; 51 - 52 - dma_mask = dev->coherent_dma_mask; 53 - if (!dma_mask) 54 - dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 55 - 56 - return dma_mask; 57 - } 58 - 59 47 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) 60 48 { 61 - unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); 62 - 63 - if (dma_mask <= DMA_BIT_MASK(24)) 49 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(24)) 64 50 gfp |= GFP_DMA; 65 51 #ifdef CONFIG_X86_64 66 - if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 52 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 67 53 gfp |= GFP_DMA32; 68 54 #endif 69 55 return gfp;
+4 -6
arch/x86/kernel/pci-dma.c
··· 80 80 dma_addr_t *dma_addr, gfp_t flag, 81 81 unsigned long attrs) 82 82 { 83 - unsigned long dma_mask; 84 83 struct page *page; 85 84 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 86 85 dma_addr_t addr; 87 - 88 - dma_mask = dma_alloc_coherent_mask(dev, flag); 89 86 90 87 again: 91 88 page = NULL; ··· 92 95 flag); 93 96 if (page) { 94 97 addr = phys_to_dma(dev, page_to_phys(page)); 95 - if (addr + size > dma_mask) { 98 + if (addr + size > dev->coherent_dma_mask) { 96 99 dma_release_from_contiguous(dev, page, count); 97 100 page = NULL; 98 101 } ··· 105 108 return NULL; 106 109 107 110 addr = phys_to_dma(dev, page_to_phys(page)); 108 - if (addr + size > dma_mask) { 111 + if (addr + size > dev->coherent_dma_mask) { 109 112 __free_pages(page, get_order(size)); 110 113 111 - if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { 114 + if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && 115 + !(flag & GFP_DMA)) { 112 116 flag = (flag & ~GFP_DMA32) | GFP_DMA; 113 117 goto again; 114 118 }
+1 -3
arch/x86/mm/mem_encrypt.c
··· 198 198 static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 199 199 gfp_t gfp, unsigned long attrs) 200 200 { 201 - unsigned long dma_mask; 202 201 unsigned int order; 203 202 struct page *page; 204 203 void *vaddr = NULL; 205 204 206 - dma_mask = dma_alloc_coherent_mask(dev, gfp); 207 205 order = get_order(size); 208 206 209 207 /* ··· 219 221 * mask with it already cleared. 220 222 */ 221 223 addr = __sme_clr(phys_to_dma(dev, page_to_phys(page))); 222 - if ((addr + size) > dma_mask) { 224 + if ((addr + size) > dev->coherent_dma_mask) { 223 225 __free_pages(page, get_order(size)); 224 226 } else { 225 227 vaddr = page_address(page);
+1 -15
drivers/xen/swiotlb-xen.c
··· 53 53 * API. 54 54 */ 55 55 56 - #ifndef CONFIG_X86 57 - static unsigned long dma_alloc_coherent_mask(struct device *dev, 58 - gfp_t gfp) 59 - { 60 - unsigned long dma_mask = 0; 61 - 62 - dma_mask = dev->coherent_dma_mask; 63 - if (!dma_mask) 64 - dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); 65 - 66 - return dma_mask; 67 - } 68 - #endif 69 - 70 56 #define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0) 71 57 72 58 static char *xen_io_tlb_start, *xen_io_tlb_end; ··· 314 328 return ret; 315 329 316 330 if (hwdev && hwdev->coherent_dma_mask) 317 - dma_mask = dma_alloc_coherent_mask(hwdev, flags); 331 + dma_mask = hwdev->coherent_dma_mask; 318 332 319 333 /* At this point dma_handle is the physical address, next we are 320 334 * going to set it to the machine address.