Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM/dma-mapping: remove get_coherent_dma_mask

The core DMA code already checks for valid DMA masks earlier on, and
doesn't allow NULL struct device pointers. Remove the checks that are
no longer required.

Signed-off-by: Christoph Hellwig <hch@lst.de>

+4 -37
arch/arm/mm/dma-mapping.c
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -219,45 +219,15 @@
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
+static int __dma_supported(struct device *dev, u64 mask)
 {
 	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
+	if (dma_to_pfn(dev, mask) < max_dma_pfn)
 		return 0;
-	}
-
 	return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
 }
 
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
@@ -658,7 +688,7 @@
 		 gfp_t gfp, pgprot_t prot, bool is_coherent,
 		 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = dev->coherent_dma_mask;
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
@@ -681,9 +711,6 @@
 		return NULL;
 	}
 #endif
-
-	if (!mask)
-		return NULL;
 
 	buf = kzalloc(sizeof(*buf),
 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
@@ -1062,7 +1095,7 @@
  */
 int arm_dma_supported(struct device *dev, u64 mask)
 {
-	return __dma_supported(dev, mask, false);
+	return __dma_supported(dev, mask);
 }
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)