Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping into next

Pull CMA and DMA-mapping fixes from Marek Szyprowski:
"A few fixes for dma-mapping and CMA subsystems"

* 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
CMA: correct unlock target
drivers/base/dma-contiguous.c: erratum of dev_get_cma_area
arm: dma-mapping: add checking cma area initialized
arm: dma-iommu: Clean up redundant variable
cma: Remove potential deadlock situation

+35 -17
-1
arch/arm/include/asm/dma-iommu.h
··· 18 18 unsigned int extensions; 19 19 size_t bitmap_size; /* size of a single bitmap */ 20 20 size_t bits; /* per bitmap */ 21 - unsigned int size; /* per bitmap */ 22 21 dma_addr_t base; 23 22 24 23 spinlock_t lock;
+9 -8
arch/arm/mm/dma-mapping.c
··· 390 390 if (!pages) 391 391 goto no_pages; 392 392 393 - if (IS_ENABLED(CONFIG_DMA_CMA)) 393 + if (dev_get_cma_area(NULL)) 394 394 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 395 395 atomic_pool_init); 396 396 else ··· 701 701 addr = __alloc_simple_buffer(dev, size, gfp, &page); 702 702 else if (!(gfp & __GFP_WAIT)) 703 703 addr = __alloc_from_pool(size, &page); 704 - else if (!IS_ENABLED(CONFIG_DMA_CMA)) 704 + else if (!dev_get_cma_area(dev)) 705 705 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 706 706 else 707 707 addr = __alloc_from_contiguous(dev, size, prot, &page, caller); ··· 790 790 __dma_free_buffer(page, size); 791 791 } else if (__free_from_pool(cpu_addr, size)) { 792 792 return; 793 - } else if (!IS_ENABLED(CONFIG_DMA_CMA)) { 793 + } else if (!dev_get_cma_area(dev)) { 794 794 __dma_free_remap(cpu_addr, size); 795 795 __dma_free_buffer(page, size); 796 796 } else { ··· 1074 1074 unsigned int order = get_order(size); 1075 1075 unsigned int align = 0; 1076 1076 unsigned int count, start; 1077 + size_t mapping_size = mapping->bits << PAGE_SHIFT; 1077 1078 unsigned long flags; 1078 1079 dma_addr_t iova; 1079 1080 int i; ··· 1120 1119 } 1121 1120 spin_unlock_irqrestore(&mapping->lock, flags); 1122 1121 1123 - iova = mapping->base + (mapping->size * i); 1122 + iova = mapping->base + (mapping_size * i); 1124 1123 iova += start << PAGE_SHIFT; 1125 1124 1126 1125 return iova; ··· 1130 1129 dma_addr_t addr, size_t size) 1131 1130 { 1132 1131 unsigned int start, count; 1132 + size_t mapping_size = mapping->bits << PAGE_SHIFT; 1133 1133 unsigned long flags; 1134 1134 dma_addr_t bitmap_base; 1135 1135 u32 bitmap_index; ··· 1138 1136 if (!size) 1139 1137 return; 1140 1138 1141 - bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size; 1139 + bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 1142 1140 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 1143 1141 1144 - bitmap_base = mapping->base + mapping->size * bitmap_index; 1142 + bitmap_base = mapping->base + mapping_size * bitmap_index; 1145 1143 1146 1144 start = (addr - bitmap_base) >> PAGE_SHIFT; 1147 1145 1148 - if (addr + size > bitmap_base + mapping->size) { 1146 + if (addr + size > bitmap_base + mapping_size) { 1149 1147 /* 1150 1148 * The address range to be freed reaches into the iova 1151 1149 * range of the next bitmap. This should not happen as ··· 1966 1964 mapping->extensions = extensions; 1967 1965 mapping->base = base; 1968 1966 mapping->bits = BITS_PER_BYTE * bitmap_size; 1969 - mapping->size = mapping->bits << PAGE_SHIFT; 1970 1967 1971 1968 spin_lock_init(&mapping->lock); 1972 1969
+26 -8
drivers/base/dma-contiguous.c
··· 37 37 unsigned long base_pfn; 38 38 unsigned long count; 39 39 unsigned long *bitmap; 40 + struct mutex lock; 40 41 }; 41 42 42 43 struct cma *dma_contiguous_default_area; ··· 162 161 init_cma_reserved_pageblock(pfn_to_page(base_pfn)); 163 162 } while (--i); 164 163 164 + mutex_init(&cma->lock); 165 165 return 0; 166 166 } 167 167 ··· 263 261 return ret; 264 262 } 265 263 264 + static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) 265 + { 266 + mutex_lock(&cma->lock); 267 + bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); 268 + mutex_unlock(&cma->lock); 269 + } 270 + 266 271 /** 267 272 * dma_alloc_from_contiguous() - allocate pages from contiguous area 268 273 * @dev: Pointer to device for which the allocation is performed. ··· 278 269 * 279 270 * This function allocates memory buffer for specified device. It uses 280 271 * device specific contiguous memory area if available or the default 281 - * global one. Requires architecture specific get_dev_cma_area() helper 272 + * global one. Requires architecture specific dev_get_cma_area() helper 282 273 * function. 283 274 */ 284 275 struct page *dma_alloc_from_contiguous(struct device *dev, int count, ··· 303 294 304 295 mask = (1 << align) - 1; 305 296 306 - mutex_lock(&cma_mutex); 307 297 308 298 for (;;) { 299 + mutex_lock(&cma->lock); 309 300 pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, 310 301 start, count, mask); 311 - if (pageno >= cma->count) 302 + if (pageno >= cma->count) { 303 + mutex_unlock(&cma->lock); 312 304 break; 305 + } 306 + bitmap_set(cma->bitmap, pageno, count); 307 + /* 308 + * It's safe to drop the lock here. We've marked this region for 309 + * our exclusive use. If the migration fails we will take the 310 + * lock again and unmark it. 311 + */ 312 + mutex_unlock(&cma->lock); 313 313 314 314 pfn = cma->base_pfn + pageno; 315 + mutex_lock(&cma_mutex); 315 316 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); 317 + mutex_unlock(&cma_mutex); 316 318 if (ret == 0) { 317 - bitmap_set(cma->bitmap, pageno, count); 318 319 page = pfn_to_page(pfn); 319 320 break; 320 321 } else if (ret != -EBUSY) { 322 + clear_cma_bitmap(cma, pfn, count); 321 323 break; 322 324 } 325 + clear_cma_bitmap(cma, pfn, count); 323 326 pr_debug("%s(): memory range at %p is busy, retrying\n", 324 327 __func__, pfn_to_page(pfn)); 325 328 /* try again with a bit different memory target */ 326 329 start = pageno + mask + 1; 327 330 } 328 331 329 - mutex_unlock(&cma_mutex); 330 332 pr_debug("%s(): returned %p\n", __func__, page); 331 333 return page; 332 334 } ··· 370 350 371 351 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); 372 352 373 - mutex_lock(&cma_mutex); 374 - bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); 375 353 free_contig_range(pfn, count); 376 - mutex_unlock(&cma_mutex); 354 + clear_cma_bitmap(cma, pfn, count); 377 355 378 356 return true; 379 357 }