Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
"One compiler fix, and a bug-fix in swiotlb_nr_tbl() and
swiotlb_max_segment() to check also for no_iotlb_memory"

* 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
swiotlb: fix phys_addr_t overflow warning
swiotlb: Return consistent SWIOTLB segments/nr_tbl
swiotlb: Group identical cleanup in swiotlb_cleanup()

+17 -15
+1 -1
drivers/xen/swiotlb-xen.c
@@ -402,7 +402,7 @@
 
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
-	if (map == DMA_MAPPING_ERROR)
+	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
+16 -14
kernel/dma/swiotlb.c
@@ -129,15 +129,17 @@
 }
 early_param("swiotlb", setup_io_tlb_npages);
 
+static bool no_iotlb_memory;
+
 unsigned long swiotlb_nr_tbl(void)
 {
-	return io_tlb_nslabs;
+	return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
 unsigned int swiotlb_max_segment(void)
 {
-	return max_segment;
+	return unlikely(no_iotlb_memory) ? 0 : max_segment;
 }
 EXPORT_SYMBOL_GPL(swiotlb_max_segment);
@@ -161,8 +159,6 @@
 
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
-
-static bool no_iotlb_memory;
 
 void swiotlb_print_info(void)
 {
@@ -317,6 +317,14 @@
 	return rc;
 }
 
+static void swiotlb_cleanup(void)
+{
+	io_tlb_end = 0;
+	io_tlb_start = 0;
+	io_tlb_nslabs = 0;
+	max_segment = 0;
+}
+
 int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
@@ -375,9 +367,6 @@
 	                                sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	io_tlb_end = 0;
-	io_tlb_start = 0;
-	io_tlb_nslabs = 0;
-	max_segment = 0;
+	swiotlb_cleanup();
 	return -ENOMEM;
 }
@@ -399,8 +394,5 @@
 		memblock_free_late(io_tlb_start,
 				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
-	io_tlb_start = 0;
-	io_tlb_end = 0;
-	io_tlb_nslabs = 0;
-	max_segment = 0;
+	swiotlb_cleanup();
 }
@@ -548,7 +546,7 @@
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
 			 size, io_tlb_nslabs, tmp_io_tlb_used);
-	return DMA_MAPPING_ERROR;
+	return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -666,7 +664,7 @@
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
 				       *phys, size, dir, attrs);
-	if (*phys == DMA_MAPPING_ERROR)
+	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */