Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
drivers/dma: Correct NULL test
async-tx: fix buffer submission error handling in ipu_idmac.c
dmaengine: correct onstack wait_queue_head declaration
ioat: fix infinite timeout checking in ioat2_quiesce
dmaengine: fix memleak in dma_async_device_unregister

+10 -22
-2
drivers/dma/coh901318.c
··· 613 cohd_fin->pending_irqs--; 614 cohc->completed = cohd_fin->desc.cookie; 615 616 - BUG_ON(cohc->nbr_active_done && cohd_fin == NULL); 617 - 618 if (cohc->nbr_active_done == 0) 619 return; 620
··· 613 cohd_fin->pending_irqs--; 614 cohc->completed = cohd_fin->desc.cookie; 615 616 if (cohc->nbr_active_done == 0) 617 return; 618
+1
drivers/dma/dmaengine.c
··· 826 chan->dev->chan = NULL; 827 mutex_unlock(&dma_list_mutex); 828 device_unregister(&chan->dev->device); 829 } 830 } 831 EXPORT_SYMBOL(dma_async_device_unregister);
··· 826 chan->dev->chan = NULL; 827 mutex_unlock(&dma_list_mutex); 828 device_unregister(&chan->dev->device); 829 + free_percpu(chan->local); 830 } 831 } 832 EXPORT_SYMBOL(dma_async_device_unregister);
+1 -1
drivers/dma/dmatest.c
··· 467 468 if (iterations > 0) 469 while (!kthread_should_stop()) { 470 - DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); 471 interruptible_sleep_on(&wait_dmatest_exit); 472 } 473
··· 467 468 if (iterations > 0) 469 while (!kthread_should_stop()) { 470 + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 471 interruptible_sleep_on(&wait_dmatest_exit); 472 } 473
+1 -1
drivers/dma/ioat/dma_v2.c
··· 249 if (is_ioat_active(status) || is_ioat_idle(status)) 250 ioat_suspend(chan); 251 while (is_ioat_active(status) || is_ioat_idle(status)) { 252 - if (end && time_after(jiffies, end)) { 253 err = -ETIMEDOUT; 254 break; 255 }
··· 249 if (is_ioat_active(status) || is_ioat_idle(status)) 250 ioat_suspend(chan); 251 while (is_ioat_active(status) || is_ioat_idle(status)) { 252 + if (tmo && time_after(jiffies, end)) { 253 err = -ETIMEDOUT; 254 break; 255 }
+7 -18
drivers/dma/ipu/ipu_idmac.c
··· 761 * @buffer_n: buffer number to update. 762 * 0 or 1 are the only valid values. 763 * @phyaddr: buffer physical address. 764 - * @return: Returns 0 on success or negative error code on failure. This 765 - * function will fail if the buffer is set to ready. 766 */ 767 /* Called under spin_lock(_irqsave)(&ichan->lock) */ 768 - static int ipu_update_channel_buffer(struct idmac_channel *ichan, 769 - int buffer_n, dma_addr_t phyaddr) 770 { 771 enum ipu_channel channel = ichan->dma_chan.chan_id; 772 uint32_t reg; ··· 804 } 805 806 spin_unlock_irqrestore(&ipu_data.lock, flags); 807 - 808 - return 0; 809 } 810 811 /* Called under spin_lock_irqsave(&ichan->lock) */ ··· 812 { 813 unsigned int chan_id = ichan->dma_chan.chan_id; 814 struct device *dev = &ichan->dma_chan.dev->device; 815 - int ret; 816 817 if (async_tx_test_ack(&desc->txd)) 818 return -EINTR; ··· 822 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but 823 * doing it again shouldn't hurt either. 824 */ 825 - ret = ipu_update_channel_buffer(ichan, buf_idx, 826 - sg_dma_address(sg)); 827 - 828 - if (ret < 0) { 829 - dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", 830 - sg, chan_id, buf_idx); 831 - return ret; 832 - } 833 834 ipu_select_buffer(chan_id, buf_idx); 835 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", ··· 1367 1368 if (likely(sgnew) && 1369 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { 1370 - callback = desc->txd.callback; 1371 - callback_param = desc->txd.callback_param; 1372 spin_unlock(&ichan->lock); 1373 - callback(callback_param); 1374 spin_lock(&ichan->lock); 1375 } 1376
··· 761 * @buffer_n: buffer number to update. 762 * 0 or 1 are the only valid values. 763 * @phyaddr: buffer physical address. 764 */ 765 /* Called under spin_lock(_irqsave)(&ichan->lock) */ 766 + static void ipu_update_channel_buffer(struct idmac_channel *ichan, 767 + int buffer_n, dma_addr_t phyaddr) 768 { 769 enum ipu_channel channel = ichan->dma_chan.chan_id; 770 uint32_t reg; ··· 806 } 807 808 spin_unlock_irqrestore(&ipu_data.lock, flags); 809 } 810 811 /* Called under spin_lock_irqsave(&ichan->lock) */ ··· 816 { 817 unsigned int chan_id = ichan->dma_chan.chan_id; 818 struct device *dev = &ichan->dma_chan.dev->device; 819 820 if (async_tx_test_ack(&desc->txd)) 821 return -EINTR; ··· 827 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but 828 * doing it again shouldn't hurt either. 829 */ 830 + ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); 831 832 ipu_select_buffer(chan_id, buf_idx); 833 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", ··· 1379 1380 if (likely(sgnew) && 1381 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { 1382 + callback = descnew->txd.callback; 1383 + callback_param = descnew->txd.callback_param; 1384 spin_unlock(&ichan->lock); 1385 + if (callback) 1386 + callback(callback_param); 1387 spin_lock(&ichan->lock); 1388 } 1389