Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

async_tx: kill ->device_dependency_added

DMA drivers no longer need to be notified of dependency submission
events as async_tx_run_dependencies and async_tx_channel_switch will
handle the scheduling and execution of dependent operations.

[sfr@canb.auug.org.au: extend this for fsldma]
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

-30
-1
drivers/dma/dmaengine.c
@@ -362,7 +362,6 @@

 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_dependency_added);
 	BUG_ON(!device->device_is_tx_complete);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
-8
drivers/dma/fsldma.c
@@ -658,13 +658,6 @@
 	fsl_chan_xfer_ld_queue(fsl_chan);
 }

-static void fsl_dma_dependency_added(struct dma_chan *chan)
-{
-	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-	fsl_chan_ld_cleanup(fsl_chan);
-}
-
 /**
  * fsl_dma_is_complete - Determine the DMA status
  * @fsl_chan : Freescale DMA channel
@@ -1082,7 +1089,6 @@
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
-	fdev->common.device_dependency_added = fsl_dma_dependency_added;
 	fdev->common.dev = &dev->dev;

 	irq = irq_of_parse_and_map(dev->node, 0);
-12
drivers/dma/ioat_dma.c
@@ -924,17 +924,6 @@
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
 }

-static void ioat_dma_dependency_added(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	spin_lock_bh(&ioat_chan->desc_lock);
-	if (ioat_chan->pending == 0) {
-		spin_unlock_bh(&ioat_chan->desc_lock);
-		ioat_dma_memcpy_cleanup(ioat_chan);
-	} else
-		spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
 /**
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
@@ -1305,7 +1316,6 @@

 	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_dependency_added = ioat_dma_dependency_added;
 	switch (device->version) {
 	case IOAT_VER_1_2:
 		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-7
drivers/dma/iop-adma.c
@@ -672,12 +672,6 @@
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }

-static void iop_adma_dependency_added(struct dma_chan *chan)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	tasklet_schedule(&iop_chan->irq_tasklet);
-}
-
 static void iop_adma_free_chan_resources(struct dma_chan *chan)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
@@ -1172,7 +1178,6 @@
 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
 	dma_dev->device_is_tx_complete = iop_adma_is_complete;
 	dma_dev->device_issue_pending = iop_adma_issue_pending;
-	dma_dev->device_dependency_added = iop_adma_dependency_added;
 	dma_dev->dev = &pdev->dev;

 	/* set prep routines based on capability */
-2
include/linux/dmaengine.h
@@ -258,7 +258,6 @@
  * @device_prep_dma_zero_sum: prepares a zero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
- * @device_dependency_added: async_tx notifies the channel about new deps
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -292,7 +293,6 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan);

-	void (*device_dependency_added)(struct dma_chan *chan);
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 		dma_cookie_t cookie, dma_cookie_t *last,
 		dma_cookie_t *used);