Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: move last completed cookie into generic dma_chan structure

Every DMA engine implementation declares a last completed DMA cookie
in its private DMA channel structure. This is pointless, and forces
driver-specific code. Move this out into the common dma_chan
structure.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>

authored by

Russell King - ARM Linux and committed by
Vinod Koul
4d4e58de 08714f60

+83 -119
-2
arch/arm/include/asm/hardware/iop_adma.h
··· 49 49 /** 50 50 * struct iop_adma_chan - internal representation of an ADMA device 51 51 * @pending: allows batching of hardware operations 52 - * @completed_cookie: identifier for the most recently completed operation 53 52 * @lock: serializes enqueue/dequeue operations to the slot pool 54 53 * @mmr_base: memory mapped register base 55 54 * @chain: device chain view of the descriptors ··· 61 62 */ 62 63 struct iop_adma_chan { 63 64 int pending; 64 - dma_cookie_t completed_cookie; 65 65 spinlock_t lock; /* protects the descriptor slot pool */ 66 66 void __iomem *mmr_base; 67 67 struct list_head chain;
+4 -4
drivers/dma/amba-pl08x.c
··· 971 971 u32 bytesleft = 0; 972 972 973 973 last_used = plchan->chan.cookie; 974 - last_complete = plchan->lc; 974 + last_complete = plchan->chan.completed_cookie; 975 975 976 976 ret = dma_async_is_complete(cookie, last_complete, last_used); 977 977 if (ret == DMA_SUCCESS) { ··· 983 983 * This cookie not complete yet 984 984 */ 985 985 last_used = plchan->chan.cookie; 986 - last_complete = plchan->lc; 986 + last_complete = plchan->chan.completed_cookie; 987 987 988 988 /* Get number of bytes left in the active transactions and queue */ 989 989 bytesleft = pl08x_getbytes_chan(plchan); ··· 1543 1543 1544 1544 if (txd) { 1545 1545 /* Update last completed */ 1546 - plchan->lc = txd->tx.cookie; 1546 + plchan->chan.completed_cookie = txd->tx.cookie; 1547 1547 } 1548 1548 1549 1549 /* If a new descriptor is queued, set it up plchan->at is NULL here */ ··· 1725 1725 1726 1726 chan->chan.device = dmadev; 1727 1727 chan->chan.cookie = 0; 1728 - chan->lc = 0; 1728 + chan->chan.completed_cookie = 0; 1729 1729 1730 1730 spin_lock_init(&chan->lock); 1731 1731 INIT_LIST_HEAD(&chan->pend_list);
+5 -5
drivers/dma/at_hdmac.c
··· 269 269 dev_vdbg(chan2dev(&atchan->chan_common), 270 270 "descriptor %u complete\n", txd->cookie); 271 271 272 - atchan->completed_cookie = txd->cookie; 272 + atchan->chan_common.completed_cookie = txd->cookie; 273 273 274 274 /* move children to free_list */ 275 275 list_splice_init(&desc->tx_list, &atchan->free_list); ··· 1016 1016 1017 1017 spin_lock_irqsave(&atchan->lock, flags); 1018 1018 1019 - last_complete = atchan->completed_cookie; 1019 + last_complete = chan->completed_cookie; 1020 1020 last_used = chan->cookie; 1021 1021 1022 1022 ret = dma_async_is_complete(cookie, last_complete, last_used); 1023 1023 if (ret != DMA_SUCCESS) { 1024 1024 atc_cleanup_descriptors(atchan); 1025 1025 1026 - last_complete = atchan->completed_cookie; 1026 + last_complete = chan->completed_cookie; 1027 1027 last_used = chan->cookie; 1028 1028 1029 1029 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 1129 1129 spin_lock_irqsave(&atchan->lock, flags); 1130 1130 atchan->descs_allocated = i; 1131 1131 list_splice(&tmp_list, &atchan->free_list); 1132 - atchan->completed_cookie = chan->cookie = 1; 1132 + chan->completed_cookie = chan->cookie = 1; 1133 1133 spin_unlock_irqrestore(&atchan->lock, flags); 1134 1134 1135 1135 /* channel parameters */ ··· 1329 1329 struct at_dma_chan *atchan = &atdma->chan[i]; 1330 1330 1331 1331 atchan->chan_common.device = &atdma->dma_common; 1332 - atchan->chan_common.cookie = atchan->completed_cookie = 1; 1332 + atchan->chan_common.cookie = atchan->chan_common.completed_cookie = 1; 1333 1333 list_add_tail(&atchan->chan_common.device_node, 1334 1334 &atdma->dma_common.channels); 1335 1335
-2
drivers/dma/at_hdmac_regs.h
··· 208 208 * @save_dscr: for cyclic operations, preserve next descriptor address in 209 209 * the cyclic list on suspend/resume cycle 210 210 * @lock: serializes enqueue/dequeue operations to descriptors lists 211 - * @completed_cookie: identifier for the most recently completed operation 212 211 * @active_list: list of descriptors dmaengine is being running on 213 212 * @queue: list of descriptors ready to be submitted to engine 214 213 * @free_list: list of descriptors usable by the channel ··· 226 227 spinlock_t lock; 227 228 228 229 /* these other elements are all protected by lock */ 229 - dma_cookie_t completed_cookie; 230 230 struct list_head active_list; 231 231 struct list_head queue; 232 232 struct list_head free_list;
+3 -4
drivers/dma/coh901318.c
··· 59 59 struct coh901318_chan { 60 60 spinlock_t lock; 61 61 int allocated; 62 - int completed; 63 62 int id; 64 63 int stopped; 65 64 ··· 704 705 callback_param = cohd_fin->desc.callback_param; 705 706 706 707 /* sign this job as completed on the channel */ 707 - cohc->completed = cohd_fin->desc.cookie; 708 + cohc->chan.completed_cookie = cohd_fin->desc.cookie; 708 709 709 710 /* release the lli allocation and remove the descriptor */ 710 711 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); ··· 928 929 coh901318_config(cohc, NULL); 929 930 930 931 cohc->allocated = 1; 931 - cohc->completed = chan->cookie = 1; 932 + chan->completed_cookie = chan->cookie = 1; 932 933 933 934 spin_unlock_irqrestore(&cohc->lock, flags); 934 935 ··· 1168 1169 dma_cookie_t last_complete; 1169 1170 int ret; 1170 1171 1171 - last_complete = cohc->completed; 1172 + last_complete = chan->completed_cookie; 1172 1173 last_used = chan->cookie; 1173 1174 1174 1175 ret = dma_async_is_complete(cookie, last_complete, last_used);
+5 -5
drivers/dma/dw_dmac.c
··· 249 249 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 250 250 251 251 spin_lock_irqsave(&dwc->lock, flags); 252 - dwc->completed = txd->cookie; 252 + dwc->chan.completed_cookie = txd->cookie; 253 253 if (callback_required) { 254 254 callback = txd->callback; 255 255 param = txd->callback_param; ··· 997 997 dma_cookie_t last_complete; 998 998 int ret; 999 999 1000 - last_complete = dwc->completed; 1000 + last_complete = chan->completed_cookie; 1001 1001 last_used = chan->cookie; 1002 1002 1003 1003 ret = dma_async_is_complete(cookie, last_complete, last_used); 1004 1004 if (ret != DMA_SUCCESS) { 1005 1005 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1006 1006 1007 - last_complete = dwc->completed; 1007 + last_complete = chan->completed_cookie; 1008 1008 last_used = chan->cookie; 1009 1009 1010 1010 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 1046 1046 return -EIO; 1047 1047 } 1048 1048 1049 - dwc->completed = chan->cookie = 1; 1049 + chan->completed_cookie = chan->cookie = 1; 1050 1050 1051 1051 /* 1052 1052 * NOTE: some controllers may have additional features that we ··· 1474 1474 struct dw_dma_chan *dwc = &dw->chan[i]; 1475 1475 1476 1476 dwc->chan.device = &dw->dma; 1477 - dwc->chan.cookie = dwc->completed = 1; 1477 + dwc->chan.cookie = dwc->chan.completed_cookie = 1; 1478 1478 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1479 1479 list_add_tail(&dwc->chan.device_node, 1480 1480 &dw->dma.channels);
-1
drivers/dma/dw_dmac_regs.h
··· 158 158 159 159 /* these other elements are all protected by lock */ 160 160 unsigned long flags; 161 - dma_cookie_t completed; 162 161 struct list_head active_list; 163 162 struct list_head queue; 164 163 struct list_head free_list;
+3 -5
drivers/dma/ep93xx_dma.c
··· 122 122 * @lock: lock protecting the fields following 123 123 * @flags: flags for the channel 124 124 * @buffer: which buffer to use next (0/1) 125 - * @last_completed: last completed cookie value 126 125 * @active: flattened chain of descriptors currently being processed 127 126 * @queue: pending descriptors which are handled next 128 127 * @free_list: list of free descriptors which can be used ··· 156 157 #define EP93XX_DMA_IS_CYCLIC 0 157 158 158 159 int buffer; 159 - dma_cookie_t last_completed; 160 160 struct list_head active; 161 161 struct list_head queue; 162 162 struct list_head free_list; ··· 701 703 desc = ep93xx_dma_get_active(edmac); 702 704 if (desc) { 703 705 if (desc->complete) { 704 - edmac->last_completed = desc->txd.cookie; 706 + edmac->chan.completed_cookie = desc->txd.cookie; 705 707 list_splice_init(&edmac->active, &list); 706 708 } 707 709 callback = desc->txd.callback; ··· 859 861 goto fail_clk_disable; 860 862 861 863 spin_lock_irq(&edmac->lock); 862 - edmac->last_completed = 1; 864 + edmac->chan.completed_cookie = 1; 863 865 edmac->chan.cookie = 1; 864 866 ret = edmac->edma->hw_setup(edmac); 865 867 spin_unlock_irq(&edmac->lock); ··· 1252 1254 1253 1255 spin_lock_irqsave(&edmac->lock, flags); 1254 1256 last_used = chan->cookie; 1255 - last_completed = edmac->last_completed; 1257 + last_completed = chan->completed_cookie; 1256 1258 spin_unlock_irqrestore(&edmac->lock, flags); 1257 1259 1258 1260 ret = dma_async_is_complete(cookie, last_completed, last_used);
+2 -2
drivers/dma/fsldma.c
··· 990 990 991 991 spin_lock_irqsave(&chan->desc_lock, flags); 992 992 993 - last_complete = chan->completed_cookie; 993 + last_complete = dchan->completed_cookie; 994 994 last_used = dchan->cookie; 995 995 996 996 spin_unlock_irqrestore(&chan->desc_lock, flags); ··· 1088 1088 desc = to_fsl_desc(chan->ld_running.prev); 1089 1089 cookie = desc->async_tx.cookie; 1090 1090 1091 - chan->completed_cookie = cookie; 1091 + chan->common.completed_cookie = cookie; 1092 1092 chan_dbg(chan, "completed_cookie=%d\n", cookie); 1093 1093 } 1094 1094
-1
drivers/dma/fsldma.h
··· 137 137 struct fsldma_chan { 138 138 char name[8]; /* Channel name */ 139 139 struct fsldma_chan_regs __iomem *regs; 140 - dma_cookie_t completed_cookie; /* The maximum cookie completed */ 141 140 spinlock_t desc_lock; /* Descriptor operation lock */ 142 141 struct list_head ld_pending; /* Link descriptors queue */ 143 142 struct list_head ld_running; /* Link descriptors queue */
+3 -4
drivers/dma/imx-dma.c
··· 41 41 struct dma_chan chan; 42 42 spinlock_t lock; 43 43 struct dma_async_tx_descriptor desc; 44 - dma_cookie_t last_completed; 45 44 enum dma_status status; 46 45 int dma_request; 47 46 struct scatterlist *sg_list; ··· 64 65 { 65 66 if (imxdmac->desc.callback) 66 67 imxdmac->desc.callback(imxdmac->desc.callback_param); 67 - imxdmac->last_completed = imxdmac->desc.cookie; 68 + imxdmac->chan.completed_cookie = imxdmac->desc.cookie; 68 69 } 69 70 70 71 static void imxdma_irq_handler(int channel, void *data) ··· 157 158 158 159 last_used = chan->cookie; 159 160 160 - ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); 161 - dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); 161 + ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used); 162 + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); 162 163 163 164 return ret; 164 165 }
+2 -3
drivers/dma/imx-sdma.c
··· 267 267 struct dma_chan chan; 268 268 spinlock_t lock; 269 269 struct dma_async_tx_descriptor desc; 270 - dma_cookie_t last_completed; 271 270 enum dma_status status; 272 271 unsigned int chn_count; 273 272 unsigned int chn_real_count; ··· 528 529 else 529 530 sdmac->status = DMA_SUCCESS; 530 531 531 - sdmac->last_completed = sdmac->desc.cookie; 532 + sdmac->chan.completed_cookie = sdmac->desc.cookie; 532 533 if (sdmac->desc.callback) 533 534 sdmac->desc.callback(sdmac->desc.callback_param); 534 535 } ··· 1126 1127 1127 1128 last_used = chan->cookie; 1128 1129 1129 - dma_set_tx_state(txstate, sdmac->last_completed, last_used, 1130 + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 1130 1131 sdmac->chn_count - sdmac->chn_real_count); 1131 1132 1132 1133 return sdmac->status;
+4 -5
drivers/dma/intel_mid_dma.c
··· 288 288 struct intel_mid_dma_lli *llitem; 289 289 void *param_txd = NULL; 290 290 291 - midc->completed = txd->cookie; 291 + midc->chan.completed_cookie = txd->cookie; 292 292 callback_txd = txd->callback; 293 293 param_txd = txd->callback_param; 294 294 ··· 482 482 dma_cookie_t cookie, 483 483 struct dma_tx_state *txstate) 484 484 { 485 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 486 485 dma_cookie_t last_used; 487 486 dma_cookie_t last_complete; 488 487 int ret; 489 488 490 - last_complete = midc->completed; 489 + last_complete = chan->completed_cookie; 491 490 last_used = chan->cookie; 492 491 493 492 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 495 496 midc_scan_descriptors(to_middma_device(chan->device), midc); 496 497 spin_unlock_bh(&midc->lock); 497 498 498 - last_complete = midc->completed; 499 + last_complete = chan->completed_cookie; 499 500 last_used = chan->cookie; 500 501 501 502 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 885 886 pm_runtime_put(&mid->pdev->dev); 886 887 return -EIO; 887 888 } 888 - midc->completed = chan->cookie = 1; 889 + chan->completed_cookie = chan->cookie = 1; 889 890 890 891 spin_lock_bh(&midc->lock); 891 892 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
-2
drivers/dma/intel_mid_dma_regs.h
··· 165 165 * @dma_base: MMIO register space DMA engine base pointer 166 166 * @ch_id: DMA channel id 167 167 * @lock: channel spinlock 168 - * @completed: DMA cookie 169 168 * @active_list: current active descriptors 170 169 * @queue: current queued up descriptors 171 170 * @free_list: current free descriptors ··· 182 183 void __iomem *dma_base; 183 184 int ch_id; 184 185 spinlock_t lock; 185 - dma_cookie_t completed; 186 186 struct list_head active_list; 187 187 struct list_head queue; 188 188 struct list_head free_list;
+1 -1
drivers/dma/ioat/dma.c
··· 603 603 */ 604 604 dump_desc_dbg(ioat, desc); 605 605 if (tx->cookie) { 606 - chan->completed_cookie = tx->cookie; 606 + chan->common.completed_cookie = tx->cookie; 607 607 tx->cookie = 0; 608 608 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 609 609 ioat->active -= desc->hw->tx_cnt;
+1 -3
drivers/dma/ioat/dma.h
··· 90 90 void __iomem *reg_base; 91 91 unsigned long last_completion; 92 92 spinlock_t cleanup_lock; 93 - dma_cookie_t completed_cookie; 94 93 unsigned long state; 95 94 #define IOAT_COMPLETION_PENDING 0 96 95 #define IOAT_COMPLETION_ACK 1 ··· 152 153 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, 153 154 struct dma_tx_state *txstate) 154 155 { 155 - struct ioat_chan_common *chan = to_chan_common(c); 156 156 dma_cookie_t last_used; 157 157 dma_cookie_t last_complete; 158 158 159 159 last_used = c->cookie; 160 - last_complete = chan->completed_cookie; 160 + last_complete = c->completed_cookie; 161 161 162 162 dma_set_tx_state(txstate, last_complete, last_used, 0); 163 163
+1 -1
drivers/dma/ioat/dma_v2.c
··· 147 147 dump_desc_dbg(ioat, desc); 148 148 if (tx->cookie) { 149 149 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 150 - chan->completed_cookie = tx->cookie; 150 + chan->common.completed_cookie = tx->cookie; 151 151 tx->cookie = 0; 152 152 if (tx->callback) { 153 153 tx->callback(tx->callback_param);
+1 -1
drivers/dma/ioat/dma_v3.c
··· 277 277 dump_desc_dbg(ioat, desc); 278 278 tx = &desc->txd; 279 279 if (tx->cookie) { 280 - chan->completed_cookie = tx->cookie; 280 + chan->common.completed_cookie = tx->cookie; 281 281 ioat3_dma_unmap(ioat, desc, idx + i); 282 282 tx->cookie = 0; 283 283 if (tx->callback) {
+5 -5
drivers/dma/iop-adma.c
··· 317 317 } 318 318 319 319 if (cookie > 0) { 320 - iop_chan->completed_cookie = cookie; 320 + iop_chan->common.completed_cookie = cookie; 321 321 pr_debug("\tcompleted cookie %d\n", cookie); 322 322 } 323 323 } ··· 909 909 enum dma_status ret; 910 910 911 911 last_used = chan->cookie; 912 - last_complete = iop_chan->completed_cookie; 912 + last_complete = chan->completed_cookie; 913 913 dma_set_tx_state(txstate, last_complete, last_used, 0); 914 914 ret = dma_async_is_complete(cookie, last_complete, last_used); 915 915 if (ret == DMA_SUCCESS) ··· 918 918 iop_adma_slot_cleanup(iop_chan); 919 919 920 920 last_used = chan->cookie; 921 - last_complete = iop_chan->completed_cookie; 921 + last_complete = chan->completed_cookie; 922 922 dma_set_tx_state(txstate, last_complete, last_used, 0); 923 923 924 924 return dma_async_is_complete(cookie, last_complete, last_used); ··· 1650 1650 /* initialize the completed cookie to be less than 1651 1651 * the most recently used cookie 1652 1652 */ 1653 - iop_chan->completed_cookie = cookie - 1; 1653 + iop_chan->common.completed_cookie = cookie - 1; 1654 1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1655 1655 1656 1656 /* channel should not be busy */ ··· 1707 1707 /* initialize the completed cookie to be less than 1708 1708 * the most recently used cookie 1709 1709 */ 1710 - iop_chan->completed_cookie = cookie - 1; 1710 + iop_chan->common.completed_cookie = cookie - 1; 1711 1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; 1712 1712 1713 1713 /* channel should not be busy */
+4 -6
drivers/dma/ipu/ipu_idmac.c
··· 1295 1295 /* Flip the active buffer - even if update above failed */ 1296 1296 ichan->active_buffer = !ichan->active_buffer; 1297 1297 if (done) 1298 - ichan->completed = desc->txd.cookie; 1298 + ichan->dma_chan.completed_cookie = desc->txd.cookie; 1299 1299 1300 1300 callback = desc->txd.callback; 1301 1301 callback_param = desc->txd.callback_param; ··· 1511 1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE); 1512 1512 1513 1513 chan->cookie = 1; 1514 - ichan->completed = -ENXIO; 1514 + chan->completed_cookie = -ENXIO; 1515 1515 1516 1516 ret = ipu_irq_map(chan->chan_id); 1517 1517 if (ret < 0) ··· 1600 1600 static enum dma_status idmac_tx_status(struct dma_chan *chan, 1601 1601 dma_cookie_t cookie, struct dma_tx_state *txstate) 1602 1602 { 1603 - struct idmac_channel *ichan = to_idmac_chan(chan); 1604 - 1605 - dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); 1603 + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); 1606 1604 if (cookie != chan->cookie) 1607 1605 return DMA_ERROR; 1608 1606 return DMA_SUCCESS; ··· 1636 1638 1637 1639 ichan->status = IPU_CHANNEL_FREE; 1638 1640 ichan->sec_chan_en = false; 1639 - ichan->completed = -ENXIO; 1640 1641 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); 1641 1642 1642 1643 dma_chan->device = &idmac->dma; 1643 1644 dma_chan->cookie = 1; 1645 + dma_chan->completed_cookie = -ENXIO; 1644 1646 dma_chan->chan_id = i; 1645 1647 list_add_tail(&dma_chan->device_node, &dma->channels); 1646 1648 }
+3 -4
drivers/dma/mpc512x_dma.c
··· 188 188 struct list_head completed; 189 189 struct mpc_dma_tcd *tcd; 190 190 dma_addr_t tcd_paddr; 191 - dma_cookie_t completed_cookie; 192 191 193 192 /* Lock for this structure */ 194 193 spinlock_t lock; ··· 364 365 /* Free descriptors */ 365 366 spin_lock_irqsave(&mchan->lock, flags); 366 367 list_splice_tail_init(&list, &mchan->free); 367 - mchan->completed_cookie = last_cookie; 368 + mchan->chan.completed_cookie = last_cookie; 368 369 spin_unlock_irqrestore(&mchan->lock, flags); 369 370 } 370 371 } ··· 567 568 568 569 spin_lock_irqsave(&mchan->lock, flags); 569 570 last_used = mchan->chan.cookie; 570 - last_complete = mchan->completed_cookie; 571 + last_complete = mchan->chan.completed_cookie; 571 572 spin_unlock_irqrestore(&mchan->lock, flags); 572 573 573 574 dma_set_tx_state(txstate, last_complete, last_used, 0); ··· 741 742 742 743 mchan->chan.device = dma; 743 744 mchan->chan.cookie = 1; 744 - mchan->completed_cookie = mchan->chan.cookie; 745 + mchan->chan.completed_cookie = mchan->chan.cookie; 745 746 746 747 INIT_LIST_HEAD(&mchan->free); 747 748 INIT_LIST_HEAD(&mchan->prepared);
+3 -3
drivers/dma/mv_xor.c
··· 435 435 } 436 436 437 437 if (cookie > 0) 438 - mv_chan->completed_cookie = cookie; 438 + mv_chan->common.completed_cookie = cookie; 439 439 } 440 440 441 441 static void ··· 825 825 enum dma_status ret; 826 826 827 827 last_used = chan->cookie; 828 - last_complete = mv_chan->completed_cookie; 828 + last_complete = chan->completed_cookie; 829 829 dma_set_tx_state(txstate, last_complete, last_used, 0); 830 830 831 831 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 836 836 mv_xor_slot_cleanup(mv_chan); 837 837 838 838 last_used = chan->cookie; 839 - last_complete = mv_chan->completed_cookie; 839 + last_complete = chan->completed_cookie; 840 840 841 841 dma_set_tx_state(txstate, last_complete, last_used, 0); 842 842 return dma_async_is_complete(cookie, last_complete, last_used);
-2
drivers/dma/mv_xor.h
··· 78 78 /** 79 79 * struct mv_xor_chan - internal representation of a XOR channel 80 80 * @pending: allows batching of hardware operations 81 - * @completed_cookie: identifier for the most recently completed operation 82 81 * @lock: serializes enqueue/dequeue operations to the descriptors pool 83 82 * @mmr_base: memory mapped register base 84 83 * @idx: the index of the xor channel ··· 92 93 */ 93 94 struct mv_xor_chan { 94 95 int pending; 95 - dma_cookie_t completed_cookie; 96 96 spinlock_t lock; /* protects the descriptor slot pool */ 97 97 void __iomem *mmr_base; 98 98 unsigned int idx;
+2 -3
drivers/dma/mxs-dma.c
··· 111 111 struct mxs_dma_ccw *ccw; 112 112 dma_addr_t ccw_phys; 113 113 int desc_count; 114 - dma_cookie_t last_completed; 115 114 enum dma_status status; 116 115 unsigned int flags; 117 116 #define MXS_DMA_SG_LOOP (1 << 0) ··· 273 274 stat1 &= ~(1 << channel); 274 275 275 276 if (mxs_chan->status == DMA_SUCCESS) 276 - mxs_chan->last_completed = mxs_chan->desc.cookie; 277 + mxs_chan->chan.completed_cookie = mxs_chan->desc.cookie; 277 278 278 279 /* schedule tasklet on this channel */ 279 280 tasklet_schedule(&mxs_chan->tasklet); ··· 537 538 dma_cookie_t last_used; 538 539 539 540 last_used = chan->cookie; 540 - dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 541 + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); 541 542 542 543 return mxs_chan->status; 543 544 }
+2 -3
drivers/dma/pch_dma.c
··· 105 105 106 106 spinlock_t lock; 107 107 108 - dma_cookie_t completed_cookie; 109 108 struct list_head active_list; 110 109 struct list_head queue; 111 110 struct list_head free_list; ··· 543 544 spin_lock_irq(&pd_chan->lock); 544 545 list_splice(&tmp_list, &pd_chan->free_list); 545 546 pd_chan->descs_allocated = i; 546 - pd_chan->completed_cookie = chan->cookie = 1; 547 + chan->completed_cookie = chan->cookie = 1; 547 548 spin_unlock_irq(&pd_chan->lock); 548 549 549 550 pdc_enable_irq(chan, 1); ··· 582 583 int ret; 583 584 584 585 spin_lock_irq(&pd_chan->lock); 585 - last_completed = pd_chan->completed_cookie; 586 + last_completed = chan->completed_cookie; 586 587 last_used = chan->cookie; 587 588 spin_unlock_irq(&pd_chan->lock); 588 589
+3 -6
drivers/dma/pl330.c
··· 51 51 /* DMA-Engine Channel */ 52 52 struct dma_chan chan; 53 53 54 - /* Last completed cookie */ 55 - dma_cookie_t completed; 56 - 57 54 /* List of to be xfered descriptors */ 58 55 struct list_head work_list; 59 56 ··· 231 234 /* Pick up ripe tomatoes */ 232 235 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 233 236 if (desc->status == DONE) { 234 - pch->completed = desc->txd.cookie; 237 + pch->chan.completed_cookie = desc->txd.cookie; 235 238 list_move_tail(&desc->node, &list); 236 239 } 237 240 ··· 302 305 303 306 spin_lock_irqsave(&pch->lock, flags); 304 307 305 - pch->completed = chan->cookie = 1; 308 + chan->completed_cookie = chan->cookie = 1; 306 309 pch->cyclic = false; 307 310 308 311 pch->pl330_chid = pl330_request_channel(&pdmac->pif); ··· 397 400 dma_cookie_t last_done, last_used; 398 401 int ret; 399 402 400 - last_done = pch->completed; 403 + last_done = chan->completed_cookie; 401 404 last_used = chan->cookie; 402 405 403 406 ret = dma_async_is_complete(cookie, last_done, last_used);
+5 -5
drivers/dma/ppc4xx/adma.c
··· 1930 1930 if (end_of_chain && slot_cnt) { 1931 1931 /* Should wait for ZeroSum completion */ 1932 1932 if (cookie > 0) 1933 - chan->completed_cookie = cookie; 1933 + chan->common.completed_cookie = cookie; 1934 1934 return; 1935 1935 } 1936 1936 ··· 1960 1960 BUG_ON(!seen_current); 1961 1961 1962 1962 if (cookie > 0) { 1963 - chan->completed_cookie = cookie; 1963 + chan->common.completed_cookie = cookie; 1964 1964 pr_debug("\tcompleted cookie %d\n", cookie); 1965 1965 } 1966 1966 ··· 3950 3950 3951 3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3952 3952 last_used = chan->cookie; 3953 - last_complete = ppc440spe_chan->completed_cookie; 3953 + last_complete = chan->completed_cookie; 3954 3954 3955 3955 dma_set_tx_state(txstate, last_complete, last_used, 0); 3956 3956 ··· 3961 3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3962 3962 3963 3963 last_used = chan->cookie; 3964 - last_complete = ppc440spe_chan->completed_cookie; 3964 + last_complete = chan->completed_cookie; 3965 3965 3966 3966 dma_set_tx_state(txstate, last_complete, last_used, 0); 3967 3967 ··· 4058 4058 /* initialize the completed cookie to be less than 4059 4059 * the most recently used cookie 4060 4060 */ 4061 - chan->completed_cookie = cookie - 1; 4061 + chan->common.completed_cookie = cookie - 1; 4062 4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie; 4063 4063 4064 4064 /* channel should not be busy */
-2
drivers/dma/ppc4xx/adma.h
··· 81 81 * @common: common dmaengine channel object members 82 82 * @all_slots: complete domain of slots usable by the channel 83 83 * @pending: allows batching of hardware operations 84 - * @completed_cookie: identifier for the most recently completed operation 85 84 * @slots_allocated: records the actual size of the descriptor slot pool 86 85 * @hw_chain_inited: h/w descriptor chain initialization flag 87 86 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs ··· 98 99 struct list_head all_slots; 99 100 struct ppc440spe_adma_desc_slot *last_used; 100 101 int pending; 101 - dma_cookie_t completed_cookie; 102 102 int slots_allocated; 103 103 int hw_chain_inited; 104 104 struct tasklet_struct irq_tasklet;
+5 -5
drivers/dma/shdma.c
··· 764 764 cookie = tx->cookie; 765 765 766 766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 767 - if (sh_chan->completed_cookie != desc->cookie - 1) 767 + if (sh_chan->common.completed_cookie != desc->cookie - 1) 768 768 dev_dbg(sh_chan->dev, 769 769 "Completing cookie %d, expected %d\n", 770 770 desc->cookie, 771 - sh_chan->completed_cookie + 1); 772 - sh_chan->completed_cookie = desc->cookie; 771 + sh_chan->common.completed_cookie + 1); 772 + sh_chan->common.completed_cookie = desc->cookie; 773 773 } 774 774 775 775 /* Call callback on the last chunk */ ··· 823 823 * Terminating and the loop completed normally: forgive 824 824 * uncompleted cookies 825 825 */ 826 - sh_chan->completed_cookie = sh_chan->common.cookie; 826 + sh_chan->common.completed_cookie = sh_chan->common.cookie; 827 827 828 828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 829 829 ··· 891 891 sh_dmae_chan_ld_cleanup(sh_chan, false); 892 892 893 893 /* First read completed cookie to avoid a skew */ 894 - last_complete = sh_chan->completed_cookie; 894 + last_complete = chan->completed_cookie; 895 895 rmb(); 896 896 last_used = chan->cookie; 897 897 BUG_ON(last_complete < 0);
-1
drivers/dma/shdma.h
··· 30 30 }; 31 31 32 32 struct sh_dmae_chan { 33 - dma_cookie_t completed_cookie; /* The maximum cookie completed */ 34 33 spinlock_t desc_lock; /* Descriptor operation lock */ 35 34 struct list_head ld_queue; /* Link descriptors queue */ 36 35 struct list_head ld_free; /* Link descriptors free */
+3 -4
drivers/dma/sirf-dma.c
··· 59 59 struct list_head queued; 60 60 struct list_head active; 61 61 struct list_head completed; 62 - dma_cookie_t completed_cookie; 63 62 unsigned long happened_cyclic; 64 63 unsigned long completed_cyclic; 65 64 ··· 207 208 /* Free descriptors */ 208 209 spin_lock_irqsave(&schan->lock, flags); 209 210 list_splice_tail_init(&list, &schan->free); 210 - schan->completed_cookie = last_cookie; 211 + schan->chan.completed_cookie = last_cookie; 211 212 spin_unlock_irqrestore(&schan->lock, flags); 212 213 } else { 213 214 /* for cyclic channel, desc is always in active list */ ··· 418 419 419 420 spin_lock_irqsave(&schan->lock, flags); 420 421 last_used = schan->chan.cookie; 421 - last_complete = schan->completed_cookie; 422 + last_complete = schan->chan.completed_cookie; 422 423 spin_unlock_irqrestore(&schan->lock, flags); 423 424 424 425 dma_set_tx_state(txstate, last_complete, last_used, 0); ··· 635 636 636 637 schan->chan.device = dma; 637 638 schan->chan.cookie = 1; 638 - schan->completed_cookie = schan->chan.cookie; 639 + schan->chan.completed_cookie = schan->chan.cookie; 639 640 640 641 INIT_LIST_HEAD(&schan->free); 641 642 INIT_LIST_HEAD(&schan->prepared);
+3 -7
drivers/dma/ste_dma40.c
··· 220 220 * 221 221 * @lock: A spinlock to protect this struct. 222 222 * @log_num: The logical number, if any of this channel. 223 - * @completed: Starts with 1, after first interrupt it is set to dma engine's 224 - * current cookie. 225 223 * @pending_tx: The number of pending transfers. Used between interrupt handler 226 224 * and tasklet. 227 225 * @busy: Set to true when transfer is ongoing on this channel. ··· 248 250 struct d40_chan { 249 251 spinlock_t lock; 250 252 int log_num; 251 - /* ID of the most recent completed transfer */ 252 - int completed; 253 253 int pending_tx; 254 254 bool busy; 255 255 struct d40_phy_res *phy_chan; ··· 1353 1357 goto err; 1354 1358 1355 1359 if (!d40d->cyclic) 1356 - d40c->completed = d40d->txd.cookie; 1360 + d40c->chan.completed_cookie = d40d->txd.cookie; 1357 1361 1358 1362 /* 1359 1363 * If terminating a channel pending_tx is set to zero. ··· 2178 2182 bool is_free_phy; 2179 2183 spin_lock_irqsave(&d40c->lock, flags); 2180 2184 2181 - d40c->completed = chan->cookie = 1; 2185 + chan->completed_cookie = chan->cookie = 1; 2182 2186 2183 2187 /* If no dma configuration is set use default configuration (memcpy) */ 2184 2188 if (!d40c->configured) { ··· 2347 2351 return -EINVAL; 2348 2352 } 2349 2353 2350 - last_complete = d40c->completed; 2354 + last_complete = chan->completed_cookie; 2351 2355 last_used = chan->cookie; 2352 2356 2353 2357 if (d40_is_paused(d40c))
+3 -4
drivers/dma/timb_dma.c
··· 84 84 especially the lists and descriptors, 85 85 from races between the tasklet and calls 86 86 from above */ 87 - dma_cookie_t last_completed_cookie; 88 87 bool ongoing; 89 88 struct list_head active_list; 90 89 struct list_head queue; ··· 283 284 else 284 285 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); 285 286 */ 286 - td_chan->last_completed_cookie = txd->cookie; 287 + td_chan->chan.completed_cookie = txd->cookie; 287 288 td_chan->ongoing = false; 288 289 289 290 callback = txd->callback; ··· 480 481 } 481 482 482 483 spin_lock_bh(&td_chan->lock); 483 - td_chan->last_completed_cookie = 1; 484 + chan->completed_cookie = 1; 484 485 chan->cookie = 1; 485 486 spin_unlock_bh(&td_chan->lock); 486 487 ··· 522 523 523 524 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 524 525 525 - last_complete = td_chan->last_completed_cookie; 526 + last_complete = chan->completed_cookie; 526 527 last_used = chan->cookie; 527 528 528 529 ret = dma_async_is_complete(cookie, last_complete, last_used);
+5 -5
drivers/dma/txx9dmac.c
··· 424 424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 425 425 txd->cookie, desc); 426 426 427 - dc->completed = txd->cookie; 427 + dc->chan.completed_cookie = txd->cookie; 428 428 callback = txd->callback; 429 429 param = txd->callback_param; 430 430 ··· 976 976 dma_cookie_t last_complete; 977 977 int ret; 978 978 979 - last_complete = dc->completed; 979 + last_complete = chan->completed_cookie; 980 980 last_used = chan->cookie; 981 981 982 982 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 985 985 txx9dmac_scan_descriptors(dc); 986 986 spin_unlock_bh(&dc->lock); 987 987 988 - last_complete = dc->completed; 988 + last_complete = chan->completed_cookie; 989 989 last_used = chan->cookie; 990 990 991 991 ret = dma_async_is_complete(cookie, last_complete, last_used); ··· 1057 1057 return -EIO; 1058 1058 } 1059 1059 1060 - dc->completed = chan->cookie = 1; 1060 + chan->completed_cookie = chan->cookie = 1; 1061 1061 1062 1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; 1063 1063 txx9dmac_chan_set_SMPCHN(dc); ··· 1186 1186 dc->ddev->chan[ch] = dc; 1187 1187 dc->chan.device = &dc->dma; 1188 1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); 1189 - dc->chan.cookie = dc->completed = 1; 1189 + dc->chan.cookie = dc->chan.completed_cookie = 1; 1190 1190 1191 1191 if (is_dmac64(dc)) 1192 1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
-1
drivers/dma/txx9dmac.h
··· 172 172 spinlock_t lock; 173 173 174 174 /* these other elements are all protected by lock */ 175 - dma_cookie_t completed; 176 175 struct list_head active_list; 177 176 struct list_head queue; 178 177 struct list_head free_list;
-2
include/linux/amba/pl08x.h
··· 172 172 * @runtime_addr: address for RX/TX according to the runtime config 173 173 * @runtime_direction: current direction of this channel according to 174 174 * runtime config 175 - * @lc: last completed transaction on this channel 176 175 * @pend_list: queued transactions pending on this channel 177 176 * @at: active transaction on this channel 178 177 * @lock: a lock for this channel data ··· 196 197 u32 src_cctl; 197 198 u32 dst_cctl; 198 199 enum dma_transfer_direction runtime_direction; 199 - dma_cookie_t lc; 200 200 struct list_head pend_list; 201 201 struct pl08x_txd *at; 202 202 spinlock_t lock;
+2
include/linux/dmaengine.h
··· 258 258 * struct dma_chan - devices supply DMA channels, clients use them 259 259 * @device: ptr to the dma device who supplies this channel, always !%NULL 260 260 * @cookie: last cookie value returned to client 261 + * @completed_cookie: last completed cookie for this channel 261 262 * @chan_id: channel ID for sysfs 262 263 * @dev: class device for sysfs 263 264 * @device_node: used to add this to the device chan list ··· 270 269 struct dma_chan { 271 270 struct dma_device *device; 272 271 dma_cookie_t cookie; 272 + dma_cookie_t completed_cookie; 273 273 274 274 /* sysfs */ 275 275 int chan_id;