Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: remove DMA unmap flags

Remove no longer needed DMA unmap flags:
- DMA_COMPL_SKIP_SRC_UNMAP
- DMA_COMPL_SKIP_DEST_UNMAP
- DMA_COMPL_SRC_UNMAP_SINGLE
- DMA_COMPL_DEST_UNMAP_SINGLE

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Acked-by: Mark Brown <broonie@linaro.org>
[djbw: clean up straggling skip unmap flags in ntb]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Authored by Bartlomiej Zolnierkiewicz and committed by Dan Williams.
0776ae7b 54f8d501

+27 -67
+1 -2
crypto/async_tx/async_memcpy.c
··· 56 56 unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); 57 57 58 58 if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { 59 - unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | 60 - DMA_COMPL_SKIP_DEST_UNMAP; 59 + unsigned long dma_prep_flags = 0; 61 60 62 61 if (submit->cb_fn) 63 62 dma_prep_flags |= DMA_PREP_INTERRUPT;
-1
crypto/async_tx/async_pq.c
··· 62 62 dma_addr_t dma_dest[2]; 63 63 int src_off = 0; 64 64 65 - dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; 66 65 if (submit->flags & ASYNC_TX_FENCE) 67 66 dma_flags |= DMA_PREP_FENCE; 68 67
+2 -6
crypto/async_tx/async_raid6_recov.c
··· 47 47 struct device *dev = dma->dev; 48 48 dma_addr_t pq[2]; 49 49 struct dma_async_tx_descriptor *tx; 50 - enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | 51 - DMA_COMPL_SKIP_DEST_UNMAP | 52 - DMA_PREP_PQ_DISABLE_P; 50 + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; 53 51 54 52 if (submit->flags & ASYNC_TX_FENCE) 55 53 dma_flags |= DMA_PREP_FENCE; ··· 111 113 dma_addr_t dma_dest[2]; 112 114 struct device *dev = dma->dev; 113 115 struct dma_async_tx_descriptor *tx; 114 - enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | 115 - DMA_COMPL_SKIP_DEST_UNMAP | 116 - DMA_PREP_PQ_DISABLE_P; 116 + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; 117 117 118 118 if (submit->flags & ASYNC_TX_FENCE) 119 119 dma_flags |= DMA_PREP_FENCE;
+2 -4
crypto/async_tx/async_xor.c
··· 41 41 dma_async_tx_callback cb_fn_orig = submit->cb_fn; 42 42 void *cb_param_orig = submit->cb_param; 43 43 enum async_tx_flags flags_orig = submit->flags; 44 - enum dma_ctrl_flags dma_flags; 44 + enum dma_ctrl_flags dma_flags = 0; 45 45 int src_cnt = unmap->to_cnt; 46 46 int xor_src_cnt; 47 47 dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; ··· 55 55 /* if we are submitting additional xors, leave the chain open 56 56 * and clear the callback parameters 57 57 */ 58 - dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; 59 58 if (src_cnt > xor_src_cnt) { 60 59 submit->flags &= ~ASYNC_TX_ACK; 61 60 submit->flags |= ASYNC_TX_FENCE; ··· 283 284 284 285 if (unmap && src_cnt <= device->max_xor && 285 286 is_dma_xor_aligned(device, offset, 0, len)) { 286 - unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | 287 - DMA_COMPL_SKIP_DEST_UNMAP; 287 + unsigned long dma_prep_flags = 0; 288 288 int i; 289 289 290 290 pr_debug("%s: (async) len: %zu\n", __func__, len);
+1 -2
drivers/ata/pata_arasan_cf.c
··· 396 396 struct dma_async_tx_descriptor *tx; 397 397 struct dma_chan *chan = acdev->dma_chan; 398 398 dma_cookie_t cookie; 399 - unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | 400 - DMA_COMPL_SKIP_DEST_UNMAP; 399 + unsigned long flags = DMA_PREP_INTERRUPT; 401 400 int ret = 0; 402 401 403 402 tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+1 -2
drivers/dma/dmaengine.c
··· 1065 1065 unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, 1066 1066 DMA_FROM_DEVICE); 1067 1067 unmap->len = len; 1068 - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP | 1069 - DMA_COMPL_SKIP_DEST_UNMAP; 1068 + flags = DMA_CTRL_ACK; 1070 1069 tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], 1071 1070 len, flags); 1072 1071
+1 -2
drivers/dma/dmatest.c
··· 599 599 /* 600 600 * src and dst buffers are freed by ourselves below 601 601 */ 602 - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | 603 - DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; 602 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 604 603 605 604 while (!kthread_should_stop() 606 605 && !(params->iterations && total_tests >= params->iterations)) {
+1 -2
drivers/dma/ioat/dma.c
··· 818 818 819 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 820 820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 821 - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | 822 - DMA_PREP_INTERRUPT; 821 + flags = DMA_PREP_INTERRUPT; 823 822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 824 823 IOAT_TEST_SIZE, flags); 825 824 if (!tx) {
+3 -9
drivers/dma/ioat/dma_v3.c
··· 1279 1279 DMA_TO_DEVICE); 1280 1280 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1281 1281 IOAT_NUM_SRC_TEST, PAGE_SIZE, 1282 - DMA_PREP_INTERRUPT | 1283 - DMA_COMPL_SKIP_SRC_UNMAP | 1284 - DMA_COMPL_SKIP_DEST_UNMAP); 1282 + DMA_PREP_INTERRUPT); 1285 1283 1286 1284 if (!tx) { 1287 1285 dev_err(dev, "Self-test xor prep failed\n"); ··· 1340 1342 DMA_TO_DEVICE); 1341 1343 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1342 1344 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1343 - &xor_val_result, DMA_PREP_INTERRUPT | 1344 - DMA_COMPL_SKIP_SRC_UNMAP | 1345 - DMA_COMPL_SKIP_DEST_UNMAP); 1345 + &xor_val_result, DMA_PREP_INTERRUPT); 1346 1346 if (!tx) { 1347 1347 dev_err(dev, "Self-test zero prep failed\n"); 1348 1348 err = -ENODEV; ··· 1385 1389 DMA_TO_DEVICE); 1386 1390 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1387 1391 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1388 - &xor_val_result, DMA_PREP_INTERRUPT | 1389 - DMA_COMPL_SKIP_SRC_UNMAP | 1390 - DMA_COMPL_SKIP_DEST_UNMAP); 1392 + &xor_val_result, DMA_PREP_INTERRUPT); 1391 1393 if (!tx) { 1392 1394 dev_err(dev, "Self-test 2nd zero prep failed\n"); 1393 1395 err = -ENODEV;
+1 -2
drivers/media/platform/m2m-deinterlace.c
··· 341 341 ctx->xt->dir = DMA_MEM_TO_MEM; 342 342 ctx->xt->src_sgl = false; 343 343 ctx->xt->dst_sgl = true; 344 - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | 345 - DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; 344 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 346 345 347 346 tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); 348 347 if (tx == NULL) {
+1 -1
drivers/media/platform/timblogiw.c
··· 565 565 566 566 desc = dmaengine_prep_slave_sg(fh->chan, 567 567 buf->sg, sg_elems, DMA_DEV_TO_MEM, 568 - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 568 + DMA_PREP_INTERRUPT); 569 569 if (!desc) { 570 570 spin_lock_irq(&fh->queue_lock); 571 571 list_del_init(&vb->queue);
+1 -2
drivers/misc/carma/carma-fpga.c
··· 631 631 struct dma_async_tx_descriptor *tx; 632 632 dma_cookie_t cookie; 633 633 dma_addr_t dst, src; 634 - unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | 635 - DMA_COMPL_SKIP_SRC_UNMAP; 634 + unsigned long dma_flags = 0; 636 635 637 636 dst_sg = buf->vb.sglist; 638 637 dst_nents = buf->vb.sglen;
+1 -2
drivers/mtd/nand/atmel_nand.c
··· 375 375 376 376 dma_dev = host->dma_chan->device; 377 377 378 - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | 379 - DMA_COMPL_SKIP_DEST_UNMAP; 378 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 380 379 381 380 phys_addr = dma_map_single(dma_dev->dev, p, len, dir); 382 381 if (dma_mapping_error(dma_dev->dev, phys_addr)) {
-2
drivers/mtd/nand/fsmc_nand.c
··· 573 573 dma_dev = chan->device; 574 574 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); 575 575 576 - flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; 577 - 578 576 if (direction == DMA_TO_DEVICE) { 579 577 dma_src = dma_addr; 580 578 dma_dst = host->data_pa;
+2 -4
drivers/net/ethernet/micrel/ks8842.c
··· 459 459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; 460 460 461 461 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 462 - &ctl->sg, 1, DMA_MEM_TO_DEV, 463 - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 462 + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); 464 463 if (!ctl->adesc) 465 464 return NETDEV_TX_BUSY; 466 465 ··· 570 571 sg_dma_len(sg) = DMA_BUFFER_SIZE; 571 572 572 573 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 573 - sg, 1, DMA_DEV_TO_MEM, 574 - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 574 + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); 575 575 576 576 if (!ctl->adesc) 577 577 goto out;
+3 -8
drivers/ntb/ntb_transport.c
··· 1037 1037 struct dmaengine_unmap_data *unmap; 1038 1038 dma_cookie_t cookie; 1039 1039 void *buf = entry->buf; 1040 - unsigned long flags; 1041 1040 1042 1041 entry->len = len; 1043 1042 ··· 1072 1073 1073 1074 unmap->from_cnt = 1; 1074 1075 1075 - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | 1076 - DMA_PREP_INTERRUPT; 1077 1076 txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], 1078 - unmap->addr[0], len, flags); 1077 + unmap->addr[0], len, 1078 + DMA_PREP_INTERRUPT); 1079 1079 if (!txd) 1080 1080 goto err_get_unmap; 1081 1081 ··· 1264 1266 void __iomem *offset; 1265 1267 size_t len = entry->len; 1266 1268 void *buf = entry->buf; 1267 - unsigned long flags; 1268 1269 1269 1270 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; 1270 1271 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); ··· 1298 1301 1299 1302 unmap->to_cnt = 1; 1300 1303 1301 - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | 1302 - DMA_PREP_INTERRUPT; 1303 1304 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, 1304 - flags); 1305 + DMA_PREP_INTERRUPT); 1305 1306 if (!txd) 1306 1307 goto err_get_unmap; 1307 1308
+2 -2
drivers/spi/spi-dw-mid.c
··· 150 150 &dws->tx_sgl, 151 151 1, 152 152 DMA_MEM_TO_DEV, 153 - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 153 + DMA_PREP_INTERRUPT); 154 154 txdesc->callback = dw_spi_dma_done; 155 155 txdesc->callback_param = dws; 156 156 ··· 173 173 &dws->rx_sgl, 174 174 1, 175 175 DMA_DEV_TO_MEM, 176 - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 176 + DMA_PREP_INTERRUPT); 177 177 rxdesc->callback = dw_spi_dma_done; 178 178 rxdesc->callback_param = dws; 179 179
+4 -14
include/linux/dmaengine.h
··· 171 171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client 172 172 * acknowledges receipt, i.e. has has a chance to establish any dependency 173 173 * chains 174 - * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) 175 - * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) 176 - * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single 177 - * (if not set, do the source dma-unmapping as page) 178 - * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single 179 - * (if not set, do the destination dma-unmapping as page) 180 174 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q 181 175 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P 182 176 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as ··· 182 188 enum dma_ctrl_flags { 183 189 DMA_PREP_INTERRUPT = (1 << 0), 184 190 DMA_CTRL_ACK = (1 << 1), 185 - DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), 186 - DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), 187 - DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), 188 - DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), 189 - DMA_PREP_PQ_DISABLE_P = (1 << 6), 190 - DMA_PREP_PQ_DISABLE_Q = (1 << 7), 191 - DMA_PREP_CONTINUE = (1 << 8), 192 - DMA_PREP_FENCE = (1 << 9), 191 + DMA_PREP_PQ_DISABLE_P = (1 << 2), 192 + DMA_PREP_PQ_DISABLE_Q = (1 << 3), 193 + DMA_PREP_CONTINUE = (1 << 4), 194 + DMA_PREP_FENCE = (1 << 5), 193 195 }; 194 196 195 197 /**