
drivers/dma: remove unused support for MEMSET operations

There have never been any real users of MEMSET operations since they
were introduced in January 2007 by commit 7405f74badf4 ("dmaengine:
refactor dmaengine around dma_async_tx_descriptor"). Therefore remove
support for them for now; they can always be brought back when needed.
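
For context, this is roughly what the removed consumer-facing API looked
like. A minimal sketch, assuming a caller that wants a page zeroed
asynchronously; the helper zero_page_async() and its callback are
illustrative and not part of this patch, while async_memset() and
init_async_submit() come from the include/linux/async_tx.h declarations
deleted below:

#include <linux/async_tx.h>

/* illustrative completion callback, not part of the original patch */
static void fill_done(void *param)
{
        /* runs once the fill (offloaded or synchronous) has completed */
}

static struct dma_async_tx_descriptor *zero_page_async(struct page *dest)
{
        struct async_submit_ctl submit;

        /* no flags, no dependent descriptor; fill_done() fires on completion */
        init_async_submit(&submit, 0, NULL, fill_done, NULL, NULL);

        /* offloads to a DMA_MEMSET-capable channel when one is available,
         * otherwise async_memset() falls back to a synchronous memset() */
        return async_memset(dest, 0, 0, PAGE_SIZE, &submit);
}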

[sebastian.hesselbarth@gmail.com: fix drivers/dma/mv_xor]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Acked-by: Dan Williams <djbw@fb.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Olof Johansson <olof@lixom.net>
Cc: Kevin Hilman <khilman@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Bartlomiej Zolnierkiewicz, committed by Linus Torvalds
Commit 48a9db46, parent dcf6d294

18 files changed, +8 -462
Documentation/crypto/async-tx-api.txt (-1)
@@ -222,5 +222,4 @@
 include/linux/async_tx.h: core header file for the async_tx api
 crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
 crypto/async_tx/async_memcpy.c: copy offload
-crypto/async_tx/async_memset.c: memory fill offload
 crypto/async_tx/async_xor.c: xor and xor zero sum offload

arch/arm/mach-iop13xx/setup.c (-3)
@@ -469,7 +469,6 @@
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		break;
 	case IOP13XX_INIT_ADMA_1:
@@ -478,7 +479,6 @@
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		break;
 	case IOP13XX_INIT_ADMA_2:
@@ -487,7 +489,6 @@
 		dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR, plat_data->cap_mask);
 		dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
-		dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 		dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ, plat_data->cap_mask);
 		dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);

arch/arm/plat-iop/adma.c (-2)
@@ -192,12 +192,10 @@
 
 #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #else
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #endif
 

arch/arm/plat-orion/common.c (-10)
@@ -666,14 +666,9 @@
 	orion_xor0_shared_resources[3].start = irq_1;
 	orion_xor0_shared_resources[3].end = irq_1;
 
-	/*
-	 * two engines can't do memset simultaneously, this limitation
-	 * satisfied by removing memset support from one of the engines.
-	 */
 	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
 
-	dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
 	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
 
@@ -732,14 +727,9 @@
 	orion_xor1_shared_resources[3].start = irq_1;
 	orion_xor1_shared_resources[3].end = irq_1;
 
-	/*
-	 * two engines can't do memset simultaneously, this limitation
-	 * satisfied by removing memset support from one of the engines.
-	 */
 	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
 
-	dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
 	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
 	dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
 

crypto/async_tx/Kconfig (-4)
@@ -10,10 +10,6 @@
 	select ASYNC_CORE
 	select XOR_BLOCKS
 
-config ASYNC_MEMSET
-	tristate
-	select ASYNC_CORE
-
 config ASYNC_PQ
 	tristate
 	select ASYNC_CORE

crypto/async_tx/Makefile (-1)
@@ -1,6 +1,5 @@
 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
 obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o
-obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o
 obj-$(CONFIG_ASYNC_XOR) += async_xor.o
 obj-$(CONFIG_ASYNC_PQ) += async_pq.o
 obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o

crypto/async_tx/async_memset.c (-89, file deleted)
@@ -1,89 +0,0 @@
-/*
- * memory fill offload engine support
- *
- * Copyright © 2006, Intel Corporation.
- *
- *	Dan Williams <dan.j.williams@intel.com>
- *
- *	with architecture considerations by:
- *	Neil Brown <neilb@suse.de>
- *	Jeff Garzik <jeff@garzik.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/async_tx.h>
-
-/**
- * async_memset - attempt to fill memory with a dma engine.
- * @dest: destination page
- * @val: fill value
- * @offset: offset in pages to start transaction
- * @len: length in bytes
- *
- * honored flags: ASYNC_TX_ACK
- */
-struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset, size_t len,
-	     struct async_submit_ctl *submit)
-{
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
-						      &dest, 1, NULL, 0, len);
-	struct dma_device *device = chan ? chan->device : NULL;
-	struct dma_async_tx_descriptor *tx = NULL;
-
-	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
-		dma_addr_t dma_dest;
-		unsigned long dma_prep_flags = 0;
-
-		if (submit->cb_fn)
-			dma_prep_flags |= DMA_PREP_INTERRUPT;
-		if (submit->flags & ASYNC_TX_FENCE)
-			dma_prep_flags |= DMA_PREP_FENCE;
-		dma_dest = dma_map_page(device->dev, dest, offset, len,
-					DMA_FROM_DEVICE);
-
-		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
-						    dma_prep_flags);
-	}
-
-	if (tx) {
-		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, submit);
-	} else { /* run the memset synchronously */
-		void *dest_buf;
-		pr_debug("%s: (sync) len: %zu\n", __func__, len);
-
-		dest_buf = page_address(dest) + offset;
-
-		/* wait for any prerequisite operations */
-		async_tx_quiesce(&submit->depend_tx);
-
-		memset(dest_buf, val, len);
-
-		async_tx_sync_epilog(submit);
-	}
-
-	return tx;
-}
-EXPORT_SYMBOL_GPL(async_memset);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("asynchronous memset api");
-MODULE_LICENSE("GPL");

drivers/dma/dmaengine.c (-7)
@@ -663,11 +663,6 @@
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
-	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
-		return false;
-	#endif
-
 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
@@ -729,8 +724,6 @@
 	       !device->device_prep_dma_pq);
 	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 	       !device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-	       !device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 	       !device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&

drivers/dma/ioat/dma.c (+1 -2)
@@ -1105,12 +1105,11 @@
 {
 	struct dma_device *dma = c->device;
 
-	return sprintf(page, "copy%s%s%s%s%s%s\n",
+	return sprintf(page, "copy%s%s%s%s%s\n",
 		dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
 		dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
 		dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
 		dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
 		dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
 
 }

drivers/dma/ioat/dma_v2.h (-1)
@@ -123,7 +123,6 @@
 struct ioat_ring_ent {
 	union {
 		struct ioat_dma_descriptor *hw;
-		struct ioat_fill_descriptor *fill;
 		struct ioat_xor_descriptor *xor;
 		struct ioat_xor_ext_descriptor *xor_ex;
 		struct ioat_pq_descriptor *pq;

drivers/dma/ioat/dma_v3.c (+2 -112)
@@ -311,14 +311,6 @@
 		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
 			ioat_dma_unmap(chan, flags, len, desc->hw);
 		break;
-	case IOAT_OP_FILL: {
-		struct ioat_fill_descriptor *hw = desc->fill;
-
-		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
-			ioat_unmap(pdev, hw->dst_addr - offset, len,
-				   PCI_DMA_FROMDEVICE, flags, 1);
-		break;
-	}
 	case IOAT_OP_XOR_VAL:
 	case IOAT_OP_XOR: {
 		struct ioat_xor_descriptor *xor = desc->xor;
@@ -821,51 +813,6 @@
 	ioat3_cleanup(ioat);
 
 	return dma_cookie_status(c, cookie, txstate);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_fill_descriptor *fill;
-	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		fill = desc->fill;
-
-		fill->size = xfer_size;
-		fill->src_data = src_data;
-		fill->dst_addr = dest;
-		fill->ctl = 0;
-		fill->ctl_f.op = IOAT_OP_FILL;
-
-		len -= xfer_size;
-		dest += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	fill->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -1431,7 +1378,7 @@
 	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
 	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
 	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_addr, dest_dma;
+	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
@@ -1598,56 +1545,6 @@
 		goto free_resources;
 	}
 
-	/* skip memset if the capability is not present */
-	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	/* test memset */
-	op = IOAT_OP_FILL;
-
-	dma_addr = dma_map_page(dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-					 DMA_PREP_INTERRUPT |
-					 DMA_COMPL_SKIP_SRC_UNMAP |
-					 DMA_COMPL_SKIP_DEST_UNMAP);
-	if (!tx) {
-		dev_err(dev, "Self-test memset prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test memset setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dev, "Self-test memset timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dev, "Self-test memset failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	op = IOAT_OP_XOR_VAL;
 
@@ -1706,8 +1603,7 @@
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
 			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
 				       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_FILL)
-		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
@@ -1943,12 +1839,6 @@
 			}
 		}
 	}
-
-	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
-		dma_cap_set(DMA_MEMSET, dma->cap_mask);
-		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
-	}
-
 
 	dma->device_tx_status = ioat3_tx_status;
 	device->cleanup_fn = ioat3_cleanup_event;

drivers/dma/ioat/hw.h (-27)
@@ -100,33 +100,6 @@
 	uint64_t user2;
 };
 
-struct ioat_fill_descriptor {
-	uint32_t size;
-	union {
-		uint32_t ctl;
-		struct {
-			unsigned int int_en:1;
-			unsigned int rsvd:1;
-			unsigned int dest_snoop_dis:1;
-			unsigned int compl_write:1;
-			unsigned int fence:1;
-			unsigned int rsvd2:2;
-			unsigned int dest_brk:1;
-			unsigned int bundle:1;
-			unsigned int rsvd4:15;
-#define IOAT_OP_FILL 0x01
-			unsigned int op:8;
-		} ctl_f;
-	};
-	uint64_t src_data;
-	uint64_t dst_addr;
-	uint64_t next;
-	uint64_t rsv1;
-	uint64_t next_dst_addr;
-	uint64_t user1;
-	uint64_t user2;
-};
-
 struct ioat_xor_descriptor {
 	uint32_t size;
 	union {

drivers/dma/iop-adma.c (+1 -65)
@@ -633,39 +633,6 @@
 }
 
 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
-			 int value, size_t len, unsigned long flags)
-{
-	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
-	struct iop_adma_desc_slot *sw_desc, *grp_start;
-	int slot_cnt, slots_per_op;
-
-	if (unlikely(!len))
-		return NULL;
-	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
-
-	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-		__func__, len);
-
-	spin_lock_bh(&iop_chan->lock);
-	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
-	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
-	if (sw_desc) {
-		grp_start = sw_desc->group_head;
-		iop_desc_init_memset(grp_start, flags);
-		iop_desc_set_byte_count(grp_start, iop_chan, len);
-		iop_desc_set_block_fill_val(grp_start, value);
-		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&iop_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
 		      unsigned long flags)
@@ -1176,33 +1143,6 @@
 		goto free_resources;
 	}
 
-	/* test memset */
-	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
-			PAGE_SIZE, DMA_FROM_DEVICE);
-	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	cookie = iop_adma_tx_submit(tx);
-	iop_adma_issue_pending(dma_chan);
-	msleep(8);
-
-	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
-		dev_err(dma_chan->device->dev,
-			"Self-test memset timed out, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i]) {
-			dev_err(dma_chan->device->dev,
-				"Self-test memset failed compare, disabling\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-
 	/* test for non-zero parity sum */
 	zero_sum_result = 0;
 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
@@ -1487,8 +1427,6 @@
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = iop_adma_get_max_xor();
 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
@@ -1556,8 +1494,7 @@
 		goto err_free_iop_chan;
 	}
 
-	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
-	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		ret = iop_adma_xor_val_self_test(adev);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
@@ -1584,6 +1521,5 @@
 	 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
 	 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 	 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
-	 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 	 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 	 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

drivers/dma/mv_xor.c (+4 -81)
@@ -89,11 +89,6 @@
 	hw_desc->phy_next_desc = 0;
 }
 
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
-	desc->value = val;
-}
-
 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 				  dma_addr_t addr)
 {
@@ -126,22 +121,6 @@
 					u32 next_desc_addr)
 {
 	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
-}
-
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
-	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
-	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
-	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
-	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
 }
 
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
@@ -186,8 +165,6 @@
 
 	if (chain_old_tail->type != desc->type)
 		return 0;
-	if (desc->type == DMA_MEMSET)
-		return 0;
 
 	return 1;
 }
@@ -204,9 +181,6 @@
 		break;
 	case DMA_MEMCPY:
 		op_mode = XOR_OPERATION_MODE_MEMCPY;
-		break;
-	case DMA_MEMSET:
-		op_mode = XOR_OPERATION_MODE_MEMSET;
 		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
@@ -274,18 +248,9 @@
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
 
-	if (sw_desc->type == DMA_MEMSET) {
-		/* for memset requests we need to program the engine, no
-		 * descriptors used.
-		 */
-		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
-		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
-		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
-		mv_chan_set_value(mv_chan, sw_desc->value);
-	} else {
-		/* set the hardware chain */
-		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
-	}
+	/* set the hardware chain */
+	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
 	mv_chan->pending += sw_desc->slot_cnt;
 	mv_xor_issue_pending(&mv_chan->dmachan);
 }
@@ -684,43 +649,6 @@
 		"%s sw_desc %p async_tx %p\n",
 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
 
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
-{
-	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
-
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x len: %u flags: %ld\n",
-		__func__, dest, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memset_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
-	if (sw_desc) {
-		sw_desc->type = DMA_MEMSET;
-		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_block_fill_val(grp_start, value);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
-	}
-	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
-		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
@@ -1137,8 +1065,6 @@
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1187,8 +1113,7 @@
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
@@ -1298,8 +1223,6 @@
 		dma_cap_set(DMA_MEMCPY, cap_mask);
 	if (of_property_read_bool(np, "dmacap,xor"))
 		dma_cap_set(DMA_XOR, cap_mask);
-	if (of_property_read_bool(np, "dmacap,memset"))
-		dma_cap_set(DMA_MEMSET, cap_mask);
 	if (of_property_read_bool(np, "dmacap,interrupt"))
 		dma_cap_set(DMA_INTERRUPT, cap_mask);
 

drivers/dma/mv_xor.h (-1)
@@ -31,7 +31,6 @@
 
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
-#define XOR_OPERATION_MODE_MEMSET	4
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))

drivers/dma/ppc4xx/adma.c (-47)
@@ -2323,47 +2323,6 @@
 }
 
 /**
- * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
-		struct dma_chan *chan, dma_addr_t dma_dest, int value,
-		size_t len, unsigned long flags)
-{
-	struct ppc440spe_adma_chan *ppc440spe_chan;
-	struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
-	int slot_cnt, slots_per_op;
-
-	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
-	if (unlikely(!len))
-		return NULL;
-
-	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
-
-	spin_lock_bh(&ppc440spe_chan->lock);
-
-	dev_dbg(ppc440spe_chan->device->common.dev,
-		"ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
-		ppc440spe_chan->device->id, __func__, value, len,
-		flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
-	slot_cnt = slots_per_op = 1;
-	sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
-					     slots_per_op);
-	if (sw_desc) {
-		group_start = sw_desc->group_head;
-		ppc440spe_desc_init_memset(group_start, value, flags);
-		ppc440spe_adma_set_dest(group_start, dma_dest, 0);
-		ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
-		sw_desc->unmap_len = len;
-		sw_desc->async_tx.flags = flags;
-	}
-	spin_unlock_bh(&ppc440spe_chan->lock);
-
-	return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
  * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
  */
 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
@@ -4125,7 +4084,6 @@
 	case PPC440SPE_DMA1_ID:
 		dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
 		dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
-		dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ, adev->common.cap_mask);
 		dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
 		dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
@@ -4150,10 +4108,6 @@
 	if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
 		adev->common.device_prep_dma_memcpy =
 			ppc440spe_adma_prep_dma_memcpy;
-	}
-	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
-		adev->common.device_prep_dma_memset =
-			ppc440spe_adma_prep_dma_memset;
 	}
 	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
 		adev->common.max_xor = XOR_MAX_OPS;
@@ -4217,6 +4171,5 @@
 	  dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
 	  dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
 	  dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
-	  dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
 	  dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
 }

include/linux/async_tx.h (-4)
@@ -182,10 +182,6 @@
 			unsigned int src_offset, size_t len,
 			struct async_submit_ctl *submit);
 
-struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
-	     size_t len, struct async_submit_ctl *submit);
-
 struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *

include/linux/dmaengine.h (-5)
@@ -66,7 +66,6 @@
 	DMA_PQ,
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
-	DMA_MEMSET,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -520,7 +519,6 @@
  * @device_prep_dma_xor_val: prepares a xor validation operation
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
- * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -573,9 +571,6 @@
 		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		unsigned int src_cnt, const unsigned char *scf, size_t len,
 		enum sum_check_flags *pqres, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
-		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
-		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(