Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine fixes from Dan Williams:

- deprecation of net_dma to be removed in 3.14

- crash regression fix in pl330 from the dmaengine_unmap rework

- crash regression fix for any channel running raid ops without
CONFIG_ASYNC_TX_DMA from dmaengine_unmap

- memory leak regression in mv_xor from dmaengine_unmap

- build warning regressions in mv_xor, fsldma, ppc4xx, txx9, and
at_hdmac from dmaengine_unmap

- sleep in atomic regression in dma_async_memcpy_pg_to_pg

- new fix in mv_xor for handling channel initialization failures

* tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net_dma: mark broken
  dma: pl330: ensure DMA descriptors are zero-initialised
  dmaengine: fix sleep in atomic
  dmaengine: mv_xor: fix oops when channels fail to initialise
  dma: mv_xor: Use dmaengine_unmap_data for the self-tests
  dmaengine: fix enable for high order unmap pools
  dma: fix build warnings in txx9
  dmatest: fix build warning on mips
  dma: fix fsldma build warnings
  dma: fix build warnings in ppc4xx
  dmaengine: at_hdmac: remove unused function
  dma: mv_xor: remove mv_desc_get_dest_addr()
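
Most of these commits are fallout from the 3.13 dmaengine_unmap_data rework, which moved DMA unmap bookkeeping out of individual drivers and into refcounted, pool-allocated unmap descriptors. As a reference for the diffs below, here is a minimal sketch of the lifecycle a converted call site follows; dev, src_page, dst_page, and len are placeholders rather than identifiers from this series:

    struct dmaengine_unmap_data *unmap;

    /* Draw a two-entry unmap descriptor from the mempool.  GFP_NOWAIT
     * because callers may be in atomic context (see the dmaengine.c hunk
     * below); the self-tests, which may sleep, use GFP_KERNEL instead. */
    unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
    if (!unmap)
        return -ENOMEM;

    unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
    unmap->to_cnt = 1;          /* entries mapped towards the device */
    unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
    unmap->from_cnt = 1;        /* entries mapped back from the device */
    unmap->len = len;

    /* ... hand the addresses to a prep_dma_* call and submit ... */

    /* Drop this reference; the addresses are unmapped when the count
     * falls to zero, typically at descriptor completion. */
    dmaengine_unmap_put(unmap);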

+80 -110
+7
drivers/dma/Kconfig
···
     tristate "Intel I/OAT DMA support"
     depends on PCI && X86
     select DMA_ENGINE
+    select DMA_ENGINE_RAID
     select DCA
     help
       Enable support for the Intel(R) I/OAT DMA engine present
···
     bool "Marvell XOR engine support"
     depends on PLAT_ORION
     select DMA_ENGINE
+    select DMA_ENGINE_RAID
     select ASYNC_TX_ENABLE_CHANNEL_SWITCH
     ---help---
       Enable support for the Marvell XOR engine.
···
     tristate "AMCC PPC440SPe ADMA support"
     depends on 440SPe || 440SP
     select DMA_ENGINE
+    select DMA_ENGINE_RAID
     select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
     select ASYNC_TX_ENABLE_CHANNEL_SWITCH
     help
···
     bool "Network: TCP receive copy offload"
     depends on DMA_ENGINE && NET
     default (INTEL_IOATDMA || FSL_DMA)
+    depends on BROKEN
     help
       This enables the use of DMA engines in the network stack to
       offload receive copy-to-user operations, freeing CPU cycles.
···
     help
       Simple DMA test client. Say N unless you're debugging a
       DMA Device driver.
+
+config DMA_ENGINE_RAID
+    bool
 
 endif
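
The new DMA_ENGINE_RAID symbol is a promptless bool, so it can only be enabled via select by the RAID-capable drivers patched above. It replaces CONFIG_ASYNC_TX_DMA as the gate for the high-order unmap pools: on kernels without ASYNC_TX_DMA the 16/128/256-entry pools were compiled out while the pool lookup still assumed they existed, so any channel running raid ops indexed past the end of a one-element table. A paraphrased (not verbatim) sketch of that lookup in dmaengine.c:

    /* Requests for more than two addresses must be served by one of the
     * high-order pools; with those pools compiled out, &unmap_pool[1..3]
     * pointed past the end of the array. */
    static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
    {
        switch (get_count_order(nr)) {
        case 0 ... 1:
            return &unmap_pool[0];  /* 2 addresses */
        case 2 ... 4:
            return &unmap_pool[1];  /* 16 */
        case 5 ... 7:
            return &unmap_pool[2];  /* 128 */
        case 8:
            return &unmap_pool[3];  /* 256 */
        default:
            BUG();
            return NULL;
        }
    }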
-4
drivers/dma/at_hdmac_regs.h
···
 {
     return &chan->dev->device;
 }
-static struct device *chan2parent(struct dma_chan *chan)
-{
-    return chan->dev->device.parent;
-}
 
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)
+2 -2
drivers/dma/dmaengine.c
···
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
     __UNMAP_POOL(2),
-#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
     __UNMAP_POOL(16),
     __UNMAP_POOL(128),
     __UNMAP_POOL(256),
···
     dma_cookie_t cookie;
     unsigned long flags;
 
-    unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+    unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
     if (!unmap)
         return -ENOMEM;
 
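
The GFP_NOIO to GFP_NOWAIT switch is the "sleep in atomic" fix: dma_async_memcpy_pg_to_pg() can be reached from atomic context, and GFP_NOIO still permits the allocator to sleep in reclaim, whereas GFP_NOWAIT either succeeds immediately from the pool or fails fast. A sketch of the constraint with a hypothetical caller; chan_lock and the fallback are illustrative, not code from this series:

    spin_lock_irqsave(&chan_lock, flags);   /* atomic context from here */

    /* Must not sleep under the lock: on failure the caller falls back
     * to a plain CPU copy instead of blocking for reclaim. */
    unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
    if (!unmap) {
        spin_unlock_irqrestore(&chan_lock, flags);
        return -ENOMEM;
    }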
+4 -4
drivers/dma/dmatest.c
···
 
     um->len = params->buf_size;
     for (i = 0; i < src_cnt; i++) {
-        unsigned long buf = (unsigned long) thread->srcs[i];
+        void *buf = thread->srcs[i];
         struct page *pg = virt_to_page(buf);
-        unsigned pg_off = buf & ~PAGE_MASK;
+        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
         um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
                                    um->len, DMA_TO_DEVICE);
···
     /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
     dsts = &um->addr[src_cnt];
     for (i = 0; i < dst_cnt; i++) {
-        unsigned long buf = (unsigned long) thread->dsts[i];
+        void *buf = thread->dsts[i];
         struct page *pg = virt_to_page(buf);
-        unsigned pg_off = buf & ~PAGE_MASK;
+        unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
         dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
                                DMA_BIDIRECTIONAL);
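
The mips build warning came from handing an unsigned long to virt_to_page(), whose implementation there passes the argument on to a function expecting a pointer, producing a "makes pointer from integer" warning. The fix keeps the buffer as void * and casts to an integer only for the offset arithmetic. The general page/offset split, with a placeholder buffer:

    void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);             /* placeholder */
    struct page *pg = virt_to_page(buf);                    /* pointer form */
    unsigned int pg_off = (unsigned long)buf & ~PAGE_MASK;  /* integer form */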
+1 -30
drivers/dma/fsldma.c
···
     hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-    return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
                          struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
···
     hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-                               struct fsl_desc_sw *desc)
-{
-    u64 snoop_bits;
-
-    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-        ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-    return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
                          struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
···
     snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
         ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
     hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
-}
-
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-                               struct fsl_desc_sw *desc)
-{
-    u64 snoop_bits;
-
-    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-        ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-    return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
 }
 
 static void set_desc_next(struct fsldma_chan *chan,
···
     struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
     struct fsl_desc_sw *child;
     unsigned long flags;
-    dma_cookie_t cookie;
+    dma_cookie_t cookie = -EINVAL;
 
     spin_lock_irqsave(&chan->desc_lock, flags);
···
                               struct fsl_desc_sw *desc)
 {
     struct dma_async_tx_descriptor *txd = &desc->async_tx;
-    struct device *dev = chan->common.device->dev;
-    dma_addr_t src = get_desc_src(chan, desc);
-    dma_addr_t dst = get_desc_dst(chan, desc);
-    u32 len = get_desc_cnt(chan, desc);
 
     /* Run the link descriptor callback function */
     if (txd->callback) {
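
Seeding cookie with -EINVAL in fsl_dma_tx_submit() silences a -Wmaybe-uninitialized warning: the cookie is only assigned inside the child-descriptor walk, and the compiler cannot prove the list is non-empty. A paraphrased sketch of the shape that trips the warning:

    /* If the list were empty the loop would never run and an
     * uninitialised cookie would be returned; -EINVAL both quiets gcc
     * and is a sane error cookie for that case. */
    dma_cookie_t cookie = -EINVAL;

    list_for_each_entry(child, &desc->tx_list, node)
        cookie = dma_cookie_assign(&child->async_tx);

    return cookie;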
+64 -39
drivers/dma/mv_xor.c
···
     hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-    struct mv_xor_desc *hw_desc = desc->hw_desc;
-    return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
                                    u32 byte_count)
 {
···
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
···
     struct dma_chan *dma_chan;
     dma_cookie_t cookie;
     struct dma_async_tx_descriptor *tx;
+    struct dmaengine_unmap_data *unmap;
     int err = 0;
 
-    src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+    src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
     if (!src)
         return -ENOMEM;
 
-    dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+    dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
     if (!dest) {
         kfree(src);
         return -ENOMEM;
     }
 
     /* Fill in src buffer */
-    for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+    for (i = 0; i < PAGE_SIZE; i++)
         ((u8 *) src)[i] = (u8)i;
 
     dma_chan = &mv_chan->dmachan;
···
         goto out;
     }
 
-    dest_dma = dma_map_single(dma_chan->device->dev, dest,
-                              MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+    unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+    if (!unmap) {
+        err = -ENOMEM;
+        goto free_resources;
+    }
 
-    src_dma = dma_map_single(dma_chan->device->dev, src,
-                             MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+    src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+                           PAGE_SIZE, DMA_TO_DEVICE);
+    unmap->to_cnt = 1;
+    unmap->addr[0] = src_dma;
+
+    dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+                            PAGE_SIZE, DMA_FROM_DEVICE);
+    unmap->from_cnt = 1;
+    unmap->addr[1] = dest_dma;
+
+    unmap->len = PAGE_SIZE;
 
     tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-                                MV_XOR_TEST_SIZE, 0);
+                                PAGE_SIZE, 0);
     cookie = mv_xor_tx_submit(tx);
     mv_xor_issue_pending(dma_chan);
     async_tx_ack(tx);
···
     }
 
     dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-                            MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-    if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+                            PAGE_SIZE, DMA_FROM_DEVICE);
+    if (memcmp(src, dest, PAGE_SIZE)) {
         dev_err(dma_chan->device->dev,
                 "Self-test copy failed compare, disabling\n");
         err = -ENODEV;
···
     }
 
 free_resources:
+    dmaengine_unmap_put(unmap);
     mv_xor_free_chan_resources(dma_chan);
 out:
     kfree(src);
···
     dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
     dma_addr_t dest_dma;
     struct dma_async_tx_descriptor *tx;
+    struct dmaengine_unmap_data *unmap;
     struct dma_chan *dma_chan;
     dma_cookie_t cookie;
     u8 cmp_byte = 0;
     u32 cmp_word;
     int err = 0;
+    int src_count = MV_XOR_NUM_SRC_TEST;
 
-    for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+    for (src_idx = 0; src_idx < src_count; src_idx++) {
         xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
         if (!xor_srcs[src_idx]) {
             while (src_idx--)
···
     }
 
     /* Fill in src buffers */
-    for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+    for (src_idx = 0; src_idx < src_count; src_idx++) {
         u8 *ptr = page_address(xor_srcs[src_idx]);
         for (i = 0; i < PAGE_SIZE; i++)
             ptr[i] = (1 << src_idx);
     }
 
-    for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+    for (src_idx = 0; src_idx < src_count; src_idx++)
         cmp_byte ^= (u8) (1 << src_idx);
 
     cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
···
         goto out;
     }
 
-    /* test xor */
-    dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-                            DMA_FROM_DEVICE);
+    unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+                                     GFP_KERNEL);
+    if (!unmap) {
+        err = -ENOMEM;
+        goto free_resources;
+    }
 
-    for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-        dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-                                   0, PAGE_SIZE, DMA_TO_DEVICE);
+    /* test xor */
+    for (i = 0; i < src_count; i++) {
+        unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+                                      0, PAGE_SIZE, DMA_TO_DEVICE);
+        dma_srcs[i] = unmap->addr[i];
+        unmap->to_cnt++;
+    }
+
+    unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+                                          DMA_FROM_DEVICE);
+    dest_dma = unmap->addr[src_count];
+    unmap->from_cnt = 1;
+    unmap->len = PAGE_SIZE;
 
     tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-                             MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+                             src_count, PAGE_SIZE, 0);
 
     cookie = mv_xor_tx_submit(tx);
     mv_xor_issue_pending(dma_chan);
···
     }
 
 free_resources:
+    dmaengine_unmap_put(unmap);
     mv_xor_free_chan_resources(dma_chan);
 out:
-    src_idx = MV_XOR_NUM_SRC_TEST;
+    src_idx = src_count;
     while (src_idx--)
         __free_page(xor_srcs[src_idx]);
     __free_page(dest);
···
     int i = 0;
 
     for_each_child_of_node(pdev->dev.of_node, np) {
+        struct mv_xor_chan *chan;
         dma_cap_mask_t cap_mask;
         int irq;
···
             goto err_channel_add;
         }
 
-        xordev->channels[i] =
-            mv_xor_channel_add(xordev, pdev, i,
-                               cap_mask, irq);
-        if (IS_ERR(xordev->channels[i])) {
-            ret = PTR_ERR(xordev->channels[i]);
-            xordev->channels[i] = NULL;
+        chan = mv_xor_channel_add(xordev, pdev, i,
+                                  cap_mask, irq);
+        if (IS_ERR(chan)) {
+            ret = PTR_ERR(chan);
             irq_dispose_mapping(irq);
             goto err_channel_add;
         }
 
+        xordev->channels[i] = chan;
         i++;
     }
 } else if (pdata && pdata->channels) {
     for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
         struct mv_xor_channel_data *cd;
+        struct mv_xor_chan *chan;
         int irq;
 
         cd = &pdata->channels[i];
···
             goto err_channel_add;
         }
 
-        xordev->channels[i] =
-            mv_xor_channel_add(xordev, pdev, i,
-                               cd->cap_mask, irq);
-        if (IS_ERR(xordev->channels[i])) {
-            ret = PTR_ERR(xordev->channels[i]);
+        chan = mv_xor_channel_add(xordev, pdev, i,
+                                  cd->cap_mask, irq);
+        if (IS_ERR(chan)) {
+            ret = PTR_ERR(chan);
             goto err_channel_add;
         }
+
+        xordev->channels[i] = chan;
     }
 }
 
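
The channel-initialisation fix is error-pointer hygiene: mv_xor_channel_add() returns an ERR_PTR on failure, and storing that straight into xordev->channels[i] meant the unwind path later treated it as a live channel and oopsed. Holding the result in a local until it is known-good keeps the invariant that every non-NULL slot is valid, which the unwind relies on; a paraphrased sketch:

    /* Paraphrased unwind: any non-NULL slot is assumed to be a real
     * channel, so an ERR_PTR stored here would be dereferenced. */
    err_channel_add:
        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
            if (xordev->channels[i])
                mv_xor_channel_remove(xordev->channels[i]);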
+1 -4
drivers/dma/pl330.c
···
 
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-    desc->pchan = NULL;
     desc->req.x = &desc->px;
     desc->req.token = desc;
     desc->rqcfg.swap = SWAP_NO;
-    desc->rqcfg.privileged = 0;
-    desc->rqcfg.insnaccess = 0;
     desc->rqcfg.scctl = SCCTRL0;
     desc->rqcfg.dcctl = DCCTRL0;
     desc->req.cfg = &desc->rqcfg;
···
     if (!pdmac)
         return 0;
 
-    desc = kmalloc(count * sizeof(*desc), flg);
+    desc = kcalloc(count, sizeof(*desc), flg);
     if (!desc)
         return 0;
 
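
Both pl330 hunks are one fix: descriptor arrays now come from kcalloc(), so every field starts out zero and _init_desc() no longer clears pchan, rqcfg.privileged, and rqcfg.insnaccess by hand. The point is the fields it never touched; reading the shortlog against the rework, the likely culprit is the unmap pointer the dmaengine_unmap series added to the shared descriptor, which kmalloc() left as garbage. kcalloc() also checks the multiplication for overflow:

    /* Behaves like kzalloc(count * sizeof(*desc), flg), except that an
     * overflowing count * sizeof(*desc) yields NULL rather than a
     * too-small buffer. */
    desc = kcalloc(count, sizeof(*desc), flg);
    if (!desc)
        return 0;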
+1 -26
drivers/dma/ppc4xx/adma.c
···
 }
 
 /**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-                                       int value, unsigned long flags)
-{
-    struct dma_cdb *hw_desc = desc->hw_desc;
-
-    memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-    desc->hw_next = NULL;
-    desc->src_cnt = 1;
-    desc->dst_cnt = 1;
-
-    if (flags & DMA_PREP_INTERRUPT)
-        set_bit(PPC440SPE_DESC_INT, &desc->flags);
-    else
-        clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-    hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-    hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-    hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
 static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
···
         struct ppc440spe_adma_chan *chan,
         dma_cookie_t cookie)
 {
-    int i;
-
     BUG_ON(desc->async_tx.cookie < 0);
     if (desc->async_tx.cookie > 0) {
         cookie = desc->async_tx.cookie;
···
             ppc440spe_adma_prep_dma_interrupt;
     }
     pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-            "( %s%s%s%s%s%s%s)\n",
+            "( %s%s%s%s%s%s)\n",
             dev_name(adev->dev),
             dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
             dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
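
The remaining ppc4xx warning was a format/argument mismatch: after the driver's memset support went away (which is also why ppc440spe_desc_init_memset() above became dead code), the capability argument list shrank by one while the format string kept seven %s conversions for six strings. A minimal reproduction of that warning class, with hypothetical strings:

    pr_info("caps: ( %s%s%s)\n", "xor ", "pq ");  /* -Wformat: '%s' expects a
                                                     matching 'char *' argument */
    pr_info("caps: ( %s%s)\n", "xor ", "pq ");    /* fixed: conversions match */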
-1
drivers/dma/txx9dmac.c
···
     dma_async_tx_callback callback;
     void *param;
     struct dma_async_tx_descriptor *txd = &desc->txd;
-    struct txx9dmac_slave *ds = dc->chan.private;
 
     dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
              txd->cookie, desc);