Merge tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"This time we have some odd fixes in hsu, edma, omap and xilinx.

Usual fixes and nothing special"

* tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: dw: fix master selection
dmaengine: edma: special case slot limit workaround
dmaengine: edma: Remove dynamic TPTC power management feature
dmaengine: vdma: don't crash when bad channel is requested
dmaengine: omap-dma: Do not suppress interrupts for memcpy
dmaengine: omap-dma: Fix polled channel completion detection and handling
dmaengine: hsu: correct use of channel status register
dmaengine: hsu: correct residue calculation of active descriptor
dmaengine: hsu: set HSU_CH_MTSR to memory width

Changed files
+73 -68
+19 -15
drivers/dma/dw/core.c
···
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
 	if (dwc->initialized == true)
 		return;
 
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
···
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;
 
-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;
 
 	/* We have to copy data since dws can be temporary storage */
···
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
···
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->src_master = 0;
+	dwc->dst_master = 0;
+
 	dwc->initialized = false;
 
 	/* Disable interrupts */
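For context, a minimal consumer-side sketch of how controller-specific data reaches this driver (the filter, helper, and request-line IDs below are hypothetical, not part of this diff): a temporary struct dw_dma_slave is handed over via chan->private, and with this fix it is validated once in alloc_chan_resources() through dw_dma_filter(), returning -EINVAL instead of hitting the old BUG_ON(), then cleared again when the channel is freed.

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

/* Hypothetical client filter; dws may be temporary storage, the
 * driver copies the fields it needs. */
static bool my_dw_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = param;

	/* Same check the in-driver dw_dma_filter() performs */
	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;	/* validated in alloc_chan_resources() */
	return true;
}

static struct dma_chan *my_request_tx_chan(struct device *dw_dev)
{
	struct dw_dma_slave dws = {
		.dma_dev = dw_dev,	/* must match the controller */
		.src_id  = 0,		/* hypothetical request lines */
		.dst_id  = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, my_dw_filter, &dws);
}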
+25 -38
drivers/dma/edma.c
···
 	struct edma_desc *edesc;
 	dma_addr_t src_addr, dst_addr;
 	enum dma_slave_buswidth dev_width;
+	bool use_intermediate = false;
 	u32 burst;
 	int i, ret, nslots;
···
 	 * but the synchronization is difficult to achieve with Cyclic and
 	 * cannot be guaranteed, so we error out early.
 	 */
-	if (nslots > MAX_NR_SG)
-		return NULL;
+	if (nslots > MAX_NR_SG) {
+		/*
+		 * If the burst and period sizes are the same, we can put
+		 * the full buffer into a single period and activate
+		 * intermediate interrupts. This will produce interrupts
+		 * after each burst, which is also after each desired period.
+		 */
+		if (burst == period_len) {
+			period_len = buf_len;
+			nslots = 2;
+			use_intermediate = true;
+		} else {
+			return NULL;
+		}
+	}
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
···
 		/*
 		 * Enable period interrupt only if it is requested
 		 */
-		if (tx_flags & DMA_PREP_INTERRUPT)
+		if (tx_flags & DMA_PREP_INTERRUPT) {
 			edesc->pset[i].param.opt |= TCINTEN;
+
+			/* Also enable intermediate interrupts if necessary */
+			if (use_intermediate)
+				edesc->pset[i].param.opt |= ITCINTEN;
+		}
 	}
 
 	/* Place the cyclic channel to highest priority queue */
···
 	return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-	struct platform_device *tc_pdev;
-	int ret;
-
-	if (!IS_ENABLED(CONFIG_OF) || !tc)
-		return;
-
-	tc_pdev = of_find_device_by_node(tc->node);
-	if (!tc_pdev) {
-		pr_err("%s: TPTC device is not found\n", __func__);
-		return;
-	}
-	if (!pm_runtime_enabled(&tc_pdev->dev))
-		pm_runtime_enable(&tc_pdev->dev);
-
-	if (enable)
-		ret = pm_runtime_get_sync(&tc_pdev->dev);
-	else
-		ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-	if (ret < 0)
-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
···
 	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
 		echan->hw_triggered ? "HW" : "SW");
-
-	edma_tc_set_pm_state(echan->tc, true);
 
 	return 0;
···
 		echan->alloced = false;
 	}
 
-	edma_tc_set_pm_state(echan->tc, false);
 	echan->tc = NULL;
 	echan->hw_triggered = false;
···
 	int i;
 
 	for (i = 0; i < ecc->num_channels; i++) {
-		if (echan[i].alloced) {
+		if (echan[i].alloced)
 			edma_setup_interrupt(&echan[i], false);
-			edma_tc_set_pm_state(echan[i].tc, false);
-		}
 	}
 
 	return 0;
···
 		/* Set up channel -> slot mapping for the entry slot */
 		edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-		edma_tc_set_pm_state(echan[i].tc, true);
 	}
 }
···
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-	return 0;
+	pm_runtime_enable(&pdev->dev);
+	return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
+8 -5
drivers/dma/hsu/hsu.c
···
 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	}
 
 	hsu_chan_disable(hsuc);
···
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
···
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = desc->length;
+	size_t bytes = 0;
 	int i;
 
-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
+	for (i = desc->active; i < desc->nents; i++)
+		bytes += desc->sg[i].len;
+
+	i = HSU_DMA_CHAN_NR_DESC - 1;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
+3
drivers/dma/hsu/hsu.h
···
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)
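A sketch of how the corrected residue surfaces to a caller (the helper is hypothetical, not part of this diff): the residue is now the bytes of the remaining scatterlist entries plus whatever the hardware transfer-size registers still report, instead of the full descriptor length plus a partial register sum.

#include <linux/dmaengine.h>

static size_t my_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	/* Fed by the fixed hsu_dma_active_desc_size() */
	return state.residue;
}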
+17 -9
drivers/dma/omap-dma.c
···
 	unsigned dma_sig;
 	bool cyclic;
 	bool paused;
+	bool running;
 
 	int dma_ch;
 	struct omap_desc *desc;
···
 	/* Enable channel */
 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+	c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
···
 		omap_dma_chan_write(c, CLNK_CTRL, val);
 	}
+
+	c->running = false;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
···
 	struct omap_chan *c = to_omap_dma_chan(chan);
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
-	uint32_t ccr;
 	unsigned long flags;
 
-	ccr = omap_dma_chan_read(c, CCR);
-	/* The channel is no longer active, handle the completion right away */
-	if (!(ccr & CCR_ENABLE))
-		omap_dma_callback(c->dma_ch, 0, c);
-
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!c->paused && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly
+		 */
+		if (!(ccr & CCR_ENABLE))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
···
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE;
-	if (tx_flags & DMA_PREP_INTERRUPT)
-		d->cicr |= CICR_FRAME_IE;
+	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
 	d->csdp = data_type;
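Together the two omap-dma patches make polled memcpy work: the frame interrupt is no longer suppressed (the driver needs it for its own bookkeeping), and tx_status() itself reports completion from CCR_ENABLE, but only while the channel is running and not paused. A hedged usage sketch (hypothetical helper, no timeout handling):

#include <linux/delay.h>
#include <linux/dmaengine.h>

static int my_poll_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* flags = 0: no DMA_PREP_INTERRUPT, completion is polled */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Real code would bound this loop with a timeout */
	while (dmaengine_tx_status(chan, cookie, NULL) != DMA_COMPLETE)
		udelay(1);

	return 0;
}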
+1 -1
drivers/dma/xilinx/xilinx_vdma.c
···
 	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];
 
-	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
 		return NULL;
 
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
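Consumer-side effect, as a sketch (the device and channel name are made up): a DT consumer that points at a channel id the driver never probed, for example when only one direction is enabled in hardware, now gets NULL from the lookup instead of the driver dereferencing a NULL xdev->chan[chan_id].

#include <linux/device.h>
#include <linux/dmaengine.h>

static struct dma_chan *my_get_vdma_chan(struct device *dev)
{
	/* "vdma1" is a hypothetical dma-names entry in the DT node */
	struct dma_chan *chan = dma_request_slave_channel(dev, "vdma1");

	if (!chan)
		dev_warn(dev, "vdma channel missing or not probed\n");

	return chan;	/* bad/absent channel now fails gracefully */
}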