Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Here are hopefully last set of fixes for 4.1. This time we have:

- a fix for pause capability reporting: pause is only advertised when both
  dmaengine pause and resume are supported, by Krzysztof

- a locking fix for at_xdmac by Ludovic

- a slave configuration fix for at_xdmac by Ludovic"

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: Fix choppy sound because of unimplemented resume
dmaengine: at_xdmac: rework slave configuration part
dmaengine: at_xdmac: lock fixes

+144 -93 (total)

drivers/dma/at_xdmac.c (+139 -92)
···
 #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */

 #define AT_XDMAC_MAX_CHAN 0x20
+#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
+#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */

 #define AT_XDMAC_DMA_BUSWIDTHS\
 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
···
 struct dma_chan chan;
 void __iomem *ch_regs;
 u32 mask; /* Channel Mask */
-u32 cfg[2]; /* Channel Configuration Register */
-#define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
-#define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
+u32 cfg; /* Channel Configuration Register */
 u8 perid; /* Peripheral ID */
 u8 perif; /* Peripheral Interface */
 u8 memif; /* Memory Interface */
-u32 per_src_addr;
-u32 per_dst_addr;
 u32 save_cc;
 u32 save_cim;
 u32 save_cnda;
 u32 save_cndc;
 unsigned long status;
 struct tasklet_struct tasklet;
+struct dma_slave_config sconfig;

 spinlock_t lock;

···
 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
 dma_cookie_t cookie;
+unsigned long irqflags;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, irqflags);
 cookie = dma_cookie_assign(tx);

 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
···
 if (list_is_singular(&atchan->xfers_list))
 at_xdmac_start_xfer(atchan, desc);

-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, irqflags);
 return cookie;
 }

···
 return chan;
 }

+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+enum dma_transfer_direction direction)
+{
+struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+int csize, dwidth;
+
+if (direction == DMA_DEV_TO_MEM) {
+atchan->cfg =
+AT91_XDMAC_DT_PERID(atchan->perid)
+| AT_XDMAC_CC_DAM_INCREMENTED_AM
+| AT_XDMAC_CC_SAM_FIXED_AM
+| AT_XDMAC_CC_DIF(atchan->memif)
+| AT_XDMAC_CC_SIF(atchan->perif)
+| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+| AT_XDMAC_CC_DSYNC_PER2MEM
+| AT_XDMAC_CC_MBSIZE_SIXTEEN
+| AT_XDMAC_CC_TYPE_PER_TRAN;
+csize = ffs(atchan->sconfig.src_maxburst) - 1;
+if (csize < 0) {
+dev_err(chan2dev(chan), "invalid src maxburst value\n");
+return -EINVAL;
+}
+atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+if (dwidth < 0) {
+dev_err(chan2dev(chan), "invalid src addr width value\n");
+return -EINVAL;
+}
+atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+} else if (direction == DMA_MEM_TO_DEV) {
+atchan->cfg =
+AT91_XDMAC_DT_PERID(atchan->perid)
+| AT_XDMAC_CC_DAM_FIXED_AM
+| AT_XDMAC_CC_SAM_INCREMENTED_AM
+| AT_XDMAC_CC_DIF(atchan->perif)
+| AT_XDMAC_CC_SIF(atchan->memif)
+| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+| AT_XDMAC_CC_DSYNC_MEM2PER
+| AT_XDMAC_CC_MBSIZE_SIXTEEN
+| AT_XDMAC_CC_TYPE_PER_TRAN;
+csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+if (csize < 0) {
+dev_err(chan2dev(chan), "invalid src maxburst value\n");
+return -EINVAL;
+}
+atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+if (dwidth < 0) {
+dev_err(chan2dev(chan), "invalid dst addr width value\n");
+return -EINVAL;
+}
+atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+}
+
+dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+return 0;
+}
+
+/*
+ * Only check that maxburst and addr width values are supported by the
+ * the controller but not that the configuration is good to perform the
+ * transfer since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+|| (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+return -EINVAL;
+
+if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+|| (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+return -EINVAL;
+
+return 0;
+}
+
 static int at_xdmac_set_slave_config(struct dma_chan *chan,
 struct dma_slave_config *sconfig)
 {
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
-u8 dwidth;
-int csize;

-atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
-AT91_XDMAC_DT_PERID(atchan->perid)
-| AT_XDMAC_CC_DAM_INCREMENTED_AM
-| AT_XDMAC_CC_SAM_FIXED_AM
-| AT_XDMAC_CC_DIF(atchan->memif)
-| AT_XDMAC_CC_SIF(atchan->perif)
-| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-| AT_XDMAC_CC_DSYNC_PER2MEM
-| AT_XDMAC_CC_MBSIZE_SIXTEEN
-| AT_XDMAC_CC_TYPE_PER_TRAN;
-csize = at_xdmac_csize(sconfig->src_maxburst);
-if (csize < 0) {
-dev_err(chan2dev(chan), "invalid src maxburst value\n");
+if (at_xdmac_check_slave_config(sconfig)) {
+dev_err(chan2dev(chan), "invalid slave configuration\n");
 return -EINVAL;
 }
-atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-dwidth = ffs(sconfig->src_addr_width) - 1;
-atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);

-
-atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
-AT91_XDMAC_DT_PERID(atchan->perid)
-| AT_XDMAC_CC_DAM_FIXED_AM
-| AT_XDMAC_CC_SAM_INCREMENTED_AM
-| AT_XDMAC_CC_DIF(atchan->perif)
-| AT_XDMAC_CC_SIF(atchan->memif)
-| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-| AT_XDMAC_CC_DSYNC_MEM2PER
-| AT_XDMAC_CC_MBSIZE_SIXTEEN
-| AT_XDMAC_CC_TYPE_PER_TRAN;
-csize = at_xdmac_csize(sconfig->dst_maxburst);
-if (csize < 0) {
-dev_err(chan2dev(chan), "invalid src maxburst value\n");
-return -EINVAL;
-}
-atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-dwidth = ffs(sconfig->dst_addr_width) - 1;
-atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-/* Src and dst addr are needed to configure the link list descriptor. */
-atchan->per_src_addr = sconfig->src_addr;
-atchan->per_dst_addr = sconfig->dst_addr;
-
-dev_dbg(chan2dev(chan),
-"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
-__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
-atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
-atchan->per_src_addr, atchan->per_dst_addr);
+memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

 return 0;
 }
···
 struct scatterlist *sg;
 int i;
 unsigned int xfer_size = 0;
+unsigned long irqflags;
+struct dma_async_tx_descriptor *ret = NULL;

 if (!sgl)
 return NULL;
···
 flags);

 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, irqflags);
+
+if (at_xdmac_compute_chan_conf(chan, direction))
+goto spin_unlock;

 /* Prepare descriptors. */
 for_each_sg(sgl, sg, sg_len, i) {
···
 mem = sg_dma_address(sg);
 if (unlikely(!len)) {
 dev_err(chan2dev(chan), "sg data length is zero\n");
-spin_unlock_bh(&atchan->lock);
-return NULL;
+goto spin_unlock;
 }
 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 __func__, i, len, mem);
···
 dev_err(chan2dev(chan), "can't get descriptor\n");
 if (first)
 list_splice_init(&first->descs_list, &atchan->free_descs_list);
-spin_unlock_bh(&atchan->lock);
-return NULL;
+goto spin_unlock;
 }

 /* Linked list descriptor setup. */
 if (direction == DMA_DEV_TO_MEM) {
-desc->lld.mbr_sa = atchan->per_src_addr;
+desc->lld.mbr_sa = atchan->sconfig.src_addr;
 desc->lld.mbr_da = mem;
-desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 } else {
 desc->lld.mbr_sa = mem;
-desc->lld.mbr_da = atchan->per_dst_addr;
-desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+desc->lld.mbr_da = atchan->sconfig.dst_addr;
 }
+desc->lld.mbr_cfg = atchan->cfg;
 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
···
 xfer_size += len;
 }

-spin_unlock_bh(&atchan->lock);

 first->tx_dma_desc.flags = flags;
 first->xfer_size = xfer_size;
 first->direction = direction;
+ret = &first->tx_dma_desc;

-return &first->tx_dma_desc;
+spin_unlock:
+spin_unlock_irqrestore(&atchan->lock, irqflags);
+return ret;
 }

 static struct dma_async_tx_descriptor *
···
 struct at_xdmac_desc *first = NULL, *prev = NULL;
 unsigned int periods = buf_len / period_len;
 int i;
+unsigned long irqflags;

 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 __func__, &buf_addr, buf_len, period_len,
···
 return NULL;
 }

+if (at_xdmac_compute_chan_conf(chan, direction))
+return NULL;
+
 for (i = 0; i < periods; i++) {
 struct at_xdmac_desc *desc = NULL;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, irqflags);
 desc = at_xdmac_get_desc(atchan);
 if (!desc) {
 dev_err(chan2dev(chan), "can't get descriptor\n");
 if (first)
 list_splice_init(&first->descs_list, &atchan->free_descs_list);
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, irqflags);
 return NULL;
 }
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, irqflags);
 dev_dbg(chan2dev(chan),
 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 __func__, desc, &desc->tx_dma_desc.phys);

 if (direction == DMA_DEV_TO_MEM) {
-desc->lld.mbr_sa = atchan->per_src_addr;
+desc->lld.mbr_sa = atchan->sconfig.src_addr;
 desc->lld.mbr_da = buf_addr + i * period_len;
-desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 } else {
 desc->lld.mbr_sa = buf_addr + i * period_len;
-desc->lld.mbr_da = atchan->per_dst_addr;
-desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+desc->lld.mbr_da = atchan->sconfig.dst_addr;
 }
+desc->lld.mbr_cfg = atchan->cfg;
 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 | AT_XDMAC_MBR_UBC_NDEN
 | AT_XDMAC_MBR_UBC_NSEN
···
 | AT_XDMAC_CC_SIF(0)
 | AT_XDMAC_CC_MBSIZE_SIXTEEN
 | AT_XDMAC_CC_TYPE_MEM_TRAN;
+unsigned long irqflags;

 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 __func__, &src, &dest, len, flags);
···

 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, irqflags);
 desc = at_xdmac_get_desc(atchan);
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, irqflags);
 if (!desc) {
 dev_err(chan2dev(chan), "can't get descriptor\n");
 if (first)
···
 int residue;
 u32 cur_nda, mask, value;
 u8 dwidth = 0;
+unsigned long flags;

 ret = dma_cookie_status(chan, cookie, txstate);
 if (ret == DMA_COMPLETE)
···
 if (!txstate)
 return ret;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);

 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

···
 */
 if (!desc->active_xfer) {
 dma_set_residue(txstate, desc->xfer_size);
-spin_unlock_bh(&atchan->lock);
-return ret;
+goto spin_unlock;
 }

 residue = desc->xfer_size;
···
 }
 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;

-spin_unlock_bh(&atchan->lock);
-
 dma_set_residue(txstate, residue);

 dev_dbg(chan2dev(chan),
 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

+spin_unlock:
+spin_unlock_irqrestore(&atchan->lock, flags);
 return ret;
 }

···
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 struct at_xdmac_desc *desc;
+unsigned long flags;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);

 /*
 * If channel is enabled, do nothing, advance_work will be triggered
···
 at_xdmac_start_xfer(atchan, desc);
 }

-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);
 }

 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
···
 {
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 int ret;
+unsigned long flags;

 dev_dbg(chan2dev(chan), "%s\n", __func__);

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);
 ret = at_xdmac_set_slave_config(chan, config);
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);

 return ret;
 }
···
 {
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+unsigned long flags;

 dev_dbg(chan2dev(chan), "%s\n", __func__);

 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 return 0;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);
 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 cpu_relax();
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);

 return 0;
 }
···
 {
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+unsigned long flags;

 dev_dbg(chan2dev(chan), "%s\n", __func__);

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);
 if (!at_xdmac_chan_is_paused(atchan)) {
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);
 return 0;
 }

 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);

 return 0;
 }
···
 struct at_xdmac_desc *desc, *_desc;
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+unsigned long flags;

 dev_dbg(chan2dev(chan), "%s\n", __func__);

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);
 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 cpu_relax();
···
 at_xdmac_remove_xfer(atchan, desc);

 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);

 return 0;
 }
···
 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
 struct at_xdmac_desc *desc;
 int i;
+unsigned long flags;

-spin_lock_bh(&atchan->lock);
+spin_lock_irqsave(&atchan->lock, flags);

 if (at_xdmac_chan_is_enabled(atchan)) {
 dev_err(chan2dev(chan),
···
 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

 spin_unlock:
-spin_unlock_bh(&atchan->lock);
+spin_unlock_irqrestore(&atchan->lock, flags);
 return i;
 }
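For context, a minimal slave-configuration sketch from the client side (not part of this pull; example_configure_tx and fifo_addr are made up for illustration). The values stay within the limits that the new at_xdmac_check_slave_config() enforces, i.e. a maxburst of at most 16 data and an address width of at most 8 bytes:

#include <linux/dmaengine.h>

/* Hypothetical client helper; fifo_addr is an assumed peripheral FIFO address. */
static int example_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* <= AT_XDMAC_MAX_DWIDTH */
		.dst_maxburst	= 8,				/* <= AT_XDMAC_MAX_CSIZE */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	/* Fails with -EINVAL if the values exceed what the controller supports. */
	return dmaengine_slave_config(chan, &cfg);
}

The driver now stores the whole dma_slave_config and computes the channel configuration register at prepare time, depending on the transfer direction, instead of caching two pre-built per-direction values.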
drivers/dma/dmaengine.c (+5 -1)
···
 caps->directions = device->directions;
 caps->residue_granularity = device->residue_granularity;

-caps->cmd_pause = !!device->device_pause;
+/*
+ * Some devices implement only pause (e.g. to get residuum) but no
+ * resume. However cmd_pause is advertised as pause AND resume.
+ */
+caps->cmd_pause = !!(device->device_pause && device->device_resume);
 caps->cmd_terminate = !!device->device_terminate_all;

 return 0;
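For completeness, a hedged sketch of how a dmaengine client would consume this capability (illustrative only; example_can_pause is not an existing helper):

#include <linux/dmaengine.h>

static bool example_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	/* dma_get_slave_caps() fills in the per-channel capabilities. */
	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* After this fix, true only when the channel can be paused AND resumed. */
	return caps.cmd_pause;
}

This mirrors the check the core now performs: pause is only reported when a matching resume callback exists, which is what the "choppy sound" fix in the shortlog relies on.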