Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
"Here are hopefully last set of fixes for 4.1. This time we have:

- fix for pause capability reporting: it is now advertised only when both
dmaengine pause & resume are supported, by Krzysztof

- locking fix for at_xdmac by Ludovic

- slave configuration fix for at_xdmac by Ludovic"

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: Fix choppy sound because of unimplemented resume
dmaengine: at_xdmac: rework slave configuration part
dmaengine: at_xdmac: lock fixes

2 files changed: +144 -93

drivers/dma/at_xdmac.c (+139 -92)
···
 #define AT_XDMAC_MBR_UBC_NDV3	(0x3 << 27)	/* Next Descriptor View 3 */
 
 #define AT_XDMAC_MAX_CHAN	0x20
+#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
+#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
···
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
 	u32			mask;		/* Channel Mask */
-	u32			cfg[2];		/* Channel Configuration Register */
-	#define	AT_XDMAC_DEV_TO_MEM_CFG	0	/* Predifined dev to mem channel conf */
-	#define	AT_XDMAC_MEM_TO_DEV_CFG	1	/* Predifined mem to dev channel conf */
+	u32			cfg;		/* Channel Configuration Register */
 	u8			perid;		/* Peripheral ID */
 	u8			perif;		/* Peripheral Interface */
 	u8			memif;		/* Memory Interface */
-	u32			per_src_addr;
-	u32			per_dst_addr;
 	u32			save_cc;
 	u32			save_cim;
 	u32			save_cnda;
 	u32			save_cndc;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
+	struct dma_slave_config	sconfig;
 
 	spinlock_t		lock;
 
···
 	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
 	dma_cookie_t		cookie;
+	unsigned long		irqflags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
···
 	if (list_is_singular(&atchan->xfers_list))
 		at_xdmac_start_xfer(atchan, desc);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	return cookie;
 }
 
···
 	return chan;
 }
 
+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+				      enum dma_transfer_direction direction)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	int			csize, dwidth;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		atchan->cfg =
+			AT91_XDMAC_DT_PERID(atchan->perid)
+			| AT_XDMAC_CC_DAM_INCREMENTED_AM
+			| AT_XDMAC_CC_SAM_FIXED_AM
+			| AT_XDMAC_CC_DIF(atchan->memif)
+			| AT_XDMAC_CC_SIF(atchan->perif)
+			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+			| AT_XDMAC_CC_DSYNC_PER2MEM
+			| AT_XDMAC_CC_MBSIZE_SIXTEEN
+			| AT_XDMAC_CC_TYPE_PER_TRAN;
+		csize = ffs(atchan->sconfig.src_maxburst) - 1;
+		if (csize < 0) {
+			dev_err(chan2dev(chan), "invalid src maxburst value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+		if (dwidth < 0) {
+			dev_err(chan2dev(chan), "invalid src addr width value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+	} else if (direction == DMA_MEM_TO_DEV) {
+		atchan->cfg =
+			AT91_XDMAC_DT_PERID(atchan->perid)
+			| AT_XDMAC_CC_DAM_FIXED_AM
+			| AT_XDMAC_CC_SAM_INCREMENTED_AM
+			| AT_XDMAC_CC_DIF(atchan->perif)
+			| AT_XDMAC_CC_SIF(atchan->memif)
+			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+			| AT_XDMAC_CC_DSYNC_MEM2PER
+			| AT_XDMAC_CC_MBSIZE_SIXTEEN
+			| AT_XDMAC_CC_TYPE_PER_TRAN;
+		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+		if (csize < 0) {
+			dev_err(chan2dev(chan), "invalid src maxburst value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+		if (dwidth < 0) {
+			dev_err(chan2dev(chan), "invalid dst addr width value\n");
+			return -EINVAL;
+		}
+		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+	}
+
+	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+	return 0;
+}
+
+/*
+ * Only check that maxburst and addr width values are supported by the
+ * the controller but not that the configuration is good to perform the
+ * transfer since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+		return -EINVAL;
+
+	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int at_xdmac_set_slave_config(struct dma_chan *chan,
 				      struct dma_slave_config *sconfig)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
-	u8 dwidth;
-	int csize;
 
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
-		AT91_XDMAC_DT_PERID(atchan->perid)
-		| AT_XDMAC_CC_DAM_INCREMENTED_AM
-		| AT_XDMAC_CC_SAM_FIXED_AM
-		| AT_XDMAC_CC_DIF(atchan->memif)
-		| AT_XDMAC_CC_SIF(atchan->perif)
-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-		| AT_XDMAC_CC_DSYNC_PER2MEM
-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
-		| AT_XDMAC_CC_TYPE_PER_TRAN;
-	csize = at_xdmac_csize(sconfig->src_maxburst);
-	if (csize < 0) {
-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
+	if (at_xdmac_check_slave_config(sconfig)) {
+		dev_err(chan2dev(chan), "invalid slave configuration\n");
 		return -EINVAL;
 	}
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-	dwidth = ffs(sconfig->src_addr_width) - 1;
-	atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
 
-
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
-		AT91_XDMAC_DT_PERID(atchan->perid)
-		| AT_XDMAC_CC_DAM_FIXED_AM
-		| AT_XDMAC_CC_SAM_INCREMENTED_AM
-		| AT_XDMAC_CC_DIF(atchan->perif)
-		| AT_XDMAC_CC_SIF(atchan->memif)
-		| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-		| AT_XDMAC_CC_DSYNC_MEM2PER
-		| AT_XDMAC_CC_MBSIZE_SIXTEEN
-		| AT_XDMAC_CC_TYPE_PER_TRAN;
-	csize = at_xdmac_csize(sconfig->dst_maxburst);
-	if (csize < 0) {
-		dev_err(chan2dev(chan), "invalid src maxburst value\n");
-		return -EINVAL;
-	}
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-	dwidth = ffs(sconfig->dst_addr_width) - 1;
-	atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-	/* Src and dst addr are needed to configure the link list descriptor. */
-	atchan->per_src_addr = sconfig->src_addr;
-	atchan->per_dst_addr = sconfig->dst_addr;
-
-	dev_dbg(chan2dev(chan),
-		"%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
-		__func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
-		atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
-		atchan->per_src_addr, atchan->per_dst_addr);
+	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
 
 	return 0;
 }
···
 	struct scatterlist	*sg;
 	int			i;
 	unsigned int		xfer_size = 0;
+	unsigned long		irqflags;
+	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
 		return NULL;
···
 		 flags);
 
 	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
+
+	if (at_xdmac_compute_chan_conf(chan, direction))
+		goto spin_unlock;
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
···
 		mem = sg_dma_address(sg);
 		if (unlikely(!len)) {
 			dev_err(chan2dev(chan), "sg data length is zero\n");
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 			 __func__, i, len, mem);
···
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
-			return NULL;
+			goto spin_unlock;
 		}
 
 		/* Linked list descriptor setup. */
 		if (direction == DMA_DEV_TO_MEM) {
-			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_sa = atchan->sconfig.src_addr;
 			desc->lld.mbr_da = mem;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = mem;
-			desc->lld.mbr_da = atchan->per_dst_addr;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_da = atchan->sconfig.dst_addr;
 		}
+		desc->lld.mbr_cfg = atchan->cfg;
 		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
 			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
···
 		xfer_size += len;
 	}
 
-	spin_unlock_bh(&atchan->lock);
 
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = xfer_size;
 	first->direction = direction;
+	ret = &first->tx_dma_desc;
 
-	return &first->tx_dma_desc;
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
+	return ret;
 }
 
 static struct dma_async_tx_descriptor *
···
 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
 	unsigned int		periods = buf_len / period_len;
 	int			i;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 		__func__, &buf_addr, buf_len, period_len,
···
 		return NULL;
 	}
 
+	if (at_xdmac_compute_chan_conf(chan, direction))
+		return NULL;
+
 	for (i = 0; i < periods; i++) {
 		struct at_xdmac_desc	*desc = NULL;
 
-		spin_lock_bh(&atchan->lock);
+		spin_lock_irqsave(&atchan->lock, irqflags);
 		desc = at_xdmac_get_desc(atchan);
 		if (!desc) {
 			dev_err(chan2dev(chan), "can't get descriptor\n");
 			if (first)
 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
-			spin_unlock_bh(&atchan->lock);
+			spin_unlock_irqrestore(&atchan->lock, irqflags);
 			return NULL;
 		}
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, irqflags);
 		dev_dbg(chan2dev(chan),
 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 			__func__, desc, &desc->tx_dma_desc.phys);
 
 		if (direction == DMA_DEV_TO_MEM) {
-			desc->lld.mbr_sa = atchan->per_src_addr;
+			desc->lld.mbr_sa = atchan->sconfig.src_addr;
 			desc->lld.mbr_da = buf_addr + i * period_len;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
 		} else {
 			desc->lld.mbr_sa = buf_addr + i * period_len;
-			desc->lld.mbr_da = atchan->per_dst_addr;
-			desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+			desc->lld.mbr_da = atchan->sconfig.dst_addr;
 		}
+		desc->lld.mbr_cfg = atchan->cfg;
 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 			| AT_XDMAC_MBR_UBC_NDEN
 			| AT_XDMAC_MBR_UBC_NSEN
···
 		| AT_XDMAC_CC_SIF(0)
 		| AT_XDMAC_CC_MBSIZE_SIXTEEN
 		| AT_XDMAC_CC_TYPE_MEM_TRAN;
+	unsigned long		irqflags;
 
 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
 		__func__, &src, &dest, len, flags);
···
 
 	dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, irqflags);
 	desc = at_xdmac_get_desc(atchan);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, irqflags);
 	if (!desc) {
 		dev_err(chan2dev(chan), "can't get descriptor\n");
 		if (first)
···
 	int			residue;
 	u32			cur_nda, mask, value;
 	u8			dwidth = 0;
+	unsigned long		flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
···
 	if (!txstate)
 		return ret;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
···
 	 */
 	if (!desc->active_xfer) {
 		dma_set_residue(txstate, desc->xfer_size);
-		spin_unlock_bh(&atchan->lock);
-		return ret;
+		goto spin_unlock;
 	}
 
 	residue = desc->xfer_size;
···
 	}
 	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-	spin_unlock_bh(&atchan->lock);
-
 	dma_set_residue(txstate, residue);
 
 	dev_dbg(chan2dev(chan),
 		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
 		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return ret;
 }
 
···
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
···
 		at_xdmac_start_xfer(atchan, desc);
 	}
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
···
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	int			ret;
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	ret = at_xdmac_set_slave_config(chan, config);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return ret;
 }
···
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
 		return 0;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
 		cpu_relax();
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
···
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	if (!at_xdmac_chan_is_paused(atchan)) {
-		spin_unlock_bh(&atchan->lock);
+		spin_unlock_irqrestore(&atchan->lock, flags);
 		return 0;
 	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
···
 	struct at_xdmac_desc	*desc, *_desc;
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
 		cpu_relax();
···
 		at_xdmac_remove_xfer(atchan, desc);
 
 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	return 0;
 }
···
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
+	unsigned long		flags;
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
···
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
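
To illustrate how the reworked slave configuration is exercised from the client side, here is a minimal, hypothetical sketch (the function name, FIFO address and burst/width values are placeholders, not taken from the patch): dmaengine_slave_config() now only validates and stores the dma_slave_config, and the direction-dependent channel configuration is computed when the transfer is actually prepared.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative client only: "example_start_tx" and "fifo_addr" are not part
 * of the patch.  Configure a 4-byte wide, 16-beat burst mem-to-dev channel,
 * then prepare and issue one transfer.
 */
static int example_start_tx(struct dma_chan *chan, dma_addr_t buf,
			    size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,	/* <= AT_XDMAC_MAX_CSIZE */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* With this patch, out-of-range maxburst/width are rejected here. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/*
	 * The direction-dependent channel configuration (atchan->cfg) is now
	 * computed inside the prep call, once the direction is known.
	 */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

Deferring the computation to prep time is what the comment added above at_xdmac_check_slave_config() alludes to: only maxburst and address width can be range-checked at config time, because the validity of the full configuration can only be judged once the transfer direction is known.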
drivers/dma/dmaengine.c (+5 -1)
···
 	caps->directions = device->directions;
 	caps->residue_granularity = device->residue_granularity;
 
-	caps->cmd_pause = !!device->device_pause;
+	/*
+	 * Some devices implement only pause (e.g. to get residuum) but no
+	 * resume. However cmd_pause is advertised as pause AND resume.
+	 */
+	caps->cmd_pause = !!(device->device_pause && device->device_resume);
 	caps->cmd_terminate = !!device->device_terminate_all;
 
 	return 0;
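
As a usage note, the sketch below (C, hypothetical helper name) shows how a dmaengine client consumes this capability; the generic dmaengine PCM code does essentially this when deciding whether to advertise pause/resume, which is how the missing resume surfaced as choppy sound.

#include <linux/dmaengine.h>

/*
 * Illustrative helper only ("example_can_pause" is not a kernel function).
 * Clients gate their pause/resume support on caps.cmd_pause, so the flag
 * must only be set when both callbacks exist.
 */
static bool example_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;

	return caps.cmd_pause;
}

With the fix, a controller driver that implements device_pause only to freeze the channel for residue reporting no longer advertises a pause/resume pair it cannot complete, so clients fall back to plain start/stop instead of issuing a resume the driver does not provide.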