Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' into v3.1-rc4

Fixed trivial conflicts in drivers/dma/amba-pl08x.c

Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Vinod Koul and committed by Vinod Koul
8516f52f c6a389f1

+447 -327
+4
arch/arm/include/asm/hardware/pl080.h
··· 21 21 * OneNAND features. 22 22 */ 23 23 24 + #ifndef ASM_PL080_H 25 + #define ASM_PL080_H 26 + 24 27 #define PL080_INT_STATUS (0x00) 25 28 #define PL080_TC_STATUS (0x04) 26 29 #define PL080_TC_CLEAR (0x08) ··· 141 138 u32 control1; 142 139 }; 143 140 141 + #endif /* ASM_PL080_H */
+188 -257
drivers/dma/amba-pl08x.c
··· 66 66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 67 67 * will then move to the next LLI entry. 68 68 * 69 - * Only the former works sanely with scatter lists, so we only implement 70 - * the DMAC flow control method. However, peripherals which use the LBREQ 71 - * and LSREQ signals (eg, MMCI) are unable to use this mode, which through 72 - * these hardware restrictions prevents them from using scatter DMA. 73 - * 74 69 * Global TODO: 75 70 * - Break out common code from arch/arm/mach-s3c64xx and share 76 71 */ 77 - #include <linux/device.h> 78 - #include <linux/init.h> 79 - #include <linux/module.h> 80 - #include <linux/interrupt.h> 81 - #include <linux/slab.h> 82 - #include <linux/delay.h> 83 - #include <linux/dma-mapping.h> 84 - #include <linux/dmapool.h> 85 - #include <linux/dmaengine.h> 86 72 #include <linux/amba/bus.h> 87 73 #include <linux/amba/pl08x.h> 88 74 #include <linux/debugfs.h> 75 + #include <linux/delay.h> 76 + #include <linux/device.h> 77 + #include <linux/dmaengine.h> 78 + #include <linux/dmapool.h> 79 + #include <linux/dma-mapping.h> 80 + #include <linux/init.h> 81 + #include <linux/interrupt.h> 82 + #include <linux/module.h> 83 + #include <linux/pm_runtime.h> 89 84 #include <linux/seq_file.h> 90 - 85 + #include <linux/slab.h> 91 86 #include <asm/hardware/pl080.h> 92 87 93 88 #define DRIVER_NAME "pl08xdmac" ··· 121 126 * @phy_chans: array of data for the physical channels 122 127 * @pool: a pool for the LLI descriptors 123 128 * @pool_ctr: counter of LLIs in the pool 124 - * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches 129 + * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 130 + * fetches 125 131 * @mem_buses: set to indicate memory transfers on AHB2. 
126 132 * @lock: a spinlock for this struct 127 133 */ ··· 144 148 /* 145 149 * PL08X specific defines 146 150 */ 147 - 148 - /* 149 - * Memory boundaries: the manual for PL08x says that the controller 150 - * cannot read past a 1KiB boundary, so these defines are used to 151 - * create transfer LLIs that do not cross such boundaries. 152 - */ 153 - #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 154 - #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 155 151 156 152 /* Size (bytes) of each LLI buffer allocated for one transfer */ 157 153 # define PL08X_LLI_TSFR_SIZE 0x2000 ··· 259 271 val &= ~PL080_CONFIG_HALT; 260 272 writel(val, ch->base + PL080_CH_CONFIG); 261 273 } 262 - 263 274 264 275 /* 265 276 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and ··· 394 407 return NULL; 395 408 } 396 409 410 + pm_runtime_get_sync(&pl08x->adev->dev); 397 411 return ch; 398 412 } 399 413 ··· 407 419 408 420 /* Stop the channel and clear its interrupts */ 409 421 pl08x_terminate_phy_chan(pl08x, ch); 422 + 423 + pm_runtime_put(&pl08x->adev->dev); 410 424 411 425 /* Mark it as free */ 412 426 ch->serving = NULL; ··· 489 499 }; 490 500 491 501 /* 492 - * Autoselect a master bus to use for the transfer this prefers the 493 - * destination bus if both available if fixed address on one bus the 494 - * other will be chosen 502 + * Autoselect a master bus to use for the transfer. Slave will be the chosen as 503 + * victim in case src & dest are not similarly aligned. i.e. If after aligning 504 + * masters address with width requirements of transfer (by sending few byte by 505 + * byte data), slave is still not aligned, then its width will be reduced to 506 + * BYTE. 507 + * - prefers the destination bus if both available 508 + * - prefers bus with fixed address (i.e. 
peripheral) 495 509 */ 496 510 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 497 511 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 498 512 { 499 513 if (!(cctl & PL080_CONTROL_DST_INCR)) { 500 - *mbus = &bd->srcbus; 501 - *sbus = &bd->dstbus; 502 - } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 503 514 *mbus = &bd->dstbus; 504 515 *sbus = &bd->srcbus; 516 + } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 517 + *mbus = &bd->srcbus; 518 + *sbus = &bd->dstbus; 505 519 } else { 506 - if (bd->dstbus.buswidth == 4) { 520 + if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 507 521 *mbus = &bd->dstbus; 508 522 *sbus = &bd->srcbus; 509 - } else if (bd->srcbus.buswidth == 4) { 510 - *mbus = &bd->srcbus; 511 - *sbus = &bd->dstbus; 512 - } else if (bd->dstbus.buswidth == 2) { 513 - *mbus = &bd->dstbus; 514 - *sbus = &bd->srcbus; 515 - } else if (bd->srcbus.buswidth == 2) { 516 - *mbus = &bd->srcbus; 517 - *sbus = &bd->dstbus; 518 523 } else { 519 - /* bd->srcbus.buswidth == 1 */ 520 - *mbus = &bd->dstbus; 521 - *sbus = &bd->srcbus; 524 + *mbus = &bd->srcbus; 525 + *sbus = &bd->dstbus; 522 526 } 523 527 } 524 528 } ··· 531 547 llis_va[num_llis].cctl = cctl; 532 548 llis_va[num_llis].src = bd->srcbus.addr; 533 549 llis_va[num_llis].dst = bd->dstbus.addr; 534 - llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 550 + llis_va[num_llis].lli = llis_bus + (num_llis + 1) * 551 + sizeof(struct pl08x_lli); 535 552 llis_va[num_llis].lli |= bd->lli_bus; 536 553 537 554 if (cctl & PL080_CONTROL_SRC_INCR) ··· 545 560 bd->remainder -= len; 546 561 } 547 562 548 - /* 549 - * Return number of bytes to fill to boundary, or len. 550 - * This calculation works for any value of addr. 
551 - */ 552 - static inline size_t pl08x_pre_boundary(u32 addr, size_t len) 563 + static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, 564 + u32 *cctl, u32 len, int num_llis, size_t *total_bytes) 553 565 { 554 - size_t boundary_len = PL08X_BOUNDARY_SIZE - 555 - (addr & (PL08X_BOUNDARY_SIZE - 1)); 556 - 557 - return min(boundary_len, len); 566 + *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 567 + pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); 568 + (*total_bytes) += len; 558 569 } 559 570 560 571 /* ··· 564 583 struct pl08x_bus_data *mbus, *sbus; 565 584 struct pl08x_lli_build_data bd; 566 585 int num_llis = 0; 567 - u32 cctl; 568 - size_t max_bytes_per_lli; 569 - size_t total_bytes = 0; 586 + u32 cctl, early_bytes = 0; 587 + size_t max_bytes_per_lli, total_bytes = 0; 570 588 struct pl08x_lli *llis_va; 571 589 572 - txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 573 - &txd->llis_bus); 590 + txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 574 591 if (!txd->llis_va) { 575 592 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 576 593 return 0; ··· 598 619 bd.srcbus.buswidth = bd.srcbus.maxwidth; 599 620 bd.dstbus.buswidth = bd.dstbus.maxwidth; 600 621 601 - /* 602 - * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 603 - */ 604 - max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * 605 - PL080_CONTROL_TRANSFER_SIZE_MASK; 606 - 607 622 /* We need to count this down to zero */ 608 623 bd.remainder = txd->len; 609 624 610 - /* 611 - * Choose bus to align to 612 - * - prefers destination bus if both available 613 - * - if fixed address on one bus chooses other 614 - */ 615 625 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 616 626 617 - dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", 627 + dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", 618 628 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? 
"+" : "", 619 629 bd.srcbus.buswidth, 620 630 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", 621 631 bd.dstbus.buswidth, 622 - bd.remainder, max_bytes_per_lli); 632 + bd.remainder); 623 633 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 624 634 mbus == &bd.srcbus ? "src" : "dst", 625 635 sbus == &bd.srcbus ? "src" : "dst"); 626 636 627 - if (txd->len < mbus->buswidth) { 628 - /* Less than a bus width available - send as single bytes */ 629 - while (bd.remainder) { 630 - dev_vdbg(&pl08x->adev->dev, 631 - "%s single byte LLIs for a transfer of " 632 - "less than a bus width (remain 0x%08x)\n", 633 - __func__, bd.remainder); 634 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 635 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 636 - total_bytes++; 637 - } 638 - } else { 639 - /* Make one byte LLIs until master bus is aligned */ 640 - while ((mbus->addr) % (mbus->buswidth)) { 641 - dev_vdbg(&pl08x->adev->dev, 642 - "%s adjustment lli for less than bus width " 643 - "(remain 0x%08x)\n", 644 - __func__, bd.remainder); 645 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 646 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 647 - total_bytes++; 637 + /* 638 + * Zero length is only allowed if all these requirements are met: 639 + * - flow controller is peripheral. 640 + * - src.addr is aligned to src.width 641 + * - dst.addr is aligned to dst.width 642 + * 643 + * sg_len == 1 should be true, as there can be two cases here: 644 + * - Memory addresses are contiguous and are not scattered. Here, Only 645 + * one sg will be passed by user driver, with memory address and zero 646 + * length. We pass this to controller and after the transfer it will 647 + * receive the last burst request from peripheral and so transfer 648 + * finishes. 649 + * 650 + * - Memory addresses are scattered and are not contiguous. Here, 651 + * Obviously as DMA controller doesn't know when a lli's transfer gets 652 + * over, it can't load next lli. 
So in this case, there has to be an 653 + * assumption that only one lli is supported. Thus, we can't have 654 + * scattered addresses. 655 + */ 656 + if (!bd.remainder) { 657 + u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 658 + PL080_CONFIG_FLOW_CONTROL_SHIFT; 659 + if (!((fc >= PL080_FLOW_SRC2DST_DST) && 660 + (fc <= PL080_FLOW_SRC2DST_SRC))) { 661 + dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 662 + __func__); 663 + return 0; 648 664 } 649 665 666 + if ((bd.srcbus.addr % bd.srcbus.buswidth) || 667 + (bd.srcbus.addr % bd.srcbus.buswidth)) { 668 + dev_err(&pl08x->adev->dev, 669 + "%s src & dst address must be aligned to src" 670 + " & dst width if peripheral is flow controller", 671 + __func__); 672 + return 0; 673 + } 674 + 675 + cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 676 + bd.dstbus.buswidth, 0); 677 + pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); 678 + } 679 + 680 + /* 681 + * Send byte by byte for following cases 682 + * - Less than a bus width available 683 + * - until master bus is aligned 684 + */ 685 + if (bd.remainder < mbus->buswidth) 686 + early_bytes = bd.remainder; 687 + else if ((mbus->addr) % (mbus->buswidth)) { 688 + early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth); 689 + if ((bd.remainder - early_bytes) < mbus->buswidth) 690 + early_bytes = bd.remainder; 691 + } 692 + 693 + if (early_bytes) { 694 + dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs " 695 + "(remain 0x%08x)\n", __func__, bd.remainder); 696 + prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, 697 + &total_bytes); 698 + } 699 + 700 + if (bd.remainder) { 650 701 /* 651 702 * Master now aligned 652 703 * - if slave is not then we must set its width down ··· 689 680 sbus->buswidth = 1; 690 681 } 691 682 683 + /* Bytes transferred = tsize * src width, not MIN(buswidths) */ 684 + max_bytes_per_lli = bd.srcbus.buswidth * 685 + PL080_CONTROL_TRANSFER_SIZE_MASK; 686 + 692 687 /* 693 688 * Make largest possible LLIs until less than one bus 
694 689 * width left 695 690 */ 696 691 while (bd.remainder > (mbus->buswidth - 1)) { 697 - size_t lli_len, target_len, tsize, odd_bytes; 692 + size_t lli_len, tsize, width; 698 693 699 694 /* 700 695 * If enough left try to send max possible, 701 696 * otherwise try to send the remainder 702 697 */ 703 - target_len = min(bd.remainder, max_bytes_per_lli); 698 + lli_len = min(bd.remainder, max_bytes_per_lli); 704 699 705 700 /* 706 - * Set bus lengths for incrementing buses to the 707 - * number of bytes which fill to next memory boundary, 708 - * limiting on the target length calculated above. 701 + * Check against maximum bus alignment: Calculate actual 702 + * transfer size in relation to bus width and get a 703 + * maximum remainder of the highest bus width - 1 709 704 */ 710 - if (cctl & PL080_CONTROL_SRC_INCR) 711 - bd.srcbus.fill_bytes = 712 - pl08x_pre_boundary(bd.srcbus.addr, 713 - target_len); 714 - else 715 - bd.srcbus.fill_bytes = target_len; 705 + width = max(mbus->buswidth, sbus->buswidth); 706 + lli_len = (lli_len / width) * width; 707 + tsize = lli_len / bd.srcbus.buswidth; 716 708 717 - if (cctl & PL080_CONTROL_DST_INCR) 718 - bd.dstbus.fill_bytes = 719 - pl08x_pre_boundary(bd.dstbus.addr, 720 - target_len); 721 - else 722 - bd.dstbus.fill_bytes = target_len; 709 + dev_vdbg(&pl08x->adev->dev, 710 + "%s fill lli with single lli chunk of " 711 + "size 0x%08zx (remainder 0x%08zx)\n", 712 + __func__, lli_len, bd.remainder); 723 713 724 - /* Find the nearest */ 725 - lli_len = min(bd.srcbus.fill_bytes, 726 - bd.dstbus.fill_bytes); 727 - 728 - BUG_ON(lli_len > bd.remainder); 729 - 730 - if (lli_len <= 0) { 731 - dev_err(&pl08x->adev->dev, 732 - "%s lli_len is %zu, <= 0\n", 733 - __func__, lli_len); 734 - return 0; 735 - } 736 - 737 - if (lli_len == target_len) { 738 - /* 739 - * Can send what we wanted. 
740 - * Maintain alignment 741 - */ 742 - lli_len = (lli_len/mbus->buswidth) * 743 - mbus->buswidth; 744 - odd_bytes = 0; 745 - } else { 746 - /* 747 - * So now we know how many bytes to transfer 748 - * to get to the nearest boundary. The next 749 - * LLI will past the boundary. However, we 750 - * may be working to a boundary on the slave 751 - * bus. We need to ensure the master stays 752 - * aligned, and that we are working in 753 - * multiples of the bus widths. 754 - */ 755 - odd_bytes = lli_len % mbus->buswidth; 756 - lli_len -= odd_bytes; 757 - 758 - } 759 - 760 - if (lli_len) { 761 - /* 762 - * Check against minimum bus alignment: 763 - * Calculate actual transfer size in relation 764 - * to bus width an get a maximum remainder of 765 - * the smallest bus width - 1 766 - */ 767 - /* FIXME: use round_down()? */ 768 - tsize = lli_len / min(mbus->buswidth, 769 - sbus->buswidth); 770 - lli_len = tsize * min(mbus->buswidth, 771 - sbus->buswidth); 772 - 773 - if (target_len != lli_len) { 774 - dev_vdbg(&pl08x->adev->dev, 775 - "%s can't send what we want. 
Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", 776 - __func__, target_len, lli_len, txd->len); 777 - } 778 - 779 - cctl = pl08x_cctl_bits(cctl, 780 - bd.srcbus.buswidth, 781 - bd.dstbus.buswidth, 782 - tsize); 783 - 784 - dev_vdbg(&pl08x->adev->dev, 785 - "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", 786 - __func__, lli_len, bd.remainder); 787 - pl08x_fill_lli_for_desc(&bd, num_llis++, 788 - lli_len, cctl); 789 - total_bytes += lli_len; 790 - } 791 - 792 - 793 - if (odd_bytes) { 794 - /* 795 - * Creep past the boundary, maintaining 796 - * master alignment 797 - */ 798 - int j; 799 - for (j = 0; (j < mbus->buswidth) 800 - && (bd.remainder); j++) { 801 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 802 - dev_vdbg(&pl08x->adev->dev, 803 - "%s align with boundary, single byte (remain 0x%08zx)\n", 804 - __func__, bd.remainder); 805 - pl08x_fill_lli_for_desc(&bd, 806 - num_llis++, 1, cctl); 807 - total_bytes++; 808 - } 809 - } 714 + cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 715 + bd.dstbus.buswidth, tsize); 716 + pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl); 717 + total_bytes += lli_len; 810 718 } 811 719 812 720 /* 813 721 * Send any odd bytes 814 722 */ 815 - while (bd.remainder) { 816 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 723 + if (bd.remainder) { 817 724 dev_vdbg(&pl08x->adev->dev, 818 - "%s align with boundary, single odd byte (remain %zu)\n", 725 + "%s align with boundary, send odd bytes (remain %zu)\n", 819 726 __func__, bd.remainder); 820 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 821 - total_bytes++; 727 + prep_byte_width_lli(&bd, &cctl, bd.remainder, 728 + num_llis++, &total_bytes); 822 729 } 823 730 } 731 + 824 732 if (total_bytes != txd->len) { 825 733 dev_err(&pl08x->adev->dev, 826 734 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", ··· 843 917 * need, but for slaves the physical signals may be muxed! 
844 918 * Can the platform allow us to use this channel? 845 919 */ 846 - if (plchan->slave && 847 - ch->signal < 0 && 848 - pl08x->pd->get_signal) { 920 + if (plchan->slave && pl08x->pd->get_signal) { 849 921 ret = pl08x->pd->get_signal(plchan); 850 922 if (ret < 0) { 851 923 dev_dbg(&pl08x->adev->dev, ··· 932 1008 * If slaves are relying on interrupts to signal completion this function 933 1009 * must not be called with interrupts disabled. 934 1010 */ 935 - static enum dma_status 936 - pl08x_dma_tx_status(struct dma_chan *chan, 937 - dma_cookie_t cookie, 938 - struct dma_tx_state *txstate) 1011 + static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1012 + dma_cookie_t cookie, struct dma_tx_state *txstate) 939 1013 { 940 1014 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 941 1015 dma_cookie_t last_used; ··· 1175 1253 1176 1254 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1177 1255 if (!num_llis) { 1178 - kfree(txd); 1256 + spin_lock_irqsave(&plchan->lock, flags); 1257 + pl08x_free_txd(pl08x, txd); 1258 + spin_unlock_irqrestore(&plchan->lock, flags); 1179 1259 return -EINVAL; 1180 1260 } 1181 1261 ··· 1225 1301 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1226 1302 unsigned long flags) 1227 1303 { 1228 - struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1304 + struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1229 1305 1230 1306 if (txd) { 1231 1307 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); ··· 1291 1367 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1292 1368 struct pl08x_driver_data *pl08x = plchan->host; 1293 1369 struct pl08x_txd *txd; 1294 - int ret; 1370 + int ret, tmp; 1295 1371 1296 1372 /* 1297 1373 * Current implementation ASSUMES only one sg ··· 1325 1401 txd->len = sgl->length; 1326 1402 1327 1403 if (direction == DMA_TO_DEVICE) { 1328 - txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1329 1404 txd->cctl = plchan->dst_cctl; 1330 1405 
txd->src_addr = sgl->dma_address; 1331 1406 txd->dst_addr = plchan->dst_addr; 1332 1407 } else if (direction == DMA_FROM_DEVICE) { 1333 - txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1334 1408 txd->cctl = plchan->src_cctl; 1335 1409 txd->src_addr = plchan->src_addr; 1336 1410 txd->dst_addr = sgl->dma_address; ··· 1337 1415 "%s direction unsupported\n", __func__); 1338 1416 return NULL; 1339 1417 } 1418 + 1419 + if (plchan->cd->device_fc) 1420 + tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : 1421 + PL080_FLOW_PER2MEM_PER; 1422 + else 1423 + tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : 1424 + PL080_FLOW_PER2MEM; 1425 + 1426 + txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1340 1427 1341 1428 ret = pl08x_prep_channel_resources(plchan, txd); 1342 1429 if (ret) ··· 1438 1507 */ 1439 1508 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1440 1509 { 1441 - u32 val; 1442 - 1443 - val = readl(pl08x->base + PL080_CONFIG); 1444 - val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1445 - /* We implicitly clear bit 1 and that means little-endian mode */ 1446 - val |= PL080_CONFIG_ENABLE; 1447 - writel(val, pl08x->base + PL080_CONFIG); 1510 + writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1448 1511 } 1449 1512 1450 1513 static void pl08x_unmap_buffers(struct pl08x_txd *txd) ··· 1514 1589 */ 1515 1590 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1516 1591 chan.device_node) { 1517 - if (waiting->state == PL08X_CHAN_WAITING && 1518 - waiting->waiting != NULL) { 1592 + if (waiting->state == PL08X_CHAN_WAITING && 1593 + waiting->waiting != NULL) { 1519 1594 int ret; 1520 1595 1521 1596 /* This should REALLY not fail now */ ··· 1555 1630 static irqreturn_t pl08x_irq(int irq, void *dev) 1556 1631 { 1557 1632 struct pl08x_driver_data *pl08x = dev; 1558 - u32 mask = 0; 1559 - u32 val; 1560 - int i; 1633 + u32 mask = 0, err, tc, i; 1561 1634 1562 - val = readl(pl08x->base + 
PL080_ERR_STATUS); 1563 - if (val) { 1564 - /* An error interrupt (on one or more channels) */ 1565 - dev_err(&pl08x->adev->dev, 1566 - "%s error interrupt, register value 0x%08x\n", 1567 - __func__, val); 1568 - /* 1569 - * Simply clear ALL PL08X error interrupts, 1570 - * regardless of channel and cause 1571 - * FIXME: should be 0x00000003 on PL081 really. 1572 - */ 1573 - writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1635 + /* check & clear - ERR & TC interrupts */ 1636 + err = readl(pl08x->base + PL080_ERR_STATUS); 1637 + if (err) { 1638 + dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1639 + __func__, err); 1640 + writel(err, pl08x->base + PL080_ERR_CLEAR); 1574 1641 } 1575 - val = readl(pl08x->base + PL080_INT_STATUS); 1642 + tc = readl(pl08x->base + PL080_INT_STATUS); 1643 + if (tc) 1644 + writel(tc, pl08x->base + PL080_TC_CLEAR); 1645 + 1646 + if (!err && !tc) 1647 + return IRQ_NONE; 1648 + 1576 1649 for (i = 0; i < pl08x->vd->channels; i++) { 1577 - if ((1 << i) & val) { 1650 + if (((1 << i) & err) || ((1 << i) & tc)) { 1578 1651 /* Locate physical channel */ 1579 1652 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1580 1653 struct pl08x_dma_chan *plchan = phychan->serving; 1581 1654 1655 + if (!plchan) { 1656 + dev_err(&pl08x->adev->dev, 1657 + "%s Error TC interrupt on unused channel: 0x%08x\n", 1658 + __func__, i); 1659 + continue; 1660 + } 1661 + 1582 1662 /* Schedule tasklet on this channel */ 1583 1663 tasklet_schedule(&plchan->tasklet); 1584 - 1585 1664 mask |= (1 << i); 1586 1665 } 1587 1666 } 1588 - /* Clear only the terminal interrupts on channels we processed */ 1589 - writel(mask, pl08x->base + PL080_TC_CLEAR); 1590 1667 1591 1668 return mask ? 
IRQ_HANDLED : IRQ_NONE; 1592 1669 } ··· 1612 1685 * Make a local wrapper to hold required data 1613 1686 */ 1614 1687 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1615 - struct dma_device *dmadev, 1616 - unsigned int channels, 1617 - bool slave) 1688 + struct dma_device *dmadev, unsigned int channels, bool slave) 1618 1689 { 1619 1690 struct pl08x_dma_chan *chan; 1620 1691 int i; ··· 1625 1700 * to cope with that situation. 1626 1701 */ 1627 1702 for (i = 0; i < channels; i++) { 1628 - chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1703 + chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1629 1704 if (!chan) { 1630 1705 dev_err(&pl08x->adev->dev, 1631 1706 "%s no memory for channel\n", __func__); ··· 1653 1728 kfree(chan); 1654 1729 continue; 1655 1730 } 1656 - dev_info(&pl08x->adev->dev, 1731 + dev_dbg(&pl08x->adev->dev, 1657 1732 "initialize virtual channel \"%s\"\n", 1658 1733 chan->name); 1659 1734 ··· 1762 1837 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1763 1838 { 1764 1839 /* Expose a simple debugfs interface to view all clocks */ 1765 - (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1766 - NULL, pl08x, 1767 - &pl08x_debugfs_operations); 1840 + (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 1841 + S_IFREG | S_IRUGO, NULL, pl08x, 1842 + &pl08x_debugfs_operations); 1768 1843 } 1769 1844 1770 1845 #else ··· 1785 1860 return ret; 1786 1861 1787 1862 /* Create the driver state holder */ 1788 - pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); 1863 + pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 1789 1864 if (!pl08x) { 1790 1865 ret = -ENOMEM; 1791 1866 goto out_no_pl08x; 1792 1867 } 1868 + 1869 + pm_runtime_set_active(&adev->dev); 1870 + pm_runtime_enable(&adev->dev); 1793 1871 1794 1872 /* Initialize memcpy engine */ 1795 1873 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); ··· 1867 1939 } 1868 1940 1869 1941 /* Initialize physical channels */ 1870 - 
pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 1942 + pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), 1871 1943 GFP_KERNEL); 1872 1944 if (!pl08x->phy_chans) { 1873 1945 dev_err(&adev->dev, "%s failed to allocate " ··· 1884 1956 spin_lock_init(&ch->lock); 1885 1957 ch->serving = NULL; 1886 1958 ch->signal = -1; 1887 - dev_info(&adev->dev, 1888 - "physical channel %d is %s\n", i, 1889 - pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 1959 + dev_dbg(&adev->dev, "physical channel %d is %s\n", 1960 + i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 1890 1961 } 1891 1962 1892 1963 /* Register as many memcpy channels as there are physical channels */ ··· 1901 1974 1902 1975 /* Register slave channels */ 1903 1976 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1904 - pl08x->pd->num_slave_channels, 1905 - true); 1977 + pl08x->pd->num_slave_channels, true); 1906 1978 if (ret <= 0) { 1907 1979 dev_warn(&pl08x->adev->dev, 1908 1980 "%s failed to enumerate slave channels - %d\n", ··· 1931 2005 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 1932 2006 amba_part(adev), amba_rev(adev), 1933 2007 (unsigned long long)adev->res.start, adev->irq[0]); 2008 + 2009 + pm_runtime_put(&adev->dev); 1934 2010 return 0; 1935 2011 1936 2012 out_no_slave_reg: ··· 1951 2023 dma_pool_destroy(pl08x->pool); 1952 2024 out_no_lli_pool: 1953 2025 out_no_platdata: 2026 + pm_runtime_put(&adev->dev); 2027 + pm_runtime_disable(&adev->dev); 2028 + 1954 2029 kfree(pl08x); 1955 2030 out_no_pl08x: 1956 2031 amba_release_regions(adev);
+125 -34
drivers/dma/at_hdmac.c
··· 107 107 { 108 108 struct at_desc *desc, *_desc; 109 109 struct at_desc *ret = NULL; 110 + unsigned long flags; 110 111 unsigned int i = 0; 111 112 LIST_HEAD(tmp_list); 112 113 113 - spin_lock_bh(&atchan->lock); 114 + spin_lock_irqsave(&atchan->lock, flags); 114 115 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 115 116 i++; 116 117 if (async_tx_test_ack(&desc->txd)) { ··· 122 121 dev_dbg(chan2dev(&atchan->chan_common), 123 122 "desc %p not ACKed\n", desc); 124 123 } 125 - spin_unlock_bh(&atchan->lock); 124 + spin_unlock_irqrestore(&atchan->lock, flags); 126 125 dev_vdbg(chan2dev(&atchan->chan_common), 127 126 "scanned %u descriptors on freelist\n", i); 128 127 ··· 130 129 if (!ret) { 131 130 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 132 131 if (ret) { 133 - spin_lock_bh(&atchan->lock); 132 + spin_lock_irqsave(&atchan->lock, flags); 134 133 atchan->descs_allocated++; 135 - spin_unlock_bh(&atchan->lock); 134 + spin_unlock_irqrestore(&atchan->lock, flags); 136 135 } else { 137 136 dev_err(chan2dev(&atchan->chan_common), 138 137 "not enough descriptors available\n"); ··· 151 150 { 152 151 if (desc) { 153 152 struct at_desc *child; 153 + unsigned long flags; 154 154 155 - spin_lock_bh(&atchan->lock); 155 + spin_lock_irqsave(&atchan->lock, flags); 156 156 list_for_each_entry(child, &desc->tx_list, desc_node) 157 157 dev_vdbg(chan2dev(&atchan->chan_common), 158 158 "moving child desc %p to freelist\n", ··· 162 160 dev_vdbg(chan2dev(&atchan->chan_common), 163 161 "moving desc %p to freelist\n", desc); 164 162 list_add(&desc->desc_node, &atchan->free_list); 165 - spin_unlock_bh(&atchan->lock); 163 + spin_unlock_irqrestore(&atchan->lock, flags); 166 164 } 167 165 } 168 166 ··· 301 299 302 300 /* for cyclic transfers, 303 301 * no need to replay callback function while stopping */ 304 - if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { 302 + if (!atc_chan_is_cyclic(atchan)) { 305 303 dma_async_tx_callback callback = 
txd->callback; 306 304 void *param = txd->callback_param; 307 305 ··· 473 471 static void atc_tasklet(unsigned long data) 474 472 { 475 473 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 474 + unsigned long flags; 476 475 477 - spin_lock(&atchan->lock); 476 + spin_lock_irqsave(&atchan->lock, flags); 478 477 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 479 478 atc_handle_error(atchan); 480 - else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 479 + else if (atc_chan_is_cyclic(atchan)) 481 480 atc_handle_cyclic(atchan); 482 481 else 483 482 atc_advance_work(atchan); 484 483 485 - spin_unlock(&atchan->lock); 484 + spin_unlock_irqrestore(&atchan->lock, flags); 486 485 } 487 486 488 487 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) ··· 542 539 struct at_desc *desc = txd_to_at_desc(tx); 543 540 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 544 541 dma_cookie_t cookie; 542 + unsigned long flags; 545 543 546 - spin_lock_bh(&atchan->lock); 544 + spin_lock_irqsave(&atchan->lock, flags); 547 545 cookie = atc_assign_cookie(atchan, desc); 548 546 549 547 if (list_empty(&atchan->active_list)) { ··· 558 554 list_add_tail(&desc->desc_node, &atchan->queue); 559 555 } 560 556 561 - spin_unlock_bh(&atchan->lock); 557 + spin_unlock_irqrestore(&atchan->lock, flags); 562 558 563 559 return cookie; 564 560 } ··· 931 927 struct at_dma_chan *atchan = to_at_dma_chan(chan); 932 928 struct at_dma *atdma = to_at_dma(chan->device); 933 929 int chan_id = atchan->chan_common.chan_id; 930 + unsigned long flags; 934 931 935 932 LIST_HEAD(list); 936 933 937 934 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 938 935 939 936 if (cmd == DMA_PAUSE) { 940 - spin_lock_bh(&atchan->lock); 937 + spin_lock_irqsave(&atchan->lock, flags); 941 938 942 939 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 943 940 set_bit(ATC_IS_PAUSED, &atchan->status); 944 941 945 - spin_unlock_bh(&atchan->lock); 942 + spin_unlock_irqrestore(&atchan->lock, flags); 946 943 } else if 
(cmd == DMA_RESUME) { 947 - if (!test_bit(ATC_IS_PAUSED, &atchan->status)) 944 + if (!atc_chan_is_paused(atchan)) 948 945 return 0; 949 946 950 - spin_lock_bh(&atchan->lock); 947 + spin_lock_irqsave(&atchan->lock, flags); 951 948 952 949 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 953 950 clear_bit(ATC_IS_PAUSED, &atchan->status); 954 951 955 - spin_unlock_bh(&atchan->lock); 952 + spin_unlock_irqrestore(&atchan->lock, flags); 956 953 } else if (cmd == DMA_TERMINATE_ALL) { 957 954 struct at_desc *desc, *_desc; 958 955 /* ··· 962 957 * channel. We still have to poll the channel enable bit due 963 958 * to AHB/HSB limitations. 964 959 */ 965 - spin_lock_bh(&atchan->lock); 960 + spin_lock_irqsave(&atchan->lock, flags); 966 961 967 962 /* disabling channel: must also remove suspend state */ 968 963 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); ··· 983 978 /* if channel dedicated to cyclic operations, free it */ 984 979 clear_bit(ATC_IS_CYCLIC, &atchan->status); 985 980 986 - spin_unlock_bh(&atchan->lock); 981 + spin_unlock_irqrestore(&atchan->lock, flags); 987 982 } else { 988 983 return -ENXIO; 989 984 } ··· 1009 1004 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1010 1005 dma_cookie_t last_used; 1011 1006 dma_cookie_t last_complete; 1007 + unsigned long flags; 1012 1008 enum dma_status ret; 1013 1009 1014 - spin_lock_bh(&atchan->lock); 1010 + spin_lock_irqsave(&atchan->lock, flags); 1015 1011 1016 1012 last_complete = atchan->completed_cookie; 1017 1013 last_used = chan->cookie; ··· 1027 1021 ret = dma_async_is_complete(cookie, last_complete, last_used); 1028 1022 } 1029 1023 1030 - spin_unlock_bh(&atchan->lock); 1024 + spin_unlock_irqrestore(&atchan->lock, flags); 1031 1025 1032 1026 if (ret != DMA_SUCCESS) 1033 1027 dma_set_tx_state(txstate, last_complete, last_used, ··· 1035 1029 else 1036 1030 dma_set_tx_state(txstate, last_complete, last_used, 0); 1037 1031 1038 - if (test_bit(ATC_IS_PAUSED, &atchan->status)) 1032 + if 
(atc_chan_is_paused(atchan)) 1039 1033 ret = DMA_PAUSED; 1040 1034 1041 1035 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", ··· 1052 1046 static void atc_issue_pending(struct dma_chan *chan) 1053 1047 { 1054 1048 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1049 + unsigned long flags; 1055 1050 1056 1051 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1057 1052 1058 1053 /* Not needed for cyclic transfers */ 1059 - if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 1054 + if (atc_chan_is_cyclic(atchan)) 1060 1055 return; 1061 1056 1062 - spin_lock_bh(&atchan->lock); 1057 + spin_lock_irqsave(&atchan->lock, flags); 1063 1058 if (!atc_chan_is_enabled(atchan)) { 1064 1059 atc_advance_work(atchan); 1065 1060 } 1066 - spin_unlock_bh(&atchan->lock); 1061 + spin_unlock_irqrestore(&atchan->lock, flags); 1067 1062 } 1068 1063 1069 1064 /** ··· 1080 1073 struct at_dma *atdma = to_at_dma(chan->device); 1081 1074 struct at_desc *desc; 1082 1075 struct at_dma_slave *atslave; 1076 + unsigned long flags; 1083 1077 int i; 1084 1078 u32 cfg; 1085 1079 LIST_HEAD(tmp_list); ··· 1124 1116 list_add_tail(&desc->desc_node, &tmp_list); 1125 1117 } 1126 1118 1127 - spin_lock_bh(&atchan->lock); 1119 + spin_lock_irqsave(&atchan->lock, flags); 1128 1120 atchan->descs_allocated = i; 1129 1121 list_splice(&tmp_list, &atchan->free_list); 1130 1122 atchan->completed_cookie = chan->cookie = 1; 1131 - spin_unlock_bh(&atchan->lock); 1123 + spin_unlock_irqrestore(&atchan->lock, flags); 1132 1124 1133 1125 /* channel parameters */ 1134 1126 channel_writel(atchan, CFG, cfg); ··· 1301 1293 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1302 1294 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1303 1295 1304 - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) 1296 + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1305 1297 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1306 - 1307 - if (dma_has_cap(DMA_CYCLIC, 
atdma->dma_common.cap_mask)) 1298 + /* controller can do slave DMA: can trigger cyclic transfers */ 1299 + dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1308 1300 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1309 - 1310 - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || 1311 - dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1312 1301 atdma->dma_common.device_control = atc_control; 1302 + } 1313 1303 1314 1304 dma_writel(atdma, EN, AT_DMA_ENABLE); 1315 1305 ··· 1383 1377 clk_disable(atdma->clk); 1384 1378 } 1385 1379 1380 + static int at_dma_prepare(struct device *dev) 1381 + { 1382 + struct platform_device *pdev = to_platform_device(dev); 1383 + struct at_dma *atdma = platform_get_drvdata(pdev); 1384 + struct dma_chan *chan, *_chan; 1385 + 1386 + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1387 + device_node) { 1388 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1389 + /* wait for transaction completion (except in cyclic case) */ 1390 + if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1391 + return -EAGAIN; 1392 + } 1393 + return 0; 1394 + } 1395 + 1396 + static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1397 + { 1398 + struct dma_chan *chan = &atchan->chan_common; 1399 + 1400 + /* Channel should be paused by user 1401 + * do it anyway even if it is not done already */ 1402 + if (!atc_chan_is_paused(atchan)) { 1403 + dev_warn(chan2dev(chan), 1404 + "cyclic channel not paused, should be done by channel user\n"); 1405 + atc_control(chan, DMA_PAUSE, 0); 1406 + } 1407 + 1408 + /* now preserve additional data for cyclic operations */ 1409 + /* next descriptor address in the cyclic list */ 1410 + atchan->save_dscr = channel_readl(atchan, DSCR); 1411 + 1412 + vdbg_dump_regs(atchan); 1413 + } 1414 + 1386 1415 static int at_dma_suspend_noirq(struct device *dev) 1387 1416 { 1388 1417 struct platform_device *pdev = to_platform_device(dev); 1389 1418 struct at_dma *atdma = 
platform_get_drvdata(pdev); 1419 + struct dma_chan *chan, *_chan; 1390 1420 1391 - at_dma_off(platform_get_drvdata(pdev)); 1421 + /* preserve data */ 1422 + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1423 + device_node) { 1424 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1425 + 1426 + if (atc_chan_is_cyclic(atchan)) 1427 + atc_suspend_cyclic(atchan); 1428 + atchan->save_cfg = channel_readl(atchan, CFG); 1429 + } 1430 + atdma->save_imr = dma_readl(atdma, EBCIMR); 1431 + 1432 + /* disable DMA controller */ 1433 + at_dma_off(atdma); 1392 1434 clk_disable(atdma->clk); 1393 1435 return 0; 1436 + } 1437 + 1438 + static void atc_resume_cyclic(struct at_dma_chan *atchan) 1439 + { 1440 + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1441 + 1442 + /* restore channel status for cyclic descriptors list: 1443 + * next descriptor in the cyclic list at the time of suspend */ 1444 + channel_writel(atchan, SADDR, 0); 1445 + channel_writel(atchan, DADDR, 0); 1446 + channel_writel(atchan, CTRLA, 0); 1447 + channel_writel(atchan, CTRLB, 0); 1448 + channel_writel(atchan, DSCR, atchan->save_dscr); 1449 + dma_writel(atdma, CHER, atchan->mask); 1450 + 1451 + /* channel pause status should be removed by channel user 1452 + * We cannot take the initiative to do it here */ 1453 + 1454 + vdbg_dump_regs(atchan); 1394 1455 } 1395 1456 1396 1457 static int at_dma_resume_noirq(struct device *dev) 1397 1458 { 1398 1459 struct platform_device *pdev = to_platform_device(dev); 1399 1460 struct at_dma *atdma = platform_get_drvdata(pdev); 1461 + struct dma_chan *chan, *_chan; 1400 1462 1463 + /* bring back DMA controller */ 1401 1464 clk_enable(atdma->clk); 1402 1465 dma_writel(atdma, EN, AT_DMA_ENABLE); 1466 + 1467 + /* clear any pending interrupt */ 1468 + while (dma_readl(atdma, EBCISR)) 1469 + cpu_relax(); 1470 + 1471 + /* restore saved data */ 1472 + dma_writel(atdma, EBCIER, atdma->save_imr); 1473 + list_for_each_entry_safe(chan, _chan, 
&atdma->dma_common.channels, 1474 + device_node) { 1475 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1476 + 1477 + channel_writel(atchan, CFG, atchan->save_cfg); 1478 + if (atc_chan_is_cyclic(atchan)) 1479 + atc_resume_cyclic(atchan); 1480 + } 1403 1481 return 0; 1404 1482 } 1405 1483 1406 1484 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1485 + .prepare = at_dma_prepare, 1407 1486 .suspend_noirq = at_dma_suspend_noirq, 1408 1487 .resume_noirq = at_dma_resume_noirq, 1409 1488 };
+24
drivers/dma/at_hdmac_regs.h
··· 204 204 * @status: transmit status information from irq/prep* functions 205 205 * to tasklet (use atomic operations) 206 206 * @tasklet: bottom half to finish transaction work 207 + * @save_cfg: configuration register that is saved on suspend/resume cycle 208 + * @save_dscr: for cyclic operations, preserve next descriptor address in 209 + * the cyclic list on suspend/resume cycle 207 210 * @lock: serializes enqueue/dequeue operations to descriptors lists 208 211 * @completed_cookie: identifier for the most recently completed operation 209 212 * @active_list: list of descriptors dmaengine is being running on ··· 221 218 u8 mask; 222 219 unsigned long status; 223 220 struct tasklet_struct tasklet; 221 + u32 save_cfg; 222 + u32 save_dscr; 224 223 225 224 spinlock_t lock; 226 225 ··· 253 248 * @chan_common: common dmaengine dma_device object members 254 249 * @ch_regs: memory mapped register base 255 250 * @clk: dma controller clock 251 + * @save_imr: interrupt mask register that is saved on suspend/resume cycle 256 252 * @all_chan_mask: all channels availlable in a mask 257 253 * @dma_desc_pool: base of DMA descriptor region (DMA address) 258 254 * @chan: channels table to store at_dma_chan structures ··· 262 256 struct dma_device dma_common; 263 257 void __iomem *regs; 264 258 struct clk *clk; 259 + u32 save_imr; 265 260 266 261 u8 all_chan_mask; 267 262 ··· 362 355 return !!(dma_readl(atdma, CHSR) & atchan->mask); 363 356 } 364 357 358 + /** 359 + * atc_chan_is_paused - test channel pause/resume status 360 + * @atchan: channel we want to test status 361 + */ 362 + static inline int atc_chan_is_paused(struct at_dma_chan *atchan) 363 + { 364 + return test_bit(ATC_IS_PAUSED, &atchan->status); 365 + } 366 + 367 + /** 368 + * atc_chan_is_cyclic - test if given channel has cyclic property set 369 + * @atchan: channel we want to test status 370 + */ 371 + static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) 372 + { 373 + return test_bit(ATC_IS_CYCLIC, 
&atchan->status); 374 + } 365 375 366 376 /** 367 377 * set_desc_eol - set end-of-link to descriptor so it will end transfer
+21 -2
drivers/dma/dmatest.c
··· 10 10 #include <linux/delay.h> 11 11 #include <linux/dma-mapping.h> 12 12 #include <linux/dmaengine.h> 13 + #include <linux/freezer.h> 13 14 #include <linux/init.h> 14 15 #include <linux/kthread.h> 15 16 #include <linux/module.h> ··· 252 251 int i; 253 252 254 253 thread_name = current->comm; 254 + set_freezable_with_signal(); 255 255 256 256 ret = -ENOMEM; 257 257 ··· 307 305 dma_addr_t dma_srcs[src_cnt]; 308 306 dma_addr_t dma_dsts[dst_cnt]; 309 307 struct completion cmp; 310 - unsigned long tmo = msecs_to_jiffies(timeout); 308 + unsigned long start, tmo, end = 0 /* compiler... */; 309 + bool reload = true; 311 310 u8 align = 0; 312 311 313 312 total_tests++; ··· 407 404 } 408 405 dma_async_issue_pending(chan); 409 406 410 - tmo = wait_for_completion_timeout(&cmp, tmo); 407 + do { 408 + start = jiffies; 409 + if (reload) 410 + end = start + msecs_to_jiffies(timeout); 411 + else if (end <= start) 412 + end = start + 1; 413 + tmo = wait_for_completion_interruptible_timeout(&cmp, 414 + end - start); 415 + reload = try_to_freeze(); 416 + } while (tmo == -ERESTARTSYS); 417 + 411 418 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 412 419 413 420 if (tmo == 0) { ··· 490 477 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 491 478 thread_name, total_tests, failed_tests, ret); 492 479 480 + /* terminate all transfers on specified channels */ 481 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 493 482 if (iterations > 0) 494 483 while (!kthread_should_stop()) { 495 484 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); ··· 514 499 list_del(&thread->node); 515 500 kfree(thread); 516 501 } 502 + 503 + /* terminate all transfers on specified channels */ 504 + dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); 505 + 517 506 kfree(dtc); 518 507 } 519 508
+38 -9
drivers/dma/imx-sdma.c
··· 318 318 dma_addr_t context_phys; 319 319 struct dma_device dma_device; 320 320 struct clk *clk; 321 + struct mutex channel_0_lock; 321 322 struct sdma_script_start_addrs *script_addrs; 322 323 }; 323 324 ··· 416 415 dma_addr_t buf_phys; 417 416 int ret; 418 417 418 + mutex_lock(&sdma->channel_0_lock); 419 + 419 420 buf_virt = dma_alloc_coherent(NULL, 420 421 size, 421 422 &buf_phys, GFP_KERNEL); 422 - if (!buf_virt) 423 - return -ENOMEM; 423 + if (!buf_virt) { 424 + ret = -ENOMEM; 425 + goto err_out; 426 + } 424 427 425 428 bd0->mode.command = C0_SETPM; 426 429 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; ··· 437 432 ret = sdma_run_channel(&sdma->channel[0]); 438 433 439 434 dma_free_coherent(NULL, size, buf_virt, buf_phys); 435 + 436 + err_out: 437 + mutex_unlock(&sdma->channel_0_lock); 440 438 441 439 return ret; 442 440 } ··· 664 656 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 665 657 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 666 658 659 + mutex_lock(&sdma->channel_0_lock); 660 + 667 661 memset(context, 0, sizeof(*context)); 668 662 context->channel_state.pc = load_address; 669 663 ··· 685 675 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 686 676 687 677 ret = sdma_run_channel(&sdma->channel[0]); 678 + 679 + mutex_unlock(&sdma->channel_0_lock); 688 680 689 681 return ret; 690 682 } ··· 1143 1131 saddr_arr[i] = addr_arr[i]; 1144 1132 } 1145 1133 1146 - static int __init sdma_get_firmware(struct sdma_engine *sdma, 1147 - const char *fw_name) 1134 + static void sdma_load_firmware(const struct firmware *fw, void *context) 1148 1135 { 1149 - const struct firmware *fw; 1136 + struct sdma_engine *sdma = context; 1150 1137 const struct sdma_firmware_header *header; 1151 - int ret; 1152 1138 const struct sdma_script_start_addrs *addr; 1153 1139 unsigned short *ram_code; 1154 1140 1155 - ret = request_firmware(&fw, fw_name, sdma->dev); 1156 - if (ret) 1157 - return ret; 1141 + if (!fw) { 1142 
+ dev_err(sdma->dev, "firmware not found\n"); 1143 + return; 1144 + } 1158 1145 1159 1146 if (fw->size < sizeof(*header)) 1160 1147 goto err_firmware; ··· 1183 1172 1184 1173 err_firmware: 1185 1174 release_firmware(fw); 1175 + } 1176 + 1177 + static int __init sdma_get_firmware(struct sdma_engine *sdma, 1178 + const char *fw_name) 1179 + { 1180 + int ret; 1181 + 1182 + ret = request_firmware_nowait(THIS_MODULE, 1183 + FW_ACTION_HOTPLUG, fw_name, sdma->dev, 1184 + GFP_KERNEL, sdma, sdma_load_firmware); 1186 1185 1187 1186 return ret; 1188 1187 } ··· 1290 1269 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1291 1270 int i; 1292 1271 struct sdma_engine *sdma; 1272 + s32 *saddr_arr; 1293 1273 1294 1274 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1295 1275 if (!sdma) 1296 1276 return -ENOMEM; 1277 + 1278 + mutex_init(&sdma->channel_0_lock); 1297 1279 1298 1280 sdma->dev = &pdev->dev; 1299 1281 ··· 1333 1309 ret = -ENOMEM; 1334 1310 goto err_alloc; 1335 1311 } 1312 + 1313 + /* initially no scripts available */ 1314 + saddr_arr = (s32 *)sdma->script_addrs; 1315 + for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 1316 + saddr_arr[i] = -EINVAL; 1336 1317 1337 1318 if (of_id) 1338 1319 pdev->id_entry = of_id->data;
+24 -21
drivers/dma/mxs-dma.c
··· 130 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 131 131 }; 132 132 133 + static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) 134 + { 135 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 136 + int chan_id = mxs_chan->chan.chan_id; 137 + int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR; 138 + 139 + /* enable apbh channel clock */ 140 + if (dma_is_apbh()) { 141 + if (apbh_is_old()) 142 + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 143 + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); 144 + else 145 + writel(1 << chan_id, 146 + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); 147 + } 148 + } 149 + 133 150 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 134 151 { 135 152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; ··· 165 148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 166 149 int chan_id = mxs_chan->chan.chan_id; 167 150 151 + /* clkgate needs to be enabled before writing other registers */ 152 + mxs_dma_clkgate(mxs_chan, 1); 153 + 168 154 /* set cmd_addr up */ 169 155 writel(mxs_chan->ccw_phys, 170 156 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 171 - 172 - /* enable apbh channel clock */ 173 - if (dma_is_apbh()) { 174 - if (apbh_is_old()) 175 - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 176 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 177 - else 178 - writel(1 << chan_id, 179 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 180 - } 181 157 182 158 /* write 1 to SEMA to kick off the channel */ 183 159 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); ··· 178 168 179 169 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 180 170 { 181 - struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 182 - int chan_id = mxs_chan->chan.chan_id; 183 - 184 171 /* disable apbh channel clock */ 185 - if (dma_is_apbh()) { 186 - if (apbh_is_old()) 187 - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 188 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 189 - else 
190 - writel(1 << chan_id, 191 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 192 - } 172 + mxs_dma_clkgate(mxs_chan, 0); 193 173 194 174 mxs_chan->status = DMA_SUCCESS; 195 175 } ··· 338 338 if (ret) 339 339 goto err_clk; 340 340 341 + /* clkgate needs to be enabled for reset to finish */ 342 + mxs_dma_clkgate(mxs_chan, 1); 341 343 mxs_dma_reset_chan(mxs_chan); 344 + mxs_dma_clkgate(mxs_chan, 0); 342 345 343 346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 344 347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
+12 -2
include/linux/amba/pl08x.h
··· 47 47 * @muxval: a number usually used to poke into some mux regiser to 48 48 * mux in the signal to this channel 49 49 * @cctl_opt: default options for the channel control register 50 + * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave 51 + * channels. Fill with 'true' if peripheral should be flow controller. Direction 52 + * will be selected at Runtime. 50 53 * @addr: source/target address in physical memory for this DMA channel, 51 54 * can be the address of a FIFO register for burst requests for example. 52 55 * This can be left undefined if the PrimeCell API is used for configuring ··· 68 65 int max_signal; 69 66 u32 muxval; 70 67 u32 cctl; 68 + bool device_fc; 71 69 dma_addr_t addr; 72 70 bool circular_buffer; 73 71 bool single; ··· 81 77 * @addr: current address 82 78 * @maxwidth: the maximum width of a transfer on this bus 83 79 * @buswidth: the width of this bus in bytes: 1, 2 or 4 84 - * @fill_bytes: bytes required to fill to the next bus memory boundary 85 80 */ 86 81 struct pl08x_bus_data { 87 82 dma_addr_t addr; 88 83 u8 maxwidth; 89 84 u8 buswidth; 90 - size_t fill_bytes; 91 85 }; 92 86 93 87 /** ··· 107 105 108 106 /** 109 107 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 108 + * @tx: async tx descriptor 109 + * @node: node for txd list for channels 110 + * @src_addr: src address of txd 111 + * @dst_addr: dst address of txd 112 + * @len: transfer len in bytes 113 + * @direction: direction of transfer 110 114 * @llis_bus: DMA memory address (physical) start for the LLIs 111 115 * @llis_va: virtual memory address start for the LLIs 116 + * @cctl: control reg values for current txd 117 + * @ccfg: config reg values for current txd 112 118 */ 113 119 struct pl08x_txd { 114 120 struct dma_async_tx_descriptor tx;
+11 -2
include/linux/dmaengine.h
··· 24 24 #include <linux/device.h> 25 25 #include <linux/uio.h> 26 26 #include <linux/dma-direction.h> 27 - 28 - struct scatterlist; 27 + #include <linux/scatterlist.h> 29 28 30 29 /** 31 30 * typedef dma_cookie_t - an opaque DMA cookie ··· 516 517 { 517 518 return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, 518 519 (unsigned long)config); 520 + } 521 + 522 + static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( 523 + struct dma_chan *chan, void *buf, size_t len, 524 + enum dma_data_direction dir, unsigned long flags) 525 + { 526 + struct scatterlist sg; 527 + sg_init_one(&sg, buf, len); 528 + 529 + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags); 519 530 } 520 531 521 532 static inline int dmaengine_terminate_all(struct dma_chan *chan)