Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: edma: New device tree binding

With the old binding and driver architecture we had many issues:
No way to assign eDMA channels to event queues, thus not able to tune the
system by moving specific DMA channels to low/high priority servicing. We
moved the cyclic channels to high priority within the code, but that was
just a workaround to this issue.
Memcopy was fundamentally broken: even if the driver scanned the DT/devices
in the booted system for direct DMA users (which is not effective when the
events are going through a crossbar) and created a map of 'used' channels,
this information was not really usable. Since via the dmaengine API the eDMA
driver will be called with _some_ channel number, we would try to request
this channel when any channel is requested for memcpy. By luck we got
channel which is not used by any device most of the time so things worked,
but if a device would have been using the given channel, but not requested
it, the memcpy channel would have been waiting for HW event.
The old code had the am33xx/am43xx DMA event router handling embedded. This
should have been done in a separate driver since it is not part of the
actual eDMA IP.
There was no way to 'lock' PaRAM slots to be used by the DSP for example
when booting with DT.
In DT boot the edma node used more than one hwmod which is not a good
practice and the kernel prints warning because of this.

With the new bindings and the changes in the driver we can:
- No regression with Legacy binding and non DT boot
- DMA channels can be assigned to any TC (to set priority)
- PaRAM slots can be reserved for other cores to use
- Dynamic power management for CC and TCs: if only TC0 is used, all other TCs
can be powered down, for example

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

authored by

Peter Ujfalusi and committed by
Vinod Koul
1be5336b f7c7cae9

+457 -145
+116 -1
Documentation/devicetree/bindings/dma/ti-edma.txt
··· 1 - TI EDMA 1 + Texas Instruments eDMA 2 + 3 + The eDMA3 consists of two components: Channel controller (CC) and Transfer 4 + Controller(s) (TC). The CC is the main entry for DMA users since it is 5 + responsible for the DMA channel handling, while the TCs are responsible to 6 + execute the actual DMA tansfer. 7 + 8 + ------------------------------------------------------------------------------ 9 + eDMA3 Channel Controller 10 + 11 + Required properties: 12 + - compatible: "ti,edma3-tpcc" for the channel controller(s) 13 + - #dma-cells: Should be set to <2>. The first number is the DMA request 14 + number and the second is the TC the channel is serviced on. 15 + - reg: Memory map of eDMA CC 16 + - reg-names: "edma3_cc" 17 + - interrupts: Interrupt lines for CCINT, MPERR and CCERRINT. 18 + - interrupt-names: "edma3_ccint", "emda3_mperr" and "edma3_ccerrint" 19 + - ti,tptcs: List of TPTCs associated with the eDMA in the following form: 20 + <&tptc_phandle TC_priority_number>. The highest priority is 0. 21 + 22 + Optional properties: 23 + - ti,hwmods: Name of the hwmods associated to the eDMA CC 24 + - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow 25 + these channels will be SW triggered channels. The list must 26 + contain 16 bits numbers, see example. 27 + - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by 28 + the driver, they are allocated to be used by for example the 29 + DSP. See example. 30 + 31 + ------------------------------------------------------------------------------ 32 + eDMA3 Transfer Controller 33 + 34 + Required properties: 35 + - compatible: "ti,edma3-tptc" for the transfer controller(s) 36 + - reg: Memory map of eDMA TC 37 + - interrupts: Interrupt number for TCerrint. 
38 + 39 + Optional properties: 40 + - ti,hwmods: Name of the hwmods associated to the given eDMA TC 41 + - interrupt-names: "edma3_tcerrint" 42 + 43 + ------------------------------------------------------------------------------ 44 + Example: 45 + 46 + edma: edma@49000000 { 47 + compatible = "ti,edma3-tpcc"; 48 + ti,hwmods = "tpcc"; 49 + reg = <0x49000000 0x10000>; 50 + reg-names = "edma3_cc"; 51 + interrupts = <12 13 14>; 52 + interrupt-names = "edma3_ccint", "emda3_mperr", "edma3_ccerrint"; 53 + dma-requests = <64>; 54 + #dma-cells = <2>; 55 + 56 + ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; 57 + 58 + /* Channel 20 and 21 is allocated for memcpy */ 59 + ti,edma-memcpy-channels = /bits/ 16 <20 21>; 60 + /* The following PaRAM slots are reserved: 35-45 and 100-110 */ 61 + ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, 62 + /bits/ 16 <100 10>; 63 + }; 64 + 65 + edma_tptc0: tptc@49800000 { 66 + compatible = "ti,edma3-tptc"; 67 + ti,hwmods = "tptc0"; 68 + reg = <0x49800000 0x100000>; 69 + interrupts = <112>; 70 + interrupt-names = "edm3_tcerrint"; 71 + }; 72 + 73 + edma_tptc1: tptc@49900000 { 74 + compatible = "ti,edma3-tptc"; 75 + ti,hwmods = "tptc1"; 76 + reg = <0x49900000 0x100000>; 77 + interrupts = <113>; 78 + interrupt-names = "edm3_tcerrint"; 79 + }; 80 + 81 + edma_tptc2: tptc@49a00000 { 82 + compatible = "ti,edma3-tptc"; 83 + ti,hwmods = "tptc2"; 84 + reg = <0x49a00000 0x100000>; 85 + interrupts = <114>; 86 + interrupt-names = "edm3_tcerrint"; 87 + }; 88 + 89 + sham: sham@53100000 { 90 + compatible = "ti,omap4-sham"; 91 + ti,hwmods = "sham"; 92 + reg = <0x53100000 0x200>; 93 + interrupts = <109>; 94 + /* DMA channel 36 executed on eDMA TC0 - low priority queue */ 95 + dmas = <&edma 36 0>; 96 + dma-names = "rx"; 97 + }; 98 + 99 + mcasp0: mcasp@48038000 { 100 + compatible = "ti,am33xx-mcasp-audio"; 101 + ti,hwmods = "mcasp0"; 102 + reg = <0x48038000 0x2000>, 103 + <0x46000000 0x400000>; 104 + reg-names = "mpu", "dat"; 105 + interrupts = 
<80>, <81>; 106 + interrupt-names = "tx", "rx"; 107 + status = "disabled"; 108 + /* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */ 109 + dmas = <&edma 8 2>, 110 + <&edma 9 2>; 111 + dma-names = "tx", "rx"; 112 + }; 113 + 114 + ------------------------------------------------------------------------------ 115 + DEPRECATED binding, new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc 116 + binding. 2 117 3 118 Required properties: 4 119 - compatible : "ti,edma3"
+338 -144
drivers/dma/edma.c
··· 201 201 202 202 struct edma_cc; 203 203 204 + struct edma_tc { 205 + struct device_node *node; 206 + u16 id; 207 + }; 208 + 204 209 struct edma_chan { 205 210 struct virt_dma_chan vchan; 206 211 struct list_head node; 207 212 struct edma_desc *edesc; 208 213 struct edma_cc *ecc; 214 + struct edma_tc *tc; 209 215 int ch_num; 210 216 bool alloced; 217 + bool hw_triggered; 211 218 int slot[EDMA_MAX_SLOTS]; 212 219 int missed; 213 220 struct dma_slave_config cfg; ··· 225 218 struct edma_soc_info *info; 226 219 void __iomem *base; 227 220 int id; 221 + bool legacy_mode; 228 222 229 223 /* eDMA3 resource information */ 230 224 unsigned num_channels; ··· 236 228 bool chmap_exist; 237 229 enum dma_event_q default_queue; 238 230 239 - bool unused_chan_list_done; 240 - /* The slot_inuse bit for each PaRAM slot is clear unless the 241 - * channel is in use ... by ARM or DSP, for QDMA, or whatever. 231 + /* 232 + * The slot_inuse bit for each PaRAM slot is clear unless the slot is 233 + * in use by Linux or if it is allocated to be used by DSP. 242 234 */ 243 235 unsigned long *slot_inuse; 244 236 245 - /* The channel_unused bit for each channel is clear unless 246 - * it is not being used on this platform. It uses a bit 247 - * of SOC-specific initialization code. 
248 - */ 249 - unsigned long *channel_unused; 250 - 251 237 struct dma_device dma_slave; 238 + struct dma_device *dma_memcpy; 252 239 struct edma_chan *slave_chans; 240 + struct edma_tc *tc_list; 253 241 int dummy_slot; 254 242 }; 255 243 ··· 255 251 .ccnt = 1, 256 252 }; 257 253 254 + #define EDMA_BINDING_LEGACY 0 255 + #define EDMA_BINDING_TPCC 1 258 256 static const struct of_device_id edma_of_ids[] = { 259 - { .compatible = "ti,edma3", }, 257 + { 258 + .compatible = "ti,edma3", 259 + .data = (void *)EDMA_BINDING_LEGACY, 260 + }, 261 + { 262 + .compatible = "ti,edma3-tpcc", 263 + .data = (void *)EDMA_BINDING_TPCC, 264 + }, 260 265 {} 261 266 }; 262 267 ··· 425 412 } 426 413 } 427 414 428 - static int prepare_unused_channel_list(struct device *dev, void *data) 429 - { 430 - struct platform_device *pdev = to_platform_device(dev); 431 - struct edma_cc *ecc = data; 432 - int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0); 433 - int dma_req_max = dma_req_min + ecc->num_channels; 434 - int i, count; 435 - struct of_phandle_args dma_spec; 436 - 437 - if (dev->of_node) { 438 - struct platform_device *dma_pdev; 439 - 440 - count = of_property_count_strings(dev->of_node, "dma-names"); 441 - if (count < 0) 442 - return 0; 443 - for (i = 0; i < count; i++) { 444 - if (of_parse_phandle_with_args(dev->of_node, "dmas", 445 - "#dma-cells", i, 446 - &dma_spec)) 447 - continue; 448 - 449 - if (!of_match_node(edma_of_ids, dma_spec.np)) { 450 - of_node_put(dma_spec.np); 451 - continue; 452 - } 453 - 454 - dma_pdev = of_find_device_by_node(dma_spec.np); 455 - if (&dma_pdev->dev != ecc->dev) 456 - continue; 457 - 458 - clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]), 459 - ecc->channel_unused); 460 - of_node_put(dma_spec.np); 461 - } 462 - return 0; 463 - } 464 - 465 - /* For non-OF case */ 466 - for (i = 0; i < pdev->num_resources; i++) { 467 - struct resource *res = &pdev->resource[i]; 468 - int dma_req; 469 - 470 - if (!(res->flags & IORESOURCE_DMA)) 471 - continue; 472 - 473 - dma_req = 
(int)res->start; 474 - if (dma_req >= dma_req_min && dma_req < dma_req_max) 475 - clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start), 476 - ecc->channel_unused); 477 - } 478 - 479 - return 0; 480 - } 481 - 482 415 static void edma_setup_interrupt(struct edma_chan *echan, bool enable) 483 416 { 484 417 struct edma_cc *ecc = echan->ecc; ··· 576 617 int j = (channel >> 5); 577 618 unsigned int mask = BIT(channel & 0x1f); 578 619 579 - if (test_bit(channel, ecc->channel_unused)) { 620 + if (!echan->hw_triggered) { 580 621 /* EDMA channels without event association */ 581 622 dev_dbg(ecc->dev, "ESR%d %08x\n", j, 582 623 edma_shadow0_read_array(ecc, SH_ESR, j)); ··· 692 733 { 693 734 struct edma_cc *ecc = echan->ecc; 694 735 int channel = EDMA_CHAN_SLOT(echan->ch_num); 695 - 696 - if (!ecc->unused_chan_list_done) { 697 - /* 698 - * Scan all the platform devices to find out the EDMA channels 699 - * used and clear them in the unused list, making the rest 700 - * available for ARM usage. 701 - */ 702 - int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc, 703 - prepare_unused_channel_list); 704 - if (ret < 0) 705 - return ret; 706 - 707 - ecc->unused_chan_list_done = true; 708 - } 709 736 710 737 /* ensure access through shadow region 0 */ 711 738 edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); ··· 844 899 if (echan->edesc) { 845 900 edma_stop(echan); 846 901 /* Move the cyclic channel back to default queue */ 847 - if (echan->edesc->cyclic) 902 + if (!echan->tc && echan->edesc->cyclic) 848 903 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); 849 904 /* 850 905 * free the running request descriptor ··· 1348 1403 } 1349 1404 1350 1405 /* Place the cyclic channel to highest priority queue */ 1351 - edma_assign_channel_eventq(echan, EVENTQ_0); 1406 + if (!echan->tc) 1407 + edma_assign_channel_eventq(echan, EVENTQ_0); 1352 1408 1353 1409 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 1354 1410 } ··· 1555 1609 return IRQ_HANDLED; 1556 
1610 } 1557 1611 1612 + static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) 1613 + { 1614 + struct platform_device *tc_pdev; 1615 + int ret; 1616 + 1617 + if (!tc) 1618 + return; 1619 + 1620 + tc_pdev = of_find_device_by_node(tc->node); 1621 + if (!tc_pdev) { 1622 + pr_err("%s: TPTC device is not found\n", __func__); 1623 + return; 1624 + } 1625 + if (!pm_runtime_enabled(&tc_pdev->dev)) 1626 + pm_runtime_enable(&tc_pdev->dev); 1627 + 1628 + if (enable) 1629 + ret = pm_runtime_get_sync(&tc_pdev->dev); 1630 + else 1631 + ret = pm_runtime_put_sync(&tc_pdev->dev); 1632 + 1633 + if (ret < 0) 1634 + pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__, 1635 + enable ? "get" : "put", dev_name(&tc_pdev->dev)); 1636 + } 1637 + 1558 1638 /* Alloc channel resources */ 1559 1639 static int edma_alloc_chan_resources(struct dma_chan *chan) 1560 1640 { 1561 1641 struct edma_chan *echan = to_edma_chan(chan); 1562 - struct device *dev = chan->device->dev; 1642 + struct edma_cc *ecc = echan->ecc; 1643 + struct device *dev = ecc->dev; 1644 + enum dma_event_q eventq_no = EVENTQ_DEFAULT; 1563 1645 int ret; 1564 1646 1565 - ret = edma_alloc_channel(echan, EVENTQ_DEFAULT); 1647 + if (echan->tc) { 1648 + eventq_no = echan->tc->id; 1649 + } else if (ecc->tc_list) { 1650 + /* memcpy channel */ 1651 + echan->tc = &ecc->tc_list[ecc->info->default_queue]; 1652 + eventq_no = echan->tc->id; 1653 + } 1654 + 1655 + ret = edma_alloc_channel(echan, eventq_no); 1566 1656 if (ret) 1567 1657 return ret; 1568 1658 1569 - echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num); 1659 + echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); 1570 1660 if (echan->slot[0] < 0) { 1571 1661 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1572 1662 EDMA_CHAN_SLOT(echan->ch_num)); ··· 1613 1631 edma_set_chmap(echan, echan->slot[0]); 1614 1632 echan->alloced = true; 1615 1633 1616 - dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, 1617 - 
EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); 1634 + dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", 1635 + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, 1636 + echan->hw_triggered ? "HW" : "SW"); 1637 + 1638 + edma_tc_set_pm_state(echan->tc, true); 1618 1639 1619 1640 return 0; 1620 1641 ··· 1630 1645 static void edma_free_chan_resources(struct dma_chan *chan) 1631 1646 { 1632 1647 struct edma_chan *echan = to_edma_chan(chan); 1648 + struct device *dev = echan->ecc->dev; 1633 1649 int i; 1634 1650 1635 1651 /* Terminate transfers */ ··· 1655 1669 echan->alloced = false; 1656 1670 } 1657 1671 1658 - dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num); 1672 + edma_tc_set_pm_state(echan->tc, false); 1673 + echan->tc = NULL; 1674 + echan->hw_triggered = false; 1675 + 1676 + dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", 1677 + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); 1659 1678 } 1660 1679 1661 1680 /* Send pending descriptor to hardware */ ··· 1747 1756 return ret; 1748 1757 } 1749 1758 1759 + static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) 1760 + { 1761 + s16 *memcpy_ch = memcpy_channels; 1762 + 1763 + if (!memcpy_channels) 1764 + return false; 1765 + while (*memcpy_ch != -1) { 1766 + if (*memcpy_ch == ch_num) 1767 + return true; 1768 + memcpy_ch++; 1769 + } 1770 + return false; 1771 + } 1772 + 1750 1773 #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 1751 1774 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 1752 1775 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 1753 1776 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 1754 1777 1755 - static void edma_dma_init(struct edma_cc *ecc) 1778 + static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) 1756 1779 { 1757 - struct dma_device *ddev = &ecc->dma_slave; 1780 + struct dma_device *s_ddev = &ecc->dma_slave; 1781 + struct dma_device *m_ddev = NULL; 1782 + s16 *memcpy_channels = ecc->info->memcpy_channels; 1758 1783 int i, j; 1759 1784 1760 - 
dma_cap_zero(ddev->cap_mask); 1761 - dma_cap_set(DMA_SLAVE, ddev->cap_mask); 1762 - dma_cap_set(DMA_CYCLIC, ddev->cap_mask); 1763 - dma_cap_set(DMA_MEMCPY, ddev->cap_mask); 1785 + dma_cap_zero(s_ddev->cap_mask); 1786 + dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); 1787 + dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); 1788 + if (ecc->legacy_mode && !memcpy_channels) { 1789 + dev_warn(ecc->dev, 1790 + "Legacy memcpy is enabled, things might not work\n"); 1764 1791 1765 - ddev->device_prep_slave_sg = edma_prep_slave_sg; 1766 - ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; 1767 - ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; 1768 - ddev->device_alloc_chan_resources = edma_alloc_chan_resources; 1769 - ddev->device_free_chan_resources = edma_free_chan_resources; 1770 - ddev->device_issue_pending = edma_issue_pending; 1771 - ddev->device_tx_status = edma_tx_status; 1772 - ddev->device_config = edma_slave_config; 1773 - ddev->device_pause = edma_dma_pause; 1774 - ddev->device_resume = edma_dma_resume; 1775 - ddev->device_terminate_all = edma_terminate_all; 1792 + dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); 1793 + s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; 1794 + s_ddev->directions = BIT(DMA_MEM_TO_MEM); 1795 + } 1776 1796 1777 - ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; 1778 - ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; 1779 - ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1780 - ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1797 + s_ddev->device_prep_slave_sg = edma_prep_slave_sg; 1798 + s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; 1799 + s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; 1800 + s_ddev->device_free_chan_resources = edma_free_chan_resources; 1801 + s_ddev->device_issue_pending = edma_issue_pending; 1802 + s_ddev->device_tx_status = edma_tx_status; 1803 + s_ddev->device_config = edma_slave_config; 1804 + s_ddev->device_pause = edma_dma_pause; 1805 + s_ddev->device_resume = 
edma_dma_resume; 1806 + s_ddev->device_terminate_all = edma_terminate_all; 1781 1807 1782 - ddev->dev = ecc->dev; 1808 + s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; 1809 + s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; 1810 + s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); 1811 + s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1783 1812 1784 - INIT_LIST_HEAD(&ddev->channels); 1813 + s_ddev->dev = ecc->dev; 1814 + INIT_LIST_HEAD(&s_ddev->channels); 1815 + 1816 + if (memcpy_channels) { 1817 + m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); 1818 + ecc->dma_memcpy = m_ddev; 1819 + 1820 + dma_cap_zero(m_ddev->cap_mask); 1821 + dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); 1822 + 1823 + m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; 1824 + m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; 1825 + m_ddev->device_free_chan_resources = edma_free_chan_resources; 1826 + m_ddev->device_issue_pending = edma_issue_pending; 1827 + m_ddev->device_tx_status = edma_tx_status; 1828 + m_ddev->device_config = edma_slave_config; 1829 + m_ddev->device_pause = edma_dma_pause; 1830 + m_ddev->device_resume = edma_dma_resume; 1831 + m_ddev->device_terminate_all = edma_terminate_all; 1832 + 1833 + m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; 1834 + m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; 1835 + m_ddev->directions = BIT(DMA_MEM_TO_MEM); 1836 + m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1837 + 1838 + m_ddev->dev = ecc->dev; 1839 + INIT_LIST_HEAD(&m_ddev->channels); 1840 + } else if (!ecc->legacy_mode) { 1841 + dev_info(ecc->dev, "memcpy is disabled\n"); 1842 + } 1785 1843 1786 1844 for (i = 0; i < ecc->num_channels; i++) { 1787 1845 struct edma_chan *echan = &ecc->slave_chans[i]; ··· 1838 1798 echan->ecc = ecc; 1839 1799 echan->vchan.desc_free = edma_desc_free; 1840 1800 1841 - vchan_init(&echan->vchan, ddev); 1801 + if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) 1802 + 
vchan_init(&echan->vchan, m_ddev); 1803 + else 1804 + vchan_init(&echan->vchan, s_ddev); 1842 1805 1843 1806 INIT_LIST_HEAD(&echan->node); 1844 1807 for (j = 0; j < EDMA_MAX_SLOTS; j++) ··· 1964 1921 return 0; 1965 1922 } 1966 1923 1967 - static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev) 1924 + static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 1925 + bool legacy_mode) 1968 1926 { 1969 1927 struct edma_soc_info *info; 1970 1928 struct property *prop; ··· 1976 1932 if (!info) 1977 1933 return ERR_PTR(-ENOMEM); 1978 1934 1979 - prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz); 1935 + if (legacy_mode) { 1936 + prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", 1937 + &sz); 1938 + if (prop) { 1939 + ret = edma_xbar_event_map(dev, info, sz); 1940 + if (ret) 1941 + return ERR_PTR(ret); 1942 + } 1943 + return info; 1944 + } 1945 + 1946 + /* Get the list of channels allocated to be used for memcpy */ 1947 + prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); 1980 1948 if (prop) { 1981 - ret = edma_xbar_event_map(dev, info, sz); 1949 + const char pname[] = "ti,edma-memcpy-channels"; 1950 + size_t nelm = sz / sizeof(s16); 1951 + s16 *memcpy_ch; 1952 + 1953 + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), 1954 + GFP_KERNEL); 1955 + if (!memcpy_ch) 1956 + return ERR_PTR(-ENOMEM); 1957 + 1958 + ret = of_property_read_u16_array(dev->of_node, pname, 1959 + (u16 *)memcpy_ch, nelm); 1982 1960 if (ret) 1983 1961 return ERR_PTR(ret); 1962 + 1963 + memcpy_ch[nelm] = -1; 1964 + info->memcpy_channels = memcpy_ch; 1965 + } 1966 + 1967 + prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", 1968 + &sz); 1969 + if (prop) { 1970 + const char pname[] = "ti,edma-reserved-slot-ranges"; 1971 + s16 (*rsv_slots)[2]; 1972 + size_t nelm = sz / sizeof(*rsv_slots); 1973 + struct edma_rsv_info *rsv_info; 1974 + 1975 + if (!nelm) 1976 + return info; 1977 + 1978 + rsv_info = 
devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 1979 + if (!rsv_info) 1980 + return ERR_PTR(-ENOMEM); 1981 + 1982 + rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), 1983 + GFP_KERNEL); 1984 + if (!rsv_slots) 1985 + return ERR_PTR(-ENOMEM); 1986 + 1987 + ret = of_property_read_u16_array(dev->of_node, pname, 1988 + (u16 *)rsv_slots, nelm * 2); 1989 + if (ret) 1990 + return ERR_PTR(ret); 1991 + 1992 + rsv_slots[nelm][0] = -1; 1993 + rsv_slots[nelm][1] = -1; 1994 + info->rsv = rsv_info; 1995 + info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; 1984 1996 } 1985 1997 1986 1998 return info; 1987 1999 } 2000 + 2001 + static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, 2002 + struct of_dma *ofdma) 2003 + { 2004 + struct edma_cc *ecc = ofdma->of_dma_data; 2005 + struct dma_chan *chan = NULL; 2006 + struct edma_chan *echan; 2007 + int i; 2008 + 2009 + if (!ecc || dma_spec->args_count < 1) 2010 + return NULL; 2011 + 2012 + for (i = 0; i < ecc->num_channels; i++) { 2013 + echan = &ecc->slave_chans[i]; 2014 + if (echan->ch_num == dma_spec->args[0]) { 2015 + chan = &echan->vchan.chan; 2016 + break; 2017 + } 2018 + } 2019 + 2020 + if (!chan) 2021 + return NULL; 2022 + 2023 + if (echan->ecc->legacy_mode && dma_spec->args_count == 1) 2024 + goto out; 2025 + 2026 + if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && 2027 + dma_spec->args[1] < echan->ecc->num_tc) { 2028 + echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; 2029 + goto out; 2030 + } 2031 + 2032 + return NULL; 2033 + out: 2034 + /* The channel is going to be used as HW synchronized */ 2035 + echan->hw_triggered = true; 2036 + return dma_get_slave_channel(chan); 2037 + } 1988 2038 #else 1989 - static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev) 2039 + static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 2040 + bool legacy_mode) 1990 2041 { 1991 2042 return ERR_PTR(-EINVAL); 2043 + } 2044 + 2045 + static struct dma_chan 
*of_edma_xlate(struct of_phandle_args *dma_spec, 2046 + struct of_dma *ofdma) 2047 + { 2048 + return NULL; 1992 2049 } 1993 2050 #endif 1994 2051 ··· 2098 1953 struct edma_soc_info *info = pdev->dev.platform_data; 2099 1954 s8 (*queue_priority_mapping)[2]; 2100 1955 int i, off, ln; 2101 - const s16 (*rsv_chans)[2]; 2102 1956 const s16 (*rsv_slots)[2]; 2103 1957 const s16 (*xbar_chans)[2]; 2104 1958 int irq; ··· 2106 1962 struct device_node *node = pdev->dev.of_node; 2107 1963 struct device *dev = &pdev->dev; 2108 1964 struct edma_cc *ecc; 1965 + bool legacy_mode = true; 2109 1966 int ret; 2110 1967 2111 1968 if (node) { 2112 - info = edma_setup_info_from_dt(dev); 1969 + const struct of_device_id *match; 1970 + 1971 + match = of_match_node(edma_of_ids, node); 1972 + if (match && (u32)match->data == EDMA_BINDING_TPCC) 1973 + legacy_mode = false; 1974 + 1975 + info = edma_setup_info_from_dt(dev, legacy_mode); 2113 1976 if (IS_ERR(info)) { 2114 1977 dev_err(dev, "failed to get DT data\n"); 2115 1978 return PTR_ERR(info); ··· 2145 1994 2146 1995 ecc->dev = dev; 2147 1996 ecc->id = pdev->id; 1997 + ecc->legacy_mode = legacy_mode; 2148 1998 /* When booting with DT the pdev->id is -1 */ 2149 1999 if (ecc->id < 0) 2150 2000 ecc->id = 0; ··· 2176 2024 if (!ecc->slave_chans) 2177 2025 return -ENOMEM; 2178 2026 2179 - ecc->channel_unused = devm_kcalloc(dev, 2180 - BITS_TO_LONGS(ecc->num_channels), 2181 - sizeof(unsigned long), GFP_KERNEL); 2182 - if (!ecc->channel_unused) 2183 - return -ENOMEM; 2184 - 2185 2027 ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), 2186 2028 sizeof(unsigned long), GFP_KERNEL); 2187 2029 if (!ecc->slot_inuse) ··· 2186 2040 for (i = 0; i < ecc->num_slots; i++) 2187 2041 edma_write_slot(ecc, i, &dummy_paramset); 2188 2042 2189 - /* Mark all channels as unused */ 2190 - memset(ecc->channel_unused, 0xff, sizeof(ecc->channel_unused)); 2191 - 2192 2043 if (info->rsv) { 2193 - /* Clear the reserved channels in unused list */ 2194 - 
rsv_chans = info->rsv->rsv_chans; 2195 - if (rsv_chans) { 2196 - for (i = 0; rsv_chans[i][0] != -1; i++) { 2197 - off = rsv_chans[i][0]; 2198 - ln = rsv_chans[i][1]; 2199 - clear_bits(off, ln, ecc->channel_unused); 2200 - } 2201 - } 2202 - 2203 2044 /* Set the reserved slots in inuse list */ 2204 2045 rsv_slots = info->rsv->rsv_slots; 2205 2046 if (rsv_slots) { ··· 2203 2070 if (xbar_chans) { 2204 2071 for (i = 0; xbar_chans[i][1] != -1; i++) { 2205 2072 off = xbar_chans[i][1]; 2206 - clear_bits(off, 1, ecc->channel_unused); 2207 2073 } 2208 2074 } 2209 2075 ··· 2244 2112 2245 2113 queue_priority_mapping = info->queue_priority_mapping; 2246 2114 2115 + if (!ecc->legacy_mode) { 2116 + int lowest_priority = 0; 2117 + struct of_phandle_args tc_args; 2118 + 2119 + ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, 2120 + sizeof(*ecc->tc_list), GFP_KERNEL); 2121 + if (!ecc->tc_list) 2122 + return -ENOMEM; 2123 + 2124 + for (i = 0;; i++) { 2125 + ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", 2126 + 1, i, &tc_args); 2127 + if (ret || i == ecc->num_tc) 2128 + break; 2129 + 2130 + ecc->tc_list[i].node = tc_args.np; 2131 + ecc->tc_list[i].id = i; 2132 + queue_priority_mapping[i][1] = tc_args.args[0]; 2133 + if (queue_priority_mapping[i][1] > lowest_priority) { 2134 + lowest_priority = queue_priority_mapping[i][1]; 2135 + info->default_queue = i; 2136 + } 2137 + } 2138 + } 2139 + 2247 2140 /* Event queue priority mapping */ 2248 2141 for (i = 0; queue_priority_mapping[i][0] != -1; i++) 2249 2142 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], ··· 2282 2125 ecc->info = info; 2283 2126 2284 2127 /* Init the dma device and channels */ 2285 - edma_dma_init(ecc); 2128 + edma_dma_init(ecc, legacy_mode); 2286 2129 2287 2130 for (i = 0; i < ecc->num_channels; i++) { 2288 2131 /* Assign all channels to the default queue */ ··· 2293 2136 } 2294 2137 2295 2138 ret = dma_async_device_register(&ecc->dma_slave); 2296 - if (ret) 2139 + if (ret) { 2140 + dev_err(dev, 
"slave ddev registration failed (%d)\n", ret); 2297 2141 goto err_reg1; 2142 + } 2143 + 2144 + if (ecc->dma_memcpy) { 2145 + ret = dma_async_device_register(ecc->dma_memcpy); 2146 + if (ret) { 2147 + dev_err(dev, "memcpy ddev registration failed (%d)\n", 2148 + ret); 2149 + dma_async_device_unregister(&ecc->dma_slave); 2150 + goto err_reg1; 2151 + } 2152 + } 2298 2153 2299 2154 if (node) 2300 - of_dma_controller_register(node, of_dma_xlate_by_chan_id, 2301 - &ecc->dma_slave); 2155 + of_dma_controller_register(node, of_edma_xlate, ecc); 2302 2156 2303 2157 dev_info(dev, "TI EDMA DMA engine driver\n"); 2304 2158 ··· 2328 2160 if (dev->of_node) 2329 2161 of_dma_controller_free(dev->of_node); 2330 2162 dma_async_device_unregister(&ecc->dma_slave); 2163 + if (ecc->dma_memcpy) 2164 + dma_async_device_unregister(ecc->dma_memcpy); 2331 2165 edma_free_slot(ecc, ecc->dummy_slot); 2332 2166 2333 2167 return 0; 2334 2168 } 2335 2169 2336 2170 #ifdef CONFIG_PM_SLEEP 2171 + static int edma_pm_suspend(struct device *dev) 2172 + { 2173 + struct edma_cc *ecc = dev_get_drvdata(dev); 2174 + struct edma_chan *echan = ecc->slave_chans; 2175 + int i; 2176 + 2177 + for (i = 0; i < ecc->num_channels; i++) { 2178 + if (echan[i].alloced) { 2179 + edma_setup_interrupt(&echan[i], false); 2180 + edma_tc_set_pm_state(echan[i].tc, false); 2181 + } 2182 + } 2183 + 2184 + return 0; 2185 + } 2186 + 2337 2187 static int edma_pm_resume(struct device *dev) 2338 2188 { 2339 2189 struct edma_cc *ecc = dev_get_drvdata(dev); ··· 2376 2190 2377 2191 /* Set up channel -> slot mapping for the entry slot */ 2378 2192 edma_set_chmap(&echan[i], echan[i].slot[0]); 2193 + 2194 + edma_tc_set_pm_state(echan[i].tc, true); 2379 2195 } 2380 2196 } 2381 2197 ··· 2386 2198 #endif 2387 2199 2388 2200 static const struct dev_pm_ops edma_pm_ops = { 2389 - SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume) 2201 + SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) 2390 2202 }; 2391 2203 2392 2204 static struct 
platform_driver edma_driver = { ··· 2401 2213 2402 2214 bool edma_filter_fn(struct dma_chan *chan, void *param) 2403 2215 { 2216 + bool match = false; 2217 + 2404 2218 if (chan->device->dev->driver == &edma_driver.driver) { 2405 2219 struct edma_chan *echan = to_edma_chan(chan); 2406 2220 unsigned ch_req = *(unsigned *)param; 2407 - return ch_req == echan->ch_num; 2221 + if (ch_req == echan->ch_num) { 2222 + /* The channel is going to be used as HW synchronized */ 2223 + echan->hw_triggered = true; 2224 + match = true; 2225 + } 2408 2226 } 2409 - return false; 2227 + return match; 2410 2228 } 2411 2229 EXPORT_SYMBOL(edma_filter_fn); 2412 2230
+3
include/linux/platform_data/edma.h
··· 71 71 /* Resource reservation for other cores */ 72 72 struct edma_rsv_info *rsv; 73 73 74 + /* List of channels allocated for memcpy, terminated with -1 */ 75 + s16 *memcpy_channels; 76 + 74 77 s8 (*queue_priority_mapping)[2]; 75 78 const s16 (*xbar_chans)[2]; 76 79 };