Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'davinci-for-v3.11/soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci into next/soc

From Sekhar Nori:

DaVinci SoC updates for v3.11 - part 2

This pull request adds DT and runtime PM support
to the EDMA ARM private API so it can be used on
DT-enabled DaVinci and OMAP platforms.

Also adds DMA channel crossbar mapping
support to be used by DT-enabled platforms
which use it.

* tag 'davinci-for-v3.11/soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci:
dmaengine: edma: enable build for AM33XX
ARM: edma: Add EDMA crossbar event mux support
ARM: edma: Add DT and runtime PM support to the private EDMA API
dmaengine: edma: Add TI EDMA device tree binding
ARM: edma: Convert to devm_* api

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+328 -79
+34
Documentation/devicetree/bindings/dma/ti-edma.txt
··· 1 + TI EDMA 2 + 3 + Required properties: 4 + - compatible : "ti,edma3" 5 + - ti,edma-regions: Number of regions 6 + - ti,edma-slots: Number of slots 7 + - #dma-cells: Should be set to <1> 8 + Clients should use a single channel number per DMA request. 9 + - dma-channels: Specify total DMA channels per CC 10 + - reg: Memory map for accessing module 11 + - interrupt-parent: Interrupt controller the interrupt is routed through 12 + - interrupts: Exactly 3 interrupts need to be specified in the order: 13 + 1. Transfer completion interrupt. 14 + 2. Memory protection interrupt. 15 + 3. Error interrupt. 16 + Optional properties: 17 + - ti,hwmods: Name of the hwmods associated to the EDMA 18 + - ti,edma-xbar-event-map: Crossbar event to channel map 19 + 20 + Example: 21 + 22 + edma: edma@49000000 { 23 + reg = <0x49000000 0x10000>; 24 + interrupt-parent = <&intc>; 25 + interrupts = <12 13 14>; 26 + compatible = "ti,edma3"; 27 + ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; 28 + #dma-cells = <1>; 29 + dma-channels = <64>; 30 + ti,edma-regions = <4>; 31 + ti,edma-slots = <256>; 32 + ti,edma-xbar-event-map = <1 12 33 + 2 13>; 34 + };
+275 -62
arch/arm/common/edma.c
··· 17 17 * along with this program; if not, write to the Free Software 18 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 19 */ 20 + #include <linux/err.h> 20 21 #include <linux/kernel.h> 21 22 #include <linux/init.h> 22 23 #include <linux/module.h> ··· 25 24 #include <linux/platform_device.h> 26 25 #include <linux/io.h> 27 26 #include <linux/slab.h> 27 + #include <linux/edma.h> 28 + #include <linux/err.h> 29 + #include <linux/of_address.h> 30 + #include <linux/of_device.h> 31 + #include <linux/of_dma.h> 32 + #include <linux/of_irq.h> 33 + #include <linux/pm_runtime.h> 28 34 29 35 #include <linux/platform_data/edma.h> 30 36 ··· 1376 1368 } 1377 1369 EXPORT_SYMBOL(edma_clear_event); 1378 1370 1379 - /*-----------------------------------------------------------------------*/ 1371 + #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) 1380 1372 1381 - static int __init edma_probe(struct platform_device *pdev) 1373 + static int edma_of_read_u32_to_s16_array(const struct device_node *np, 1374 + const char *propname, s16 *out_values, 1375 + size_t sz) 1376 + { 1377 + int ret; 1378 + 1379 + ret = of_property_read_u16_array(np, propname, out_values, sz); 1380 + if (ret) 1381 + return ret; 1382 + 1383 + /* Terminate it */ 1384 + *out_values++ = -1; 1385 + *out_values++ = -1; 1386 + 1387 + return 0; 1388 + } 1389 + 1390 + static int edma_xbar_event_map(struct device *dev, 1391 + struct device_node *node, 1392 + struct edma_soc_info *pdata, int len) 1393 + { 1394 + int ret, i; 1395 + struct resource res; 1396 + void __iomem *xbar; 1397 + const s16 (*xbar_chans)[2]; 1398 + u32 shift, offset, mux; 1399 + 1400 + xbar_chans = devm_kzalloc(dev, 1401 + len/sizeof(s16) + 2*sizeof(s16), 1402 + GFP_KERNEL); 1403 + if (!xbar_chans) 1404 + return -ENOMEM; 1405 + 1406 + ret = of_address_to_resource(node, 1, &res); 1407 + if (ret) 1408 + return -EIO; 1409 + 1410 + xbar = devm_ioremap(dev, res.start, resource_size(&res)); 1411 + if (!xbar) 1412 + return -ENOMEM; 
1413 + 1414 + ret = edma_of_read_u32_to_s16_array(node, 1415 + "ti,edma-xbar-event-map", 1416 + (s16 *)xbar_chans, 1417 + len/sizeof(u32)); 1418 + if (ret) 1419 + return -EIO; 1420 + 1421 + for (i = 0; xbar_chans[i][0] != -1; i++) { 1422 + shift = (xbar_chans[i][1] & 0x03) << 3; 1423 + offset = xbar_chans[i][1] & 0xfffffffc; 1424 + mux = readl(xbar + offset); 1425 + mux &= ~(0xff << shift); 1426 + mux |= xbar_chans[i][0] << shift; 1427 + writel(mux, (xbar + offset)); 1428 + } 1429 + 1430 + pdata->xbar_chans = xbar_chans; 1431 + 1432 + return 0; 1433 + } 1434 + 1435 + static int edma_of_parse_dt(struct device *dev, 1436 + struct device_node *node, 1437 + struct edma_soc_info *pdata) 1438 + { 1439 + int ret = 0, i; 1440 + u32 value; 1441 + struct property *prop; 1442 + size_t sz; 1443 + struct edma_rsv_info *rsv_info; 1444 + s8 (*queue_tc_map)[2], (*queue_priority_map)[2]; 1445 + 1446 + memset(pdata, 0, sizeof(struct edma_soc_info)); 1447 + 1448 + ret = of_property_read_u32(node, "dma-channels", &value); 1449 + if (ret < 0) 1450 + return ret; 1451 + pdata->n_channel = value; 1452 + 1453 + ret = of_property_read_u32(node, "ti,edma-regions", &value); 1454 + if (ret < 0) 1455 + return ret; 1456 + pdata->n_region = value; 1457 + 1458 + ret = of_property_read_u32(node, "ti,edma-slots", &value); 1459 + if (ret < 0) 1460 + return ret; 1461 + pdata->n_slot = value; 1462 + 1463 + pdata->n_cc = 1; 1464 + 1465 + rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); 1466 + if (!rsv_info) 1467 + return -ENOMEM; 1468 + pdata->rsv = rsv_info; 1469 + 1470 + queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL); 1471 + if (!queue_tc_map) 1472 + return -ENOMEM; 1473 + 1474 + for (i = 0; i < 3; i++) { 1475 + queue_tc_map[i][0] = i; 1476 + queue_tc_map[i][1] = i; 1477 + } 1478 + queue_tc_map[i][0] = -1; 1479 + queue_tc_map[i][1] = -1; 1480 + 1481 + pdata->queue_tc_mapping = queue_tc_map; 1482 + 1483 + queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), 
GFP_KERNEL); 1484 + if (!queue_priority_map) 1485 + return -ENOMEM; 1486 + 1487 + for (i = 0; i < 3; i++) { 1488 + queue_priority_map[i][0] = i; 1489 + queue_priority_map[i][1] = i; 1490 + } 1491 + queue_priority_map[i][0] = -1; 1492 + queue_priority_map[i][1] = -1; 1493 + 1494 + pdata->queue_priority_mapping = queue_priority_map; 1495 + 1496 + pdata->default_queue = 0; 1497 + 1498 + prop = of_find_property(node, "ti,edma-xbar-event-map", &sz); 1499 + if (prop) 1500 + ret = edma_xbar_event_map(dev, node, pdata, sz); 1501 + 1502 + return ret; 1503 + } 1504 + 1505 + static struct of_dma_filter_info edma_filter_info = { 1506 + .filter_fn = edma_filter_fn, 1507 + }; 1508 + 1509 + static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 1510 + struct device_node *node) 1511 + { 1512 + struct edma_soc_info *info; 1513 + int ret; 1514 + 1515 + info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); 1516 + if (!info) 1517 + return ERR_PTR(-ENOMEM); 1518 + 1519 + ret = edma_of_parse_dt(dev, node, info); 1520 + if (ret) 1521 + return ERR_PTR(ret); 1522 + 1523 + dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap); 1524 + of_dma_controller_register(dev->of_node, of_dma_simple_xlate, 1525 + &edma_filter_info); 1526 + 1527 + return info; 1528 + } 1529 + #else 1530 + static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 1531 + struct device_node *node) 1532 + { 1533 + return ERR_PTR(-ENOSYS); 1534 + } 1535 + #endif 1536 + 1537 + static int edma_probe(struct platform_device *pdev) 1382 1538 { 1383 1539 struct edma_soc_info **info = pdev->dev.platform_data; 1384 - const s8 (*queue_priority_mapping)[2]; 1385 - const s8 (*queue_tc_mapping)[2]; 1540 + struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL}; 1541 + s8 (*queue_priority_mapping)[2]; 1542 + s8 (*queue_tc_mapping)[2]; 1386 1543 int i, j, off, ln, found = 0; 1387 1544 int status = -1; 1388 1545 const s16 (*rsv_chans)[2]; 1389 1546 const s16 (*rsv_slots)[2]; 1547 + const s16 
(*xbar_chans)[2]; 1390 1548 int irq[EDMA_MAX_CC] = {0, 0}; 1391 1549 int err_irq[EDMA_MAX_CC] = {0, 0}; 1392 1550 struct resource *r[EDMA_MAX_CC] = {NULL}; 1393 - resource_size_t len[EDMA_MAX_CC]; 1551 + struct resource res[EDMA_MAX_CC]; 1394 1552 char res_name[10]; 1395 1553 char irq_name[10]; 1554 + struct device_node *node = pdev->dev.of_node; 1555 + struct device *dev = &pdev->dev; 1556 + int ret; 1557 + 1558 + if (node) { 1559 + /* Check if this is a second instance registered */ 1560 + if (arch_num_cc) { 1561 + dev_err(dev, "only one EDMA instance is supported via DT\n"); 1562 + return -ENODEV; 1563 + } 1564 + 1565 + ninfo[0] = edma_setup_info_from_dt(dev, node); 1566 + if (IS_ERR(ninfo[0])) { 1567 + dev_err(dev, "failed to get DT data\n"); 1568 + return PTR_ERR(ninfo[0]); 1569 + } 1570 + 1571 + info = ninfo; 1572 + } 1396 1573 1397 1574 if (!info) 1398 1575 return -ENODEV; 1399 1576 1577 + pm_runtime_enable(dev); 1578 + ret = pm_runtime_get_sync(dev); 1579 + if (ret < 0) { 1580 + dev_err(dev, "pm_runtime_get_sync() failed\n"); 1581 + return ret; 1582 + } 1583 + 1400 1584 for (j = 0; j < EDMA_MAX_CC; j++) { 1401 - sprintf(res_name, "edma_cc%d", j); 1402 - r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1585 + if (!info[j]) { 1586 + if (!found) 1587 + return -ENODEV; 1588 + break; 1589 + } 1590 + if (node) { 1591 + ret = of_address_to_resource(node, j, &res[j]); 1592 + if (!ret) 1593 + r[j] = &res[j]; 1594 + } else { 1595 + sprintf(res_name, "edma_cc%d", j); 1596 + r[j] = platform_get_resource_byname(pdev, 1597 + IORESOURCE_MEM, 1403 1598 res_name); 1404 - if (!r[j] || !info[j]) { 1599 + } 1600 + if (!r[j]) { 1405 1601 if (found) 1406 1602 break; 1407 1603 else ··· 1614 1402 found = 1; 1615 1403 } 1616 1404 1617 - len[j] = resource_size(r[j]); 1405 + edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]); 1406 + if (IS_ERR(edmacc_regs_base[j])) 1407 + return PTR_ERR(edmacc_regs_base[j]); 1618 1408 1619 - r[j] = request_mem_region(r[j]->start, 
len[j], 1620 - dev_name(&pdev->dev)); 1621 - if (!r[j]) { 1622 - status = -EBUSY; 1623 - goto fail1; 1624 - } 1625 - 1626 - edmacc_regs_base[j] = ioremap(r[j]->start, len[j]); 1627 - if (!edmacc_regs_base[j]) { 1628 - status = -EBUSY; 1629 - goto fail1; 1630 - } 1631 - 1632 - edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL); 1633 - if (!edma_cc[j]) { 1634 - status = -ENOMEM; 1635 - goto fail1; 1636 - } 1409 + edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma), 1410 + GFP_KERNEL); 1411 + if (!edma_cc[j]) 1412 + return -ENOMEM; 1637 1413 1638 1414 edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel, 1639 1415 EDMA_MAX_DMACH); ··· 1652 1452 off = rsv_chans[i][0]; 1653 1453 ln = rsv_chans[i][1]; 1654 1454 clear_bits(off, ln, 1655 - edma_cc[j]->edma_unused); 1455 + edma_cc[j]->edma_unused); 1656 1456 } 1657 1457 } 1658 1458 ··· 1668 1468 } 1669 1469 } 1670 1470 1671 - sprintf(irq_name, "edma%d", j); 1672 - irq[j] = platform_get_irq_byname(pdev, irq_name); 1673 - edma_cc[j]->irq_res_start = irq[j]; 1674 - status = request_irq(irq[j], dma_irq_handler, 0, "edma", 1675 - &pdev->dev); 1676 - if (status < 0) { 1677 - dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", 1678 - irq[j], status); 1679 - goto fail; 1471 + /* Clear the xbar mapped channels in unused list */ 1472 + xbar_chans = info[j]->xbar_chans; 1473 + if (xbar_chans) { 1474 + for (i = 0; xbar_chans[i][1] != -1; i++) { 1475 + off = xbar_chans[i][1]; 1476 + clear_bits(off, 1, 1477 + edma_cc[j]->edma_unused); 1478 + } 1680 1479 } 1681 1480 1682 - sprintf(irq_name, "edma%d_err", j); 1683 - err_irq[j] = platform_get_irq_byname(pdev, irq_name); 1684 - edma_cc[j]->irq_res_end = err_irq[j]; 1685 - status = request_irq(err_irq[j], dma_ccerr_handler, 0, 1686 - "edma_error", &pdev->dev); 1481 + if (node) { 1482 + irq[j] = irq_of_parse_and_map(node, 0); 1483 + } else { 1484 + sprintf(irq_name, "edma%d", j); 1485 + irq[j] = platform_get_irq_byname(pdev, irq_name); 1486 + } 1487 + 
edma_cc[j]->irq_res_start = irq[j]; 1488 + status = devm_request_irq(&pdev->dev, irq[j], 1489 + dma_irq_handler, 0, "edma", 1490 + &pdev->dev); 1687 1491 if (status < 0) { 1688 - dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", 1492 + dev_dbg(&pdev->dev, 1493 + "devm_request_irq %d failed --> %d\n", 1494 + irq[j], status); 1495 + return status; 1496 + } 1497 + 1498 + if (node) { 1499 + err_irq[j] = irq_of_parse_and_map(node, 2); 1500 + } else { 1501 + sprintf(irq_name, "edma%d_err", j); 1502 + err_irq[j] = platform_get_irq_byname(pdev, irq_name); 1503 + } 1504 + edma_cc[j]->irq_res_end = err_irq[j]; 1505 + status = devm_request_irq(&pdev->dev, err_irq[j], 1506 + dma_ccerr_handler, 0, 1507 + "edma_error", &pdev->dev); 1508 + if (status < 0) { 1509 + dev_dbg(&pdev->dev, 1510 + "devm_request_irq %d failed --> %d\n", 1689 1511 err_irq[j], status); 1690 - goto fail; 1512 + return status; 1691 1513 } 1692 1514 1693 1515 for (i = 0; i < edma_cc[j]->num_channels; i++) ··· 1744 1522 } 1745 1523 1746 1524 return 0; 1747 - 1748 - fail: 1749 - for (i = 0; i < EDMA_MAX_CC; i++) { 1750 - if (err_irq[i]) 1751 - free_irq(err_irq[i], &pdev->dev); 1752 - if (irq[i]) 1753 - free_irq(irq[i], &pdev->dev); 1754 - } 1755 - fail1: 1756 - for (i = 0; i < EDMA_MAX_CC; i++) { 1757 - if (r[i]) 1758 - release_mem_region(r[i]->start, len[i]); 1759 - if (edmacc_regs_base[i]) 1760 - iounmap(edmacc_regs_base[i]); 1761 - kfree(edma_cc[i]); 1762 - } 1763 - return status; 1764 1525 } 1765 1526 1527 + static const struct of_device_id edma_of_ids[] = { 1528 + { .compatible = "ti,edma3", }, 1529 + {} 1530 + }; 1766 1531 1767 1532 static struct platform_driver edma_driver = { 1768 - .driver.name = "edma", 1533 + .driver = { 1534 + .name = "edma", 1535 + .of_match_table = edma_of_ids, 1536 + }, 1537 + .probe = edma_probe, 1769 1538 }; 1770 1539 1771 1540 static int __init edma_init(void)
+4 -4
arch/arm/mach-davinci/devices-da8xx.c
··· 105 105 }, 106 106 }; 107 107 108 - static const s8 da8xx_queue_tc_mapping[][2] = { 108 + static s8 da8xx_queue_tc_mapping[][2] = { 109 109 /* {event queue no, TC no} */ 110 110 {0, 0}, 111 111 {1, 1}, 112 112 {-1, -1} 113 113 }; 114 114 115 - static const s8 da8xx_queue_priority_mapping[][2] = { 115 + static s8 da8xx_queue_priority_mapping[][2] = { 116 116 /* {event queue no, Priority} */ 117 117 {0, 3}, 118 118 {1, 7}, 119 119 {-1, -1} 120 120 }; 121 121 122 - static const s8 da850_queue_tc_mapping[][2] = { 122 + static s8 da850_queue_tc_mapping[][2] = { 123 123 /* {event queue no, TC no} */ 124 124 {0, 0}, 125 125 {-1, -1} 126 126 }; 127 127 128 - static const s8 da850_queue_priority_mapping[][2] = { 128 + static s8 da850_queue_priority_mapping[][2] = { 129 129 /* {event queue no, Priority} */ 130 130 {0, 3}, 131 131 {-1, -1}
+2 -2
arch/arm/mach-davinci/devices-tnetv107x.c
··· 58 58 #define TNETV107X_DMACH_SDIO1_RX 28 59 59 #define TNETV107X_DMACH_SDIO1_TX 29 60 60 61 - static const s8 edma_tc_mapping[][2] = { 61 + static s8 edma_tc_mapping[][2] = { 62 62 /* event queue no TC no */ 63 63 { 0, 0 }, 64 64 { 1, 1 }, 65 65 { -1, -1 } 66 66 }; 67 67 68 - static const s8 edma_priority_mapping[][2] = { 68 + static s8 edma_priority_mapping[][2] = { 69 69 /* event queue no Prio */ 70 70 { 0, 3 }, 71 71 { 1, 7 },
+2 -2
arch/arm/mach-davinci/dm355.c
··· 569 569 570 570 /*----------------------------------------------------------------------*/ 571 571 572 - static const s8 572 + static s8 573 573 queue_tc_mapping[][2] = { 574 574 /* {event queue no, TC no} */ 575 575 {0, 0}, ··· 577 577 {-1, -1}, 578 578 }; 579 579 580 - static const s8 580 + static s8 581 581 queue_priority_mapping[][2] = { 582 582 /* {event queue no, Priority} */ 583 583 {0, 3},
+2 -2
arch/arm/mach-davinci/dm365.c
··· 826 826 }; 827 827 828 828 /* Four Transfer Controllers on DM365 */ 829 - static const s8 829 + static s8 830 830 dm365_queue_tc_mapping[][2] = { 831 831 /* {event queue no, TC no} */ 832 832 {0, 0}, ··· 836 836 {-1, -1}, 837 837 }; 838 838 839 - static const s8 839 + static s8 840 840 dm365_queue_priority_mapping[][2] = { 841 841 /* {event queue no, Priority} */ 842 842 {0, 7},
+2 -2
arch/arm/mach-davinci/dm644x.c
··· 497 497 498 498 /*----------------------------------------------------------------------*/ 499 499 500 - static const s8 500 + static s8 501 501 queue_tc_mapping[][2] = { 502 502 /* {event queue no, TC no} */ 503 503 {0, 0}, ··· 505 505 {-1, -1}, 506 506 }; 507 507 508 - static const s8 508 + static s8 509 509 queue_priority_mapping[][2] = { 510 510 /* {event queue no, Priority} */ 511 511 {0, 3},
+2 -2
arch/arm/mach-davinci/dm646x.c
··· 531 531 /*----------------------------------------------------------------------*/ 532 532 533 533 /* Four Transfer Controllers on DM646x */ 534 - static const s8 534 + static s8 535 535 dm646x_queue_tc_mapping[][2] = { 536 536 /* {event queue no, TC no} */ 537 537 {0, 0}, ··· 541 541 {-1, -1}, 542 542 }; 543 543 544 - static const s8 544 + static s8 545 545 dm646x_queue_priority_mapping[][2] = { 546 546 /* {event queue no, Priority} */ 547 547 {0, 4},
+1
arch/arm/mach-omap2/Kconfig
··· 17 17 select PROC_DEVICETREE if PROC_FS 18 18 select SOC_BUS 19 19 select SPARSE_IRQ 20 + select TI_PRIV_EDMA 20 21 select USE_OF 21 22 help 22 23 Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
+1 -1
drivers/dma/Kconfig
··· 213 213 214 214 config TI_EDMA 215 215 tristate "TI EDMA support" 216 - depends on ARCH_DAVINCI 216 + depends on ARCH_DAVINCI || ARCH_OMAP 217 217 select DMA_ENGINE 218 218 select DMA_VIRTUAL_CHANNELS 219 219 default n
+3 -2
include/linux/platform_data/edma.h
··· 175 175 /* Resource reservation for other cores */ 176 176 struct edma_rsv_info *rsv; 177 177 178 - const s8 (*queue_tc_mapping)[2]; 179 - const s8 (*queue_priority_mapping)[2]; 178 + s8 (*queue_tc_mapping)[2]; 179 + s8 (*queue_priority_mapping)[2]; 180 + const s16 (*xbar_chans)[2]; 180 181 }; 181 182 182 183 #endif