Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

thunderbolt: Add KUnit tests for DMA tunnels

Add KUnit test cases that exercise DMA tunnel allocation (bidirectional, RX-only, and TX-only), a multi-hop tunnel chained across two devices, and DMA tunnel matching with wildcard HopIDs.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>

+240
+240
drivers/thunderbolt/test.c
··· 119 119 sw->ports[7].config.type = TB_TYPE_NHI; 120 120 sw->ports[7].config.max_in_hop_id = 11; 121 121 sw->ports[7].config.max_out_hop_id = 11; 122 + sw->ports[7].config.nfc_credits = 0x41800000; 122 123 123 124 sw->ports[8].config.type = TB_TYPE_PCIE_DOWN; 124 125 sw->ports[8].config.max_in_hop_id = 8; ··· 1595 1594 tb_tunnel_free(dp_tunnel); 1596 1595 } 1597 1596 1597 + static void tb_test_tunnel_dma(struct kunit *test) 1598 + { 1599 + struct tb_port *nhi, *port; 1600 + struct tb_tunnel *tunnel; 1601 + struct tb_switch *host; 1602 + 1603 + /* 1604 + * Create DMA tunnel from NHI to port 1 and back. 1605 + * 1606 + * [Host 1] 1607 + * 1 ^ In HopID 1 -> Out HopID 8 1608 + * | 1609 + * v In HopID 8 -> Out HopID 1 1610 + * ............ Domain border 1611 + * | 1612 + * [Host 2] 1613 + */ 1614 + host = alloc_host(test); 1615 + nhi = &host->ports[7]; 1616 + port = &host->ports[1]; 1617 + 1618 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); 1619 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1620 + KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA); 1621 + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); 1622 + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); 1623 + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); 1624 + /* RX path */ 1625 + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); 1626 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); 1627 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); 1628 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); 1629 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1); 1630 + /* TX path */ 1631 + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1); 1632 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); 1633 + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); 1634 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port); 1635 + KUNIT_EXPECT_EQ(test, 
tunnel->paths[1]->hops[0].next_hop_index, 8); 1636 + 1637 + tb_tunnel_free(tunnel); 1638 + } 1639 + 1640 + static void tb_test_tunnel_dma_rx(struct kunit *test) 1641 + { 1642 + struct tb_port *nhi, *port; 1643 + struct tb_tunnel *tunnel; 1644 + struct tb_switch *host; 1645 + 1646 + /* 1647 + * Create DMA RX tunnel from port 1 to NHI. 1648 + * 1649 + * [Host 1] 1650 + * 1 ^ 1651 + * | 1652 + * | In HopID 15 -> Out HopID 2 1653 + * ............ Domain border 1654 + * | 1655 + * [Host 2] 1656 + */ 1657 + host = alloc_host(test); 1658 + nhi = &host->ports[7]; 1659 + port = &host->ports[1]; 1660 + 1661 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2); 1662 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1663 + KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA); 1664 + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); 1665 + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); 1666 + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1); 1667 + /* RX path */ 1668 + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); 1669 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); 1670 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15); 1671 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); 1672 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2); 1673 + 1674 + tb_tunnel_free(tunnel); 1675 + } 1676 + 1677 + static void tb_test_tunnel_dma_tx(struct kunit *test) 1678 + { 1679 + struct tb_port *nhi, *port; 1680 + struct tb_tunnel *tunnel; 1681 + struct tb_switch *host; 1682 + 1683 + /* 1684 + * Create DMA TX tunnel from NHI to port 1. 1685 + * 1686 + * [Host 1] 1687 + * 1 | In HopID 2 -> Out HopID 15 1688 + * | 1689 + * v 1690 + * ............ 
Domain border 1691 + * | 1692 + * [Host 2] 1693 + */ 1694 + host = alloc_host(test); 1695 + nhi = &host->ports[7]; 1696 + port = &host->ports[1]; 1697 + 1698 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1); 1699 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1700 + KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA); 1701 + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); 1702 + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); 1703 + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1); 1704 + /* TX path */ 1705 + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); 1706 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi); 1707 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2); 1708 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port); 1709 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15); 1710 + 1711 + tb_tunnel_free(tunnel); 1712 + } 1713 + 1714 + static void tb_test_tunnel_dma_chain(struct kunit *test) 1715 + { 1716 + struct tb_switch *host, *dev1, *dev2; 1717 + struct tb_port *nhi, *port; 1718 + struct tb_tunnel *tunnel; 1719 + 1720 + /* 1721 + * Create DMA tunnel from NHI to Device #2 port 3 and back. 1722 + * 1723 + * [Host 1] 1724 + * 1 ^ In HopID 1 -> Out HopID x 1725 + * | 1726 + * 1 | In HopID x -> Out HopID 1 1727 + * [Device #1] 1728 + * 7 \ 1729 + * 1 \ 1730 + * [Device #2] 1731 + * 3 | In HopID x -> Out HopID 8 1732 + * | 1733 + * v In HopID 8 -> Out HopID x 1734 + * ............ 
Domain border 1735 + * | 1736 + * [Host 2] 1737 + */ 1738 + host = alloc_host(test); 1739 + dev1 = alloc_dev_default(test, host, 0x1, true); 1740 + dev2 = alloc_dev_default(test, dev1, 0x701, true); 1741 + 1742 + nhi = &host->ports[7]; 1743 + port = &dev2->ports[3]; 1744 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); 1745 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1746 + KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA); 1747 + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); 1748 + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); 1749 + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); 1750 + /* RX path */ 1751 + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3); 1752 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); 1753 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); 1754 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, 1755 + &dev2->ports[1]); 1756 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port, 1757 + &dev1->ports[7]); 1758 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, 1759 + &dev1->ports[1]); 1760 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port, 1761 + &host->ports[1]); 1762 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi); 1763 + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1); 1764 + /* TX path */ 1765 + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3); 1766 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); 1767 + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); 1768 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port, 1769 + &dev1->ports[1]); 1770 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, 1771 + &dev1->ports[7]); 1772 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port, 1773 + &dev2->ports[1]); 1774 + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port); 1775 + 
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8); 1776 + 1777 + tb_tunnel_free(tunnel); 1778 + } 1779 + 1780 + static void tb_test_tunnel_dma_match(struct kunit *test) 1781 + { 1782 + struct tb_port *nhi, *port; 1783 + struct tb_tunnel *tunnel; 1784 + struct tb_switch *host; 1785 + 1786 + host = alloc_host(test); 1787 + nhi = &host->ports[7]; 1788 + port = &host->ports[1]; 1789 + 1790 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1); 1791 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1792 + 1793 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); 1794 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1)); 1795 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); 1796 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); 1797 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); 1798 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); 1799 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); 1800 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1)); 1801 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); 1802 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1)); 1803 + 1804 + tb_tunnel_free(tunnel); 1805 + 1806 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1); 1807 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1808 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); 1809 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); 1810 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); 1811 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); 1812 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); 1813 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); 1814 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); 1815 + 1816 + 
tb_tunnel_free(tunnel); 1817 + 1818 + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11); 1819 + KUNIT_ASSERT_TRUE(test, tunnel != NULL); 1820 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11)); 1821 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); 1822 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11)); 1823 + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); 1824 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); 1825 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11)); 1826 + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); 1827 + 1828 + tb_tunnel_free(tunnel); 1829 + } 1830 + 1598 1831 static const u32 root_directory[] = { 1599 1832 0x55584401, /* "UXD" v1 */ 1600 1833 0x00000018, /* Root directory length */ ··· 2100 1865 KUNIT_CASE(tb_test_tunnel_dp_max_length), 2101 1866 KUNIT_CASE(tb_test_tunnel_port_on_path), 2102 1867 KUNIT_CASE(tb_test_tunnel_usb3), 1868 + KUNIT_CASE(tb_test_tunnel_dma), 1869 + KUNIT_CASE(tb_test_tunnel_dma_rx), 1870 + KUNIT_CASE(tb_test_tunnel_dma_tx), 1871 + KUNIT_CASE(tb_test_tunnel_dma_chain), 1872 + KUNIT_CASE(tb_test_tunnel_dma_match), 2103 1873 KUNIT_CASE(tb_test_property_parse), 2104 1874 KUNIT_CASE(tb_test_property_format), 2105 1875 KUNIT_CASE(tb_test_property_copy),