Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

thunderbolt: Handle DisplayPort tunnel activation asynchronously

Sometimes setting up a DisplayPort tunnel may take quite long time. The
reason is that the graphics driver (DPRX) is expected to issue read of
certain monitor capabilities over the AUX channel and the "suggested"
timeout from VESA is 5 seconds. If there is no graphics driver loaded
this does not happen and currently we timeout and tear the tunnel down.
The reason for this is that at least Intel discrete USB4 controllers do
not send plug/unplug events about whether the DisplayPort cable from the
GPU to the controller is connected or not, so in order to "release" the
DisplayPort OUT adapter (the one that has monitor connected) we must
tear the tunnel down after this timeout has elapsed.

In typical cases there is always graphics driver loaded, and also all
the cables are connected but for instance in Intel graphics CI they only
load the graphics driver after the system is fully booted up. This
makes the driver tear down the DisplayPort tunnel. To help this case
we allow passing bigger or indefinite timeout through a new module
parameter (dprx_timeout). To keep the driver a bit more responsive during
that time we change the way DisplayPort tunnels get activated. We first
do the normal tunnel setup and then run the polling of DPRX capabilities
read completion in a separate worker. This also makes the driver more
responsive in accepting bandwidth requests to already established
DisplayPort tunnels.

If the tunnel still fails to establish we will tear it down and remove
the DisplayPort IN adapter from the dp_resource list to avoid using it
again (unless we get hotplug to that adapter).

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>

+444 -208
+127 -47
drivers/thunderbolt/tb.c
··· 20 20 #define TB_RELEASE_BW_TIMEOUT 10000 /* ms */ 21 21 22 22 /* 23 + * How many time bandwidth allocation request from graphics driver is 24 + * retried if the DP tunnel is still activating. 25 + */ 26 + #define TB_BW_ALLOC_RETRIES 3 27 + 28 + /* 23 29 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver 24 30 * direction. This is 40G - 10% guard band bandwidth. 25 31 */ ··· 75 69 } 76 70 77 71 struct tb_hotplug_event { 78 - struct work_struct work; 72 + struct delayed_work work; 79 73 struct tb *tb; 80 74 u64 route; 81 75 u8 port; 82 76 bool unplug; 77 + int retry; 83 78 }; 84 79 85 80 static void tb_scan_port(struct tb_port *port); 86 81 static void tb_handle_hotplug(struct work_struct *work); 82 + static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port, 83 + const char *reason); 84 + static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port, 85 + int retry, unsigned long delay); 87 86 88 87 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) 89 88 { ··· 102 91 ev->route = route; 103 92 ev->port = port; 104 93 ev->unplug = unplug; 105 - INIT_WORK(&ev->work, tb_handle_hotplug); 106 - queue_work(tb->wq, &ev->work); 94 + INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug); 95 + queue_delayed_work(tb->wq, &ev->work, 0); 107 96 } 108 97 109 98 /* enumeration & hot plug handling */ ··· 973 962 return 0; 974 963 975 964 err_free: 976 - tb_tunnel_free(tunnel); 965 + tb_tunnel_put(tunnel); 977 966 err_reclaim: 978 967 if (tb_route(parent)) 979 968 tb_reclaim_usb3_bandwidth(tb, down, up); ··· 1737 1726 break; 1738 1727 } 1739 1728 1740 - tb_tunnel_free(tunnel); 1729 + tb_tunnel_put(tunnel); 1741 1730 } 1742 1731 1743 1732 /* ··· 1874 1863 return NULL; 1875 1864 } 1876 1865 1866 + static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data) 1867 + { 1868 + struct tb_port *in = tunnel->src_port; 1869 + struct tb_port *out = tunnel->dst_port; 1870 + struct tb *tb = data; 1871 + 
1872 + mutex_lock(&tb->lock); 1873 + if (tb_tunnel_is_active(tunnel)) { 1874 + int consumed_up, consumed_down, ret; 1875 + 1876 + tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n"); 1877 + 1878 + /* If fail reading tunnel's consumed bandwidth, tear it down */ 1879 + ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, 1880 + &consumed_down); 1881 + if (ret) { 1882 + tb_tunnel_warn(tunnel, 1883 + "failed to read consumed bandwidth, tearing down\n"); 1884 + tb_deactivate_and_free_tunnel(tunnel); 1885 + } else { 1886 + tb_reclaim_usb3_bandwidth(tb, in, out); 1887 + /* 1888 + * Transition the links to asymmetric if the 1889 + * consumption exceeds the threshold. 1890 + */ 1891 + tb_configure_asym(tb, in, out, consumed_up, 1892 + consumed_down); 1893 + /* 1894 + * Update the domain with the new bandwidth 1895 + * estimation. 1896 + */ 1897 + tb_recalc_estimated_bandwidth(tb); 1898 + /* 1899 + * In case of DP tunnel exists, change host 1900 + * router's 1st children TMU mode to HiFi for 1901 + * CL0s to work. 1902 + */ 1903 + tb_increase_tmu_accuracy(tunnel); 1904 + } 1905 + } else { 1906 + struct tb_port *in = tunnel->src_port; 1907 + 1908 + /* 1909 + * This tunnel failed to establish. This means DPRX 1910 + * negotiation most likely did not complete which 1911 + * happens either because there is no graphics driver 1912 + * loaded or not all DP cables where connected to the 1913 + * discrete router. 1914 + * 1915 + * In both cases we remove the DP IN adapter from the 1916 + * available resources as it is not usable. This will 1917 + * also tear down the tunnel and try to re-use the 1918 + * released DP OUT. 1919 + * 1920 + * It will be added back only if there is hotplug for 1921 + * the DP IN again. 
1922 + */ 1923 + tb_tunnel_warn(tunnel, "not active, tearing down\n"); 1924 + tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed"); 1925 + } 1926 + mutex_unlock(&tb->lock); 1927 + 1928 + tb_domain_put(tb); 1929 + } 1930 + 1877 1931 static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in, 1878 1932 struct tb_port *out) 1879 1933 { 1880 1934 int available_up, available_down, ret, link_nr; 1881 1935 struct tb_cm *tcm = tb_priv(tb); 1882 - int consumed_up, consumed_down; 1883 1936 struct tb_tunnel *tunnel; 1884 1937 1885 1938 /* ··· 1995 1920 available_up, available_down); 1996 1921 1997 1922 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, 1998 - available_down); 1923 + available_down, tb_dp_tunnel_active, 1924 + tb_domain_get(tb)); 1999 1925 if (!tunnel) { 2000 1926 tb_port_dbg(out, "could not allocate DP tunnel\n"); 2001 1927 goto err_reclaim_usb; 2002 1928 } 2003 1929 2004 - if (tb_tunnel_activate(tunnel)) { 1930 + list_add_tail(&tunnel->list, &tcm->tunnel_list); 1931 + 1932 + ret = tb_tunnel_activate(tunnel); 1933 + if (ret && ret != -EINPROGRESS) { 2005 1934 tb_port_info(out, "DP tunnel activation failed, aborting\n"); 1935 + list_del(&tunnel->list); 2006 1936 goto err_free; 2007 1937 } 2008 1938 2009 - /* If fail reading tunnel's consumed bandwidth, tear it down */ 2010 - ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down); 2011 - if (ret) 2012 - goto err_deactivate; 2013 - 2014 - list_add_tail(&tunnel->list, &tcm->tunnel_list); 2015 - 2016 - tb_reclaim_usb3_bandwidth(tb, in, out); 2017 - /* 2018 - * Transition the links to asymmetric if the consumption exceeds 2019 - * the threshold. 2020 - */ 2021 - tb_configure_asym(tb, in, out, consumed_up, consumed_down); 2022 - 2023 - /* Update the domain with the new bandwidth estimation */ 2024 - tb_recalc_estimated_bandwidth(tb); 2025 - 2026 - /* 2027 - * In case of DP tunnel exists, change host router's 1st children 2028 - * TMU mode to HiFi for CL0s to work. 
2029 - */ 2030 - tb_increase_tmu_accuracy(tunnel); 2031 1939 return; 2032 1940 2033 - err_deactivate: 2034 - tb_tunnel_deactivate(tunnel); 2035 1941 err_free: 2036 - tb_tunnel_free(tunnel); 1942 + tb_tunnel_put(tunnel); 2037 1943 err_reclaim_usb: 2038 1944 tb_reclaim_usb3_bandwidth(tb, in, out); 1945 + tb_domain_put(tb); 2039 1946 err_detach_group: 2040 1947 tb_detach_bandwidth_group(in); 2041 1948 err_dealloc_dp: ··· 2237 2180 2238 2181 tb_tunnel_deactivate(tunnel); 2239 2182 list_del(&tunnel->list); 2240 - tb_tunnel_free(tunnel); 2183 + tb_tunnel_put(tunnel); 2241 2184 return 0; 2242 2185 } 2243 2186 ··· 2267 2210 if (tb_tunnel_activate(tunnel)) { 2268 2211 tb_port_info(up, 2269 2212 "PCIe tunnel activation failed, aborting\n"); 2270 - tb_tunnel_free(tunnel); 2213 + tb_tunnel_put(tunnel); 2271 2214 return -EIO; 2272 2215 } 2273 2216 ··· 2326 2269 return 0; 2327 2270 2328 2271 err_free: 2329 - tb_tunnel_free(tunnel); 2272 + tb_tunnel_put(tunnel); 2330 2273 err_clx: 2331 2274 tb_enable_clx(sw); 2332 2275 mutex_unlock(&tb->lock); ··· 2389 2332 */ 2390 2333 static void tb_handle_hotplug(struct work_struct *work) 2391 2334 { 2392 - struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 2335 + struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work); 2393 2336 struct tb *tb = ev->tb; 2394 2337 struct tb_cm *tcm = tb_priv(tb); 2395 2338 struct tb_switch *sw; ··· 2694 2637 2695 2638 static void tb_handle_dp_bandwidth_request(struct work_struct *work) 2696 2639 { 2697 - struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); 2640 + struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work); 2698 2641 int requested_bw, requested_up, requested_down, ret; 2699 2642 struct tb_tunnel *tunnel; 2700 2643 struct tb *tb = ev->tb; ··· 2721 2664 goto put_sw; 2722 2665 } 2723 2666 2724 - tb_port_dbg(in, "handling bandwidth allocation request\n"); 2667 + tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", 
ev->retry); 2725 2668 2726 2669 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); 2727 2670 if (!tunnel) { ··· 2774 2717 2775 2718 ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); 2776 2719 if (ret) { 2777 - if (ret == -ENOBUFS) 2720 + if (ret == -ENOBUFS) { 2778 2721 tb_tunnel_warn(tunnel, 2779 2722 "not enough bandwidth available\n"); 2780 - else 2723 + } else if (ret == -ENOTCONN) { 2724 + tb_tunnel_dbg(tunnel, "not active yet\n"); 2725 + /* 2726 + * We got bandwidth allocation request but the 2727 + * tunnel is not yet active. This means that 2728 + * tb_dp_tunnel_active() is not yet called for 2729 + * this tunnel. Allow it some time and retry 2730 + * this request a couple of times. 2731 + */ 2732 + if (ev->retry < TB_BW_ALLOC_RETRIES) { 2733 + tb_tunnel_dbg(tunnel, 2734 + "retrying bandwidth allocation request\n"); 2735 + tb_queue_dp_bandwidth_request(tb, ev->route, 2736 + ev->port, 2737 + ev->retry + 1, 2738 + msecs_to_jiffies(50)); 2739 + } else { 2740 + tb_tunnel_dbg(tunnel, 2741 + "run out of retries, failing the request"); 2742 + } 2743 + } else { 2781 2744 tb_tunnel_warn(tunnel, 2782 2745 "failed to change bandwidth allocation\n"); 2746 + } 2783 2747 } else { 2784 2748 tb_tunnel_dbg(tunnel, 2785 2749 "bandwidth allocation changed to %d/%d Mb/s\n", ··· 2821 2743 kfree(ev); 2822 2744 } 2823 2745 2824 - static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) 2746 + static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port, 2747 + int retry, unsigned long delay) 2825 2748 { 2826 2749 struct tb_hotplug_event *ev; 2827 2750 ··· 2833 2754 ev->tb = tb; 2834 2755 ev->route = route; 2835 2756 ev->port = port; 2836 - INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); 2837 - queue_work(tb->wq, &ev->work); 2757 + ev->retry = retry; 2758 + INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request); 2759 + queue_delayed_work(tb->wq, &ev->work, delay); 2838 2760 } 2839 2761 2840 2762 static void 
tb_handle_notification(struct tb *tb, u64 route, ··· 2855 2775 if (tb_cfg_ack_notification(tb->ctl, route, error)) 2856 2776 tb_warn(tb, "could not ack notification on %llx\n", 2857 2777 route); 2858 - tb_queue_dp_bandwidth_request(tb, route, error->port); 2778 + tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0); 2859 2779 break; 2860 2780 2861 2781 default: ··· 2910 2830 */ 2911 2831 if (tb_tunnel_is_dma(tunnel)) 2912 2832 tb_tunnel_deactivate(tunnel); 2913 - tb_tunnel_free(tunnel); 2833 + tb_tunnel_put(tunnel); 2914 2834 } 2915 2835 tb_switch_remove(tb->root_switch); 2916 2836 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ ··· 3106 3026 if (tb_tunnel_is_usb3(tunnel)) 3107 3027 usb3_delay = 500; 3108 3028 tb_tunnel_deactivate(tunnel); 3109 - tb_tunnel_free(tunnel); 3029 + tb_tunnel_put(tunnel); 3110 3030 } 3111 3031 3112 3032 /* Re-create our tunnels now */
+45 -45
drivers/thunderbolt/test.c
··· 1382 1382 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); 1383 1383 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); 1384 1384 1385 - tb_tunnel_free(tunnel2); 1386 - tb_tunnel_free(tunnel1); 1385 + tb_tunnel_put(tunnel2); 1386 + tb_tunnel_put(tunnel1); 1387 1387 } 1388 1388 1389 1389 static void tb_test_tunnel_dp(struct kunit *test) ··· 1406 1406 in = &host->ports[5]; 1407 1407 out = &dev->ports[13]; 1408 1408 1409 - tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 1409 + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 1410 1410 KUNIT_ASSERT_NOT_NULL(test, tunnel); 1411 1411 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); 1412 1412 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); ··· 1421 1421 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2); 1422 1422 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); 1423 1423 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in); 1424 - tb_tunnel_free(tunnel); 1424 + tb_tunnel_put(tunnel); 1425 1425 } 1426 1426 1427 1427 static void tb_test_tunnel_dp_chain(struct kunit *test) ··· 1452 1452 in = &host->ports[5]; 1453 1453 out = &dev4->ports[14]; 1454 1454 1455 - tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 1455 + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 1456 1456 KUNIT_ASSERT_NOT_NULL(test, tunnel); 1457 1457 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); 1458 1458 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); ··· 1467 1467 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3); 1468 1468 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); 1469 1469 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in); 1470 - tb_tunnel_free(tunnel); 1470 + tb_tunnel_put(tunnel); 1471 1471 } 1472 1472 1473 1473 static void tb_test_tunnel_dp_tree(struct kunit *test) ··· 1502 1502 in = &dev2->ports[13]; 1503 1503 out = &dev5->ports[13]; 1504 1504 1505 - tunnel = 
tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 1505 + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 1506 1506 KUNIT_ASSERT_NOT_NULL(test, tunnel); 1507 1507 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); 1508 1508 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); ··· 1517 1517 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4); 1518 1518 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); 1519 1519 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in); 1520 - tb_tunnel_free(tunnel); 1520 + tb_tunnel_put(tunnel); 1521 1521 } 1522 1522 1523 1523 static void tb_test_tunnel_dp_max_length(struct kunit *test) ··· 1567 1567 in = &dev6->ports[13]; 1568 1568 out = &dev12->ports[13]; 1569 1569 1570 - tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 1570 + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 1571 1571 KUNIT_ASSERT_NOT_NULL(test, tunnel); 1572 1572 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); 1573 1573 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); ··· 1597 1597 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port, 1598 1598 &host->ports[1]); 1599 1599 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in); 1600 - tb_tunnel_free(tunnel); 1600 + tb_tunnel_put(tunnel); 1601 1601 } 1602 1602 1603 1603 static void tb_test_tunnel_3dp(struct kunit *test) ··· 1637 1637 out2 = &dev5->ports[13]; 1638 1638 out3 = &dev4->ports[14]; 1639 1639 1640 - tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0); 1640 + tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0, NULL, NULL); 1641 1641 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL); 1642 1642 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP); 1643 1643 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1); ··· 1645 1645 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3); 1646 1646 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3); 1647 1647 1648 - tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0); 1648 + tunnel2 = 
tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0, NULL, NULL); 1649 1649 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL); 1650 1650 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP); 1651 1651 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2); ··· 1653 1653 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3); 1654 1654 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4); 1655 1655 1656 - tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0); 1656 + tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0, NULL, NULL); 1657 1657 KUNIT_ASSERT_TRUE(test, tunnel3 != NULL); 1658 1658 KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP); 1659 1659 KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3); ··· 1661 1661 KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3); 1662 1662 KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3); 1663 1663 1664 - tb_tunnel_free(tunnel2); 1665 - tb_tunnel_free(tunnel1); 1664 + tb_tunnel_put(tunnel2); 1665 + tb_tunnel_put(tunnel1); 1666 1666 } 1667 1667 1668 1668 static void tb_test_tunnel_usb3(struct kunit *test) ··· 1716 1716 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); 1717 1717 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); 1718 1718 1719 - tb_tunnel_free(tunnel2); 1720 - tb_tunnel_free(tunnel1); 1719 + tb_tunnel_put(tunnel2); 1720 + tb_tunnel_put(tunnel1); 1721 1721 } 1722 1722 1723 1723 static void tb_test_tunnel_port_on_path(struct kunit *test) ··· 1750 1750 in = &dev2->ports[13]; 1751 1751 out = &dev5->ports[13]; 1752 1752 1753 - dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 1753 + dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 1754 1754 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel); 1755 1755 1756 1756 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in)); ··· 1783 1783 port = &dev4->ports[1]; 1784 1784 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); 1785 1785 1786 - tb_tunnel_free(dp_tunnel); 1786 + tb_tunnel_put(dp_tunnel); 1787 1787 } 1788 1788 1789 1789 static 
void tb_test_tunnel_dma(struct kunit *test) ··· 1826 1826 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port); 1827 1827 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8); 1828 1828 1829 - tb_tunnel_free(tunnel); 1829 + tb_tunnel_put(tunnel); 1830 1830 } 1831 1831 1832 1832 static void tb_test_tunnel_dma_rx(struct kunit *test) ··· 1863 1863 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); 1864 1864 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2); 1865 1865 1866 - tb_tunnel_free(tunnel); 1866 + tb_tunnel_put(tunnel); 1867 1867 } 1868 1868 1869 1869 static void tb_test_tunnel_dma_tx(struct kunit *test) ··· 1900 1900 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port); 1901 1901 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15); 1902 1902 1903 - tb_tunnel_free(tunnel); 1903 + tb_tunnel_put(tunnel); 1904 1904 } 1905 1905 1906 1906 static void tb_test_tunnel_dma_chain(struct kunit *test) ··· 1966 1966 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port); 1967 1967 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8); 1968 1968 1969 - tb_tunnel_free(tunnel); 1969 + tb_tunnel_put(tunnel); 1970 1970 } 1971 1971 1972 1972 static void tb_test_tunnel_dma_match(struct kunit *test) ··· 1993 1993 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); 1994 1994 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1)); 1995 1995 1996 - tb_tunnel_free(tunnel); 1996 + tb_tunnel_put(tunnel); 1997 1997 1998 1998 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1); 1999 1999 KUNIT_ASSERT_NOT_NULL(test, tunnel); ··· 2005 2005 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); 2006 2006 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); 2007 2007 2008 - tb_tunnel_free(tunnel); 2008 + tb_tunnel_put(tunnel); 2009 2009 2010 2010 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 
11); 2011 2011 KUNIT_ASSERT_NOT_NULL(test, tunnel); ··· 2017 2017 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11)); 2018 2018 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); 2019 2019 2020 - tb_tunnel_free(tunnel); 2020 + tb_tunnel_put(tunnel); 2021 2021 } 2022 2022 2023 2023 static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test) ··· 2050 2050 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2051 2051 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U); 2052 2052 2053 - tb_tunnel_free(tunnel); 2053 + tb_tunnel_put(tunnel); 2054 2054 } 2055 2055 2056 2056 static void tb_test_credit_alloc_legacy_bonded(struct kunit *test) ··· 2083 2083 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2084 2084 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); 2085 2085 2086 - tb_tunnel_free(tunnel); 2086 + tb_tunnel_put(tunnel); 2087 2087 } 2088 2088 2089 2089 static void tb_test_credit_alloc_pcie(struct kunit *test) ··· 2116 2116 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2117 2117 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); 2118 2118 2119 - tb_tunnel_free(tunnel); 2119 + tb_tunnel_put(tunnel); 2120 2120 } 2121 2121 2122 2122 static void tb_test_credit_alloc_without_dp(struct kunit *test) ··· 2166 2166 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2167 2167 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); 2168 2168 2169 - tb_tunnel_free(tunnel); 2169 + tb_tunnel_put(tunnel); 2170 2170 } 2171 2171 2172 2172 static void tb_test_credit_alloc_dp(struct kunit *test) ··· 2182 2182 in = &host->ports[5]; 2183 2183 out = &dev->ports[14]; 2184 2184 2185 - tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 2185 + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 2186 2186 KUNIT_ASSERT_NOT_NULL(test, tunnel); 2187 2187 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3); 2188 2188 ··· 2210 2210 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2211 
2211 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); 2212 2212 2213 - tb_tunnel_free(tunnel); 2213 + tb_tunnel_put(tunnel); 2214 2214 } 2215 2215 2216 2216 static void tb_test_credit_alloc_usb3(struct kunit *test) ··· 2243 2243 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2244 2244 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); 2245 2245 2246 - tb_tunnel_free(tunnel); 2246 + tb_tunnel_put(tunnel); 2247 2247 } 2248 2248 2249 2249 static void tb_test_credit_alloc_dma(struct kunit *test) ··· 2279 2279 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2280 2280 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); 2281 2281 2282 - tb_tunnel_free(tunnel); 2282 + tb_tunnel_put(tunnel); 2283 2283 } 2284 2284 2285 2285 static void tb_test_credit_alloc_dma_multiple(struct kunit *test) ··· 2356 2356 * Release the first DMA tunnel. That should make 14 buffers 2357 2357 * available for the next tunnel. 2358 2358 */ 2359 - tb_tunnel_free(tunnel1); 2359 + tb_tunnel_put(tunnel1); 2360 2360 2361 2361 tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3); 2362 2362 KUNIT_ASSERT_NOT_NULL(test, tunnel3); ··· 2375 2375 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); 2376 2376 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); 2377 2377 2378 - tb_tunnel_free(tunnel3); 2379 - tb_tunnel_free(tunnel2); 2378 + tb_tunnel_put(tunnel3); 2379 + tb_tunnel_put(tunnel2); 2380 2380 } 2381 2381 2382 2382 static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test, ··· 2418 2418 2419 2419 in = &host->ports[5]; 2420 2420 out = &dev->ports[13]; 2421 - dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); 2421 + dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 2422 2422 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1); 2423 2423 KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3); 2424 2424 ··· 2455 2455 2456 2456 in = &host->ports[6]; 2457 2457 out = &dev->ports[14]; 2458 - dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 
0, 0); 2458 + dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL); 2459 2459 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2); 2460 2460 KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3); 2461 2461 ··· 2595 2595 dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev); 2596 2596 dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev); 2597 2597 2598 - tb_tunnel_free(dma_tunnel2); 2599 - tb_tunnel_free(dma_tunnel1); 2600 - tb_tunnel_free(usb3_tunnel); 2601 - tb_tunnel_free(dp_tunnel2); 2602 - tb_tunnel_free(dp_tunnel1); 2603 - tb_tunnel_free(pcie_tunnel); 2598 + tb_tunnel_put(dma_tunnel2); 2599 + tb_tunnel_put(dma_tunnel1); 2600 + tb_tunnel_put(usb3_tunnel); 2601 + tb_tunnel_put(dp_tunnel2); 2602 + tb_tunnel_put(dp_tunnel1); 2603 + tb_tunnel_put(pcie_tunnel); 2604 2604 } 2605 2605 2606 2606 static const u32 root_directory[] = {
+227 -114
drivers/thunderbolt/tunnel.c
··· 70 70 #define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT) 71 71 #define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT) 72 72 73 + /* 74 + * According to VESA spec, the DPRX negotiation shall compete in 5 75 + * seconds after tunnel is established. Since at least i915 can runtime 76 + * suspend if there is nothing connected, and that it polls any new 77 + * connections every 10 seconds, we use 12 seconds here. 78 + * 79 + * These are in ms. 80 + */ 81 + #define TB_DPRX_TIMEOUT 12000 82 + #define TB_DPRX_WAIT_TIMEOUT 25 83 + #define TB_DPRX_POLL_DELAY 50 84 + 85 + static int dprx_timeout = TB_DPRX_TIMEOUT; 86 + module_param(dprx_timeout, int, 0444); 87 + MODULE_PARM_DESC(dprx_timeout, 88 + "DPRX capability read timeout in ms, -1 waits forever (default: " 89 + __MODULE_STRING(TB_DPRX_TIMEOUT) ")"); 90 + 73 91 static unsigned int dma_credits = TB_DMA_CREDITS; 74 92 module_param(dma_credits, uint, 0444); 75 93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " ··· 99 81 "enable bandwidth allocation mode if supported (default: true)"); 100 82 101 83 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; 84 + 85 + /* Synchronizes kref_get()/put() of struct tb_tunnel */ 86 + static DEFINE_MUTEX(tb_tunnel_lock); 102 87 103 88 static inline unsigned int tb_usable_credits(const struct tb_port *port) 104 89 { ··· 176 155 177 156 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); 178 157 if (!tunnel->paths) { 179 - tb_tunnel_free(tunnel); 158 + kfree(tunnel); 180 159 return NULL; 181 160 } 182 161 ··· 184 163 tunnel->tb = tb; 185 164 tunnel->npaths = npaths; 186 165 tunnel->type = type; 166 + kref_init(&tunnel->kref); 187 167 188 168 return tunnel; 169 + } 170 + 171 + static void tb_tunnel_get(struct tb_tunnel *tunnel) 172 + { 173 + mutex_lock(&tb_tunnel_lock); 174 + kref_get(&tunnel->kref); 175 + mutex_unlock(&tb_tunnel_lock); 176 + } 177 + 178 + static void tb_tunnel_destroy(struct kref 
*kref) 179 + { 180 + struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); 181 + int i; 182 + 183 + if (tunnel->destroy) 184 + tunnel->destroy(tunnel); 185 + 186 + for (i = 0; i < tunnel->npaths; i++) { 187 + if (tunnel->paths[i]) 188 + tb_path_free(tunnel->paths[i]); 189 + } 190 + 191 + kfree(tunnel->paths); 192 + kfree(tunnel); 193 + } 194 + 195 + void tb_tunnel_put(struct tb_tunnel *tunnel) 196 + { 197 + mutex_lock(&tb_tunnel_lock); 198 + kref_put(&tunnel->kref, tb_tunnel_destroy); 199 + mutex_unlock(&tb_tunnel_lock); 189 200 } 190 201 191 202 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) ··· 408 355 err_deactivate: 409 356 tb_tunnel_deactivate(tunnel); 410 357 err_free: 411 - tb_tunnel_free(tunnel); 358 + tb_tunnel_put(tunnel); 412 359 413 360 return NULL; 414 361 } ··· 457 404 return tunnel; 458 405 459 406 err_free: 460 - tb_tunnel_free(tunnel); 407 + tb_tunnel_put(tunnel); 461 408 return NULL; 462 409 } 463 410 ··· 942 889 } 943 890 } 944 891 892 + static ktime_t dprx_timeout_to_ktime(int timeout_msec) 893 + { 894 + return timeout_msec >= 0 ? 895 + ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX; 896 + } 897 + 898 + static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) 899 + { 900 + ktime_t timeout = dprx_timeout_to_ktime(timeout_msec); 901 + struct tb_port *in = tunnel->src_port; 902 + 903 + /* 904 + * Wait for DPRX done. Normally it should be already set for 905 + * active tunnel. 
906 + */ 907 + do { 908 + u32 val; 909 + int ret; 910 + 911 + ret = tb_port_read(in, &val, TB_CFG_PORT, 912 + in->cap_adap + DP_COMMON_CAP, 1); 913 + if (ret) 914 + return ret; 915 + 916 + if (val & DP_COMMON_CAP_DPRX_DONE) 917 + return 0; 918 + 919 + usleep_range(100, 150); 920 + } while (ktime_before(ktime_get(), timeout)); 921 + 922 + tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); 923 + return -ETIMEDOUT; 924 + } 925 + 926 + static void tb_dp_dprx_work(struct work_struct *work) 927 + { 928 + struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work); 929 + struct tb *tb = tunnel->tb; 930 + 931 + if (!tunnel->dprx_canceled) { 932 + mutex_lock(&tb->lock); 933 + if (tb_dp_is_usb4(tunnel->src_port->sw) && 934 + tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) { 935 + if (ktime_before(ktime_get(), tunnel->dprx_timeout)) { 936 + queue_delayed_work(tb->wq, &tunnel->dprx_work, 937 + msecs_to_jiffies(TB_DPRX_POLL_DELAY)); 938 + mutex_unlock(&tb->lock); 939 + return; 940 + } 941 + } else { 942 + tunnel->state = TB_TUNNEL_ACTIVE; 943 + } 944 + mutex_unlock(&tb->lock); 945 + } 946 + 947 + if (tunnel->callback) 948 + tunnel->callback(tunnel, tunnel->callback_data); 949 + } 950 + 951 + static int tb_dp_dprx_start(struct tb_tunnel *tunnel) 952 + { 953 + /* 954 + * Bump up the reference to keep the tunnel around. It will be 955 + * dropped in tb_dp_dprx_stop() once the tunnel is deactivated. 956 + */ 957 + tb_tunnel_get(tunnel); 958 + 959 + if (tunnel->callback) { 960 + tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout); 961 + queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0); 962 + return -EINPROGRESS; 963 + } 964 + 965 + return tb_dp_is_usb4(tunnel->src_port->sw) ? 
966 + tb_dp_wait_dprx(tunnel, dprx_timeout) : 0; 967 + } 968 + 969 + static void tb_dp_dprx_stop(struct tb_tunnel *tunnel) 970 + { 971 + tunnel->dprx_canceled = true; 972 + cancel_delayed_work(&tunnel->dprx_work); 973 + tb_tunnel_put(tunnel); 974 + } 975 + 945 976 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) 946 977 { 947 978 int ret; ··· 1047 910 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, 1048 911 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); 1049 912 } else { 913 + tb_dp_dprx_stop(tunnel); 1050 914 tb_dp_port_hpd_clear(tunnel->src_port); 1051 915 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); 1052 916 if (tb_port_is_dpout(tunnel->dst_port)) ··· 1058 920 if (ret) 1059 921 return ret; 1060 922 1061 - if (tb_port_is_dpout(tunnel->dst_port)) 1062 - return tb_dp_port_enable(tunnel->dst_port, active); 923 + if (tb_port_is_dpout(tunnel->dst_port)) { 924 + ret = tb_dp_port_enable(tunnel->dst_port, active); 925 + if (ret) 926 + return ret; 927 + } 1063 928 1064 - return 0; 929 + return active ? tb_dp_dprx_start(tunnel) : 0; 1065 930 } 1066 931 1067 932 /** ··· 1217 1076 return 0; 1218 1077 } 1219 1078 1220 - static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) 1221 - { 1222 - ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); 1223 - struct tb_port *in = tunnel->src_port; 1224 - 1225 - /* 1226 - * Wait for DPRX done. Normally it should be already set for 1227 - * active tunnel. 
1228 - */ 1229 - do { 1230 - u32 val; 1231 - int ret; 1232 - 1233 - ret = tb_port_read(in, &val, TB_CFG_PORT, 1234 - in->cap_adap + DP_COMMON_CAP, 1); 1235 - if (ret) 1236 - return ret; 1237 - 1238 - if (val & DP_COMMON_CAP_DPRX_DONE) { 1239 - tb_tunnel_dbg(tunnel, "DPRX read done\n"); 1240 - return 0; 1241 - } 1242 - usleep_range(100, 150); 1243 - } while (ktime_before(ktime_get(), timeout)); 1244 - 1245 - tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); 1246 - return -ETIMEDOUT; 1247 - } 1248 - 1249 1079 /* Read cap from tunnel DP IN */ 1250 1080 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, 1251 1081 u32 *lanes) ··· 1280 1168 int ret; 1281 1169 1282 1170 if (tb_dp_is_usb4(sw)) { 1283 - /* 1284 - * On USB4 routers check if the bandwidth allocation 1285 - * mode is enabled first and then read the bandwidth 1286 - * through those registers. 1287 - */ 1288 - ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, 1289 - consumed_down); 1290 - if (ret < 0) { 1291 - if (ret != -EOPNOTSUPP) 1171 + ret = tb_dp_wait_dprx(tunnel, 0); 1172 + if (ret) { 1173 + if (ret == -ETIMEDOUT) { 1174 + /* 1175 + * While we wait for DPRX complete the 1176 + * tunnel consumes as much as it had 1177 + * been reserved initially. 1178 + */ 1179 + ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, 1180 + &rate, &lanes); 1181 + if (ret) 1182 + return ret; 1183 + } else { 1292 1184 return ret; 1293 - } else if (!ret) { 1294 - return 0; 1185 + } 1186 + } else { 1187 + /* 1188 + * On USB4 routers check if the bandwidth allocation 1189 + * mode is enabled first and then read the bandwidth 1190 + * through those registers. 
1191 + */ 1192 + ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, 1193 + consumed_down); 1194 + if (ret < 0) { 1195 + if (ret != -EOPNOTSUPP) 1196 + return ret; 1197 + } else if (!ret) { 1198 + return 0; 1199 + } 1200 + ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); 1201 + if (ret) 1202 + return ret; 1295 1203 } 1296 - /* 1297 - * Then see if the DPRX negotiation is ready and if yes 1298 - * return that bandwidth (it may be smaller than the 1299 - * reduced one). According to VESA spec, the DPRX 1300 - * negotiation shall compete in 5 seconds after tunnel 1301 - * established. Since at least i915 can runtime suspend 1302 - * if there is nothing connected, and that it polls any 1303 - * new connections every 10 seconds, we use 12 seconds 1304 - * here. 1305 - */ 1306 - ret = tb_dp_wait_dprx(tunnel, 12000); 1307 - if (ret) 1308 - return ret; 1309 - ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); 1310 - if (ret) 1311 - return ret; 1312 1204 } else if (sw->generation >= 2) { 1313 1205 ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); 1314 1206 if (ret) ··· 1543 1427 err_deactivate: 1544 1428 tb_tunnel_deactivate(tunnel); 1545 1429 err_free: 1546 - tb_tunnel_free(tunnel); 1430 + tb_tunnel_put(tunnel); 1547 1431 1548 1432 return NULL; 1549 1433 } ··· 1558 1442 * %0 if no available bandwidth. 1559 1443 * @max_down: Maximum available downstream bandwidth for the DP tunnel. 1560 1444 * %0 if no available bandwidth. 1445 + * @callback: Optional callback that is called when the DP tunnel is 1446 + * fully activated (or there is an error) 1447 + * @callback_data: Optional data for @callback 1561 1448 * 1562 1449 * Allocates a tunnel between @in and @out that is capable of tunneling 1563 - * Display Port traffic. 1450 + * Display Port traffic. If @callback is not %NULL it will be called 1451 + * after tb_tunnel_activate() once the tunnel has been fully activated. 
1452 + * It can call tb_tunnel_is_active() to check if activation was 1453 + * successful (or if it returns %false there was some sort of issue). 1454 + * The @callback is called without @tb->lock held. 1564 1455 * 1565 - * Return: Returns a tb_tunnel on success or NULL on failure. 1456 + * Return: Returns a tb_tunnel on success or %NULL on failure. 1566 1457 */ 1567 1458 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, 1568 1459 struct tb_port *out, int link_nr, 1569 - int max_up, int max_down) 1460 + int max_up, int max_down, 1461 + void (*callback)(struct tb_tunnel *, void *), 1462 + void *callback_data) 1570 1463 { 1571 1464 struct tb_tunnel *tunnel; 1572 1465 struct tb_path **paths; ··· 1600 1475 tunnel->dst_port = out; 1601 1476 tunnel->max_up = max_up; 1602 1477 tunnel->max_down = max_down; 1478 + tunnel->callback = callback; 1479 + tunnel->callback_data = callback_data; 1480 + INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work); 1603 1481 1604 1482 paths = tunnel->paths; 1605 1483 pm_support = usb4_switch_version(in->sw) >= 2; ··· 1631 1503 return tunnel; 1632 1504 1633 1505 err_free: 1634 - tb_tunnel_free(tunnel); 1506 + tb_tunnel_put(tunnel); 1635 1507 return NULL; 1636 1508 } ··· 1843 1715 return tunnel; 1844 1716 1845 1717 err_free: 1846 - tb_tunnel_free(tunnel); 1718 + tb_tunnel_put(tunnel); 1847 1719 return NULL; 1848 1720 } ··· 2169 2041 err_deactivate: 2170 2042 tb_tunnel_deactivate(tunnel); 2171 2043 err_free: 2172 - tb_tunnel_free(tunnel); 2044 + tb_tunnel_put(tunnel); 2173 2045 2174 2046 return NULL; 2175 2047 } ··· 2225 2097 path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, 2226 2098 "USB3 Down"); 2227 2099 if (!path) { 2228 - tb_tunnel_free(tunnel); 2100 + tb_tunnel_put(tunnel); 2229 2101 return NULL; 2230 2102 } 2231 2103 tb_usb3_init_path(path); ··· 2234 2106 path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0, 2235 2107 "USB3 Up"); 2236 2108 if (!path) { 2237 - 
tb_tunnel_free(tunnel); 2109 + tb_tunnel_put(tunnel); 2238 2110 return NULL; 2239 2111 } 2240 2112 tb_usb3_init_path(path); ··· 2253 2125 } 2254 2126 2255 2127 return tunnel; 2256 - } 2257 - 2258 - /** 2259 - * tb_tunnel_free() - free a tunnel 2260 - * @tunnel: Tunnel to be freed 2261 - * 2262 - * Frees a tunnel. The tunnel does not need to be deactivated. 2263 - */ 2264 - void tb_tunnel_free(struct tb_tunnel *tunnel) 2265 - { 2266 - int i; 2267 - 2268 - if (!tunnel) 2269 - return; 2270 - 2271 - if (tunnel->destroy) 2272 - tunnel->destroy(tunnel); 2273 - 2274 - for (i = 0; i < tunnel->npaths; i++) { 2275 - if (tunnel->paths[i]) 2276 - tb_path_free(tunnel->paths[i]); 2277 - } 2278 - 2279 - kfree(tunnel->paths); 2280 - kfree(tunnel); 2281 2128 } 2282 2129 2283 2130 /** ··· 2276 2173 * tb_tunnel_activate() - activate a tunnel 2277 2174 * @tunnel: Tunnel to activate 2278 2175 * 2279 - * Return: 0 on success and negative errno in case if failure 2176 + * Return: 0 on success and negative errno in case of failure. 2177 + * Specifically returns %-EINPROGRESS if the tunnel activation is still 2178 + * in progress (that's for DP tunnels to complete DPRX capabilities 2179 + * read). 
2280 2180 */ 2281 2181 int tb_tunnel_activate(struct tb_tunnel *tunnel) 2282 2182 { ··· 2298 2192 } 2299 2193 } 2300 2194 2195 + tunnel->state = TB_TUNNEL_ACTIVATING; 2196 + 2301 2197 if (tunnel->pre_activate) { 2302 2198 res = tunnel->pre_activate(tunnel); 2303 2199 if (res) ··· 2314 2206 2315 2207 if (tunnel->activate) { 2316 2208 res = tunnel->activate(tunnel, true); 2317 - if (res) 2209 + if (res) { 2210 + if (res == -EINPROGRESS) 2211 + return res; 2318 2212 goto err; 2213 + } 2319 2214 } 2320 2215 2216 + tunnel->state = TB_TUNNEL_ACTIVE; 2321 2217 return 0; 2322 2218 2323 2219 err: ··· 2350 2238 2351 2239 if (tunnel->post_deactivate) 2352 2240 tunnel->post_deactivate(tunnel); 2241 + 2242 + tunnel->state = TB_TUNNEL_INACTIVE; 2353 2243 } 2354 2244 2355 2245 /** ··· 2378 2264 return false; 2379 2265 } 2380 2266 2381 - static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) 2267 + /* Is tb_tunnel_activate() called for the tunnel */ 2268 + static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel) 2382 2269 { 2383 - int i; 2384 - 2385 - for (i = 0; i < tunnel->npaths; i++) { 2386 - if (!tunnel->paths[i]) 2387 - return false; 2388 - if (!tunnel->paths[i]->activated) 2389 - return false; 2390 - } 2391 - 2392 - return true; 2270 + return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel); 2393 2271 } 2394 2272 2395 2273 /** ··· 2398 2292 int *max_down) 2399 2293 { 2400 2294 if (!tb_tunnel_is_active(tunnel)) 2401 - return -EINVAL; 2295 + return -ENOTCONN; 2402 2296 2403 2297 if (tunnel->maximum_bandwidth) 2404 2298 return tunnel->maximum_bandwidth(tunnel, max_up, max_down); ··· 2419 2313 int *allocated_down) 2420 2314 { 2421 2315 if (!tb_tunnel_is_active(tunnel)) 2422 - return -EINVAL; 2316 + return -ENOTCONN; 2423 2317 2424 2318 if (tunnel->allocated_bandwidth) 2425 2319 return tunnel->allocated_bandwidth(tunnel, allocated_up, ··· 2442 2336 int *alloc_down) 2443 2337 { 2444 2338 if (!tb_tunnel_is_active(tunnel)) 2445 - return -EINVAL; 
2339 + return -ENOTCONN; 2446 2340 2447 2341 if (tunnel->alloc_bandwidth) 2448 2342 return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); ··· 2467 2361 { 2468 2362 int up_bw = 0, down_bw = 0; 2469 2363 2470 - if (tb_tunnel_is_active(tunnel) && tunnel->consumed_bandwidth) { 2364 + /* 2365 + * Here we need to distinguish between not active tunnel from 2366 + * tunnels that are either fully active or activation started. 2367 + * The latter is true for DP tunnels where we must report the 2368 + * consumed to be the maximum we gave it until DPRX capabilities 2369 + * read is done by the graphics driver. 2370 + */ 2371 + if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) { 2471 2372 int ret; 2472 2373 2473 2374 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); ··· 2503 2390 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel) 2504 2391 { 2505 2392 if (!tb_tunnel_is_active(tunnel)) 2506 - return 0; 2393 + return -ENOTCONN; 2507 2394 2508 2395 if (tunnel->release_unused_bandwidth) { 2509 2396 int ret;
+45 -2
drivers/thunderbolt/tunnel.h
··· 19 19 }; 20 20 21 21 /** 22 + * enum tb_tunnel_state - State of a tunnel 23 + * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel 24 + * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel 25 + * @TB_TUNNEL_ACTIVE: The tunnel is fully active 26 + */ 27 + enum tb_tunnel_state { 28 + TB_TUNNEL_INACTIVE, 29 + TB_TUNNEL_ACTIVATING, 30 + TB_TUNNEL_ACTIVE, 31 + }; 32 + 33 + /** 22 34 * struct tb_tunnel - Tunnel between two ports 35 + * @kref: Reference count 23 36 * @tb: Pointer to the domain 24 37 * @src_port: Source port of the tunnel 25 38 * @dst_port: Destination port of the tunnel. For discovered incomplete ··· 54 41 * @reclaim_available_bandwidth: Reclaim back available bandwidth 55 42 * @list: Tunnels are linked using this field 56 43 * @type: Type of the tunnel 44 + * @state: Current state of the tunnel 57 45 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel. 58 46 * Only set if the bandwidth needs to be limited. 59 47 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel. 
··· 63 49 * @allocated_down: Allocated downstream bandwidth (only for USB3) 64 50 * @bw_mode: DP bandwidth allocation mode registers can be used to 65 51 * determine consumed and allocated bandwidth 52 + * @dprx_canceled: Was DPRX capabilities read poll canceled 53 + * @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes 54 + * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read 55 + * @callback: Optional callback called when DP tunnel is fully activated 56 + * @callback_data: Optional data for @callback 66 57 */ 67 58 struct tb_tunnel { 59 + struct kref kref; 68 60 struct tb *tb; 69 61 struct tb_port *src_port; 70 62 struct tb_port *dst_port; ··· 94 74 int *available_down); 95 75 struct list_head list; 96 76 enum tb_tunnel_type type; 77 + enum tb_tunnel_state state; 97 78 int max_up; 98 79 int max_down; 99 80 int allocated_up; 100 81 int allocated_down; 101 82 bool bw_mode; 83 + bool dprx_canceled; 84 + ktime_t dprx_timeout; 85 + struct delayed_work dprx_work; 86 + void (*callback)(struct tb_tunnel *tunnel, void *data); 87 + void *callback_data; 102 88 }; 103 89 104 90 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, ··· 117 91 bool alloc_hopid); 118 92 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, 119 93 struct tb_port *out, int link_nr, 120 - int max_up, int max_down); 94 + int max_up, int max_down, 95 + void (*callback)(struct tb_tunnel *, void *), 96 + void *callback_data); 121 97 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, 122 98 struct tb_port *dst, int transmit_path, 123 99 int transmit_ring, int receive_path, ··· 132 104 struct tb_port *down, int max_up, 133 105 int max_down); 134 106 135 - void tb_tunnel_free(struct tb_tunnel *tunnel); 107 + void tb_tunnel_put(struct tb_tunnel *tunnel); 136 108 int tb_tunnel_activate(struct tb_tunnel *tunnel); 137 109 void tb_tunnel_deactivate(struct tb_tunnel *tunnel); 110 + 
111 + /** 112 + * tb_tunnel_is_active() - Is tunnel fully activated 113 + * @tunnel: Tunnel to check 114 + * 115 + * Returns %true if @tunnel is fully activated. For other than DP 116 + * tunnels this is pretty much once tb_tunnel_activate() returns 117 + * successfully. However, for DP tunnels this returns %true only once the 118 + * DPRX capabilities read has been issued successfully. 119 + */ 120 + static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) 121 + { 122 + return tunnel->state == TB_TUNNEL_ACTIVE; 123 + } 124 + 138 125 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel); 139 126 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, 140 127 const struct tb_port *port);