Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v6.5 merge window

This includes following Thunderbolt/USB4 changes for the v6.5 merge
window:

- Improve debug logging
- Rework for TMU and CL states handling
- Retimer access improvements
- Initial support for USB4 v2 features:

* 80G symmetric link support
* New notifications
* PCIe extended encapsulation
* Enhanced uni-directional TMU mode
* CL2 link low power state
* DisplayPort 2.x tunneling

- Support for Intel Barlow Ridge Thunderbolt/USB4 controller
- Minor fixes and improvements

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (55 commits)
thunderbolt: Add test case for 3 DisplayPort tunnels
thunderbolt: Add DisplayPort 2.x tunneling support
thunderbolt: Make bandwidth allocation mode function names consistent
thunderbolt: Enable CL2 low power state
thunderbolt: Add support for enhanced uni-directional TMU mode
thunderbolt: Increase NVM_MAX_SIZE to support Intel Barlow Ridge controller
thunderbolt: Move constants related to NVM into nvm.c
thunderbolt: Limit Intel Barlow Ridge USB3 bandwidth
thunderbolt: Add Intel Barlow Ridge PCI ID
thunderbolt: Fix PCIe adapter capability length for USB4 v2 routers
thunderbolt: Fix DisplayPort IN adapter capability length for USB4 v2 routers
thunderbolt: Add two additional double words for adapters TMU for USB4 v2 routers
thunderbolt: Enable USB4 v2 PCIe TLP/DLLP extended encapsulation
thunderbolt: Announce USB4 v2 connection manager support
thunderbolt: Reset USB4 v2 host router
thunderbolt: Add the new USB4 v2 notification types
thunderbolt: Add support for USB4 v2 80 Gb/s link
thunderbolt: Identify USB4 v2 routers
thunderbolt: Do not touch lane 1 adapter path config space
thunderbolt: Ignore data CRC mismatch for USB4 routers
...

+2182 -1017
+1 -1
drivers/thunderbolt/Makefile
··· 2 2 obj-${CONFIG_USB4} := thunderbolt.o 3 3 thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o 4 4 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o 5 - thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o 5 + thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o clx.o 6 6 7 7 thunderbolt-${CONFIG_ACPI} += acpi.o 8 8 thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
+2 -3
drivers/thunderbolt/acpi.c
··· 296 296 297 297 static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw) 298 298 { 299 + struct tb_switch *parent_sw = tb_switch_parent(sw); 299 300 struct acpi_device *adev = NULL; 300 - struct tb_switch *parent_sw; 301 301 302 302 /* 303 303 * Device routers exists under the downstream facing USB4 port 304 304 * of the parent router. Their _ADR is always 0. 305 305 */ 306 - parent_sw = tb_switch_parent(sw); 307 306 if (parent_sw) { 308 - struct tb_port *port = tb_port_at(tb_route(sw), parent_sw); 307 + struct tb_port *port = tb_switch_downstream_port(sw); 309 308 struct acpi_device *port_adev; 310 309 311 310 port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),
+423
drivers/thunderbolt/clx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * CLx support 4 + * 5 + * Copyright (C) 2020 - 2023, Intel Corporation 6 + * Authors: Gil Fine <gil.fine@intel.com> 7 + * Mika Westerberg <mika.westerberg@linux.intel.com> 8 + */ 9 + 10 + #include <linux/module.h> 11 + 12 + #include "tb.h" 13 + 14 + static bool clx_enabled = true; 15 + module_param_named(clx, clx_enabled, bool, 0444); 16 + MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)"); 17 + 18 + static const char *clx_name(unsigned int clx) 19 + { 20 + switch (clx) { 21 + case TB_CL0S | TB_CL1 | TB_CL2: 22 + return "CL0s/CL1/CL2"; 23 + case TB_CL1 | TB_CL2: 24 + return "CL1/CL2"; 25 + case TB_CL0S | TB_CL2: 26 + return "CL0s/CL2"; 27 + case TB_CL0S | TB_CL1: 28 + return "CL0s/CL1"; 29 + case TB_CL0S: 30 + return "CL0s"; 31 + case 0: 32 + return "disabled"; 33 + default: 34 + return "unknown"; 35 + } 36 + } 37 + 38 + static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary) 39 + { 40 + u32 phy; 41 + int ret; 42 + 43 + ret = tb_port_read(port, &phy, TB_CFG_PORT, 44 + port->cap_phy + LANE_ADP_CS_1, 1); 45 + if (ret) 46 + return ret; 47 + 48 + if (secondary) 49 + phy |= LANE_ADP_CS_1_PMS; 50 + else 51 + phy &= ~LANE_ADP_CS_1_PMS; 52 + 53 + return tb_port_write(port, &phy, TB_CFG_PORT, 54 + port->cap_phy + LANE_ADP_CS_1, 1); 55 + } 56 + 57 + static int tb_port_pm_secondary_enable(struct tb_port *port) 58 + { 59 + return tb_port_pm_secondary_set(port, true); 60 + } 61 + 62 + static int tb_port_pm_secondary_disable(struct tb_port *port) 63 + { 64 + return tb_port_pm_secondary_set(port, false); 65 + } 66 + 67 + /* Called for USB4 or Titan Ridge routers only */ 68 + static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx) 69 + { 70 + u32 val, mask = 0; 71 + bool ret; 72 + 73 + /* Don't enable CLx in case of two single-lane links */ 74 + if (!port->bonded && port->dual_link_port) 75 + return false; 76 + 77 + /* Don't enable CLx in case of 
inter-domain link */ 78 + if (port->xdomain) 79 + return false; 80 + 81 + if (tb_switch_is_usb4(port->sw)) { 82 + if (!usb4_port_clx_supported(port)) 83 + return false; 84 + } else if (!tb_lc_is_clx_supported(port)) { 85 + return false; 86 + } 87 + 88 + if (clx & TB_CL0S) 89 + mask |= LANE_ADP_CS_0_CL0S_SUPPORT; 90 + if (clx & TB_CL1) 91 + mask |= LANE_ADP_CS_0_CL1_SUPPORT; 92 + if (clx & TB_CL2) 93 + mask |= LANE_ADP_CS_0_CL2_SUPPORT; 94 + 95 + ret = tb_port_read(port, &val, TB_CFG_PORT, 96 + port->cap_phy + LANE_ADP_CS_0, 1); 97 + if (ret) 98 + return false; 99 + 100 + return !!(val & mask); 101 + } 102 + 103 + static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable) 104 + { 105 + u32 phy, mask = 0; 106 + int ret; 107 + 108 + if (clx & TB_CL0S) 109 + mask |= LANE_ADP_CS_1_CL0S_ENABLE; 110 + if (clx & TB_CL1) 111 + mask |= LANE_ADP_CS_1_CL1_ENABLE; 112 + if (clx & TB_CL2) 113 + mask |= LANE_ADP_CS_1_CL2_ENABLE; 114 + 115 + if (!mask) 116 + return -EOPNOTSUPP; 117 + 118 + ret = tb_port_read(port, &phy, TB_CFG_PORT, 119 + port->cap_phy + LANE_ADP_CS_1, 1); 120 + if (ret) 121 + return ret; 122 + 123 + if (enable) 124 + phy |= mask; 125 + else 126 + phy &= ~mask; 127 + 128 + return tb_port_write(port, &phy, TB_CFG_PORT, 129 + port->cap_phy + LANE_ADP_CS_1, 1); 130 + } 131 + 132 + static int tb_port_clx_disable(struct tb_port *port, unsigned int clx) 133 + { 134 + return tb_port_clx_set(port, clx, false); 135 + } 136 + 137 + static int tb_port_clx_enable(struct tb_port *port, unsigned int clx) 138 + { 139 + return tb_port_clx_set(port, clx, true); 140 + } 141 + 142 + static int tb_port_clx(struct tb_port *port) 143 + { 144 + u32 val; 145 + int ret; 146 + 147 + if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2)) 148 + return 0; 149 + 150 + ret = tb_port_read(port, &val, TB_CFG_PORT, 151 + port->cap_phy + LANE_ADP_CS_1, 1); 152 + if (ret) 153 + return ret; 154 + 155 + if (val & LANE_ADP_CS_1_CL0S_ENABLE) 156 + ret |= TB_CL0S; 157 + if (val 
& LANE_ADP_CS_1_CL1_ENABLE) 158 + ret |= TB_CL1; 159 + if (val & LANE_ADP_CS_1_CL2_ENABLE) 160 + ret |= TB_CL2; 161 + 162 + return ret; 163 + } 164 + 165 + /** 166 + * tb_port_clx_is_enabled() - Is given CL state enabled 167 + * @port: USB4 port to check 168 + * @clx: Mask of CL states to check 169 + * 170 + * Returns true if any of the given CL states is enabled for @port. 171 + */ 172 + bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx) 173 + { 174 + return !!(tb_port_clx(port) & clx); 175 + } 176 + 177 + /** 178 + * tb_switch_clx_init() - Initialize router CL states 179 + * @sw: Router 180 + * 181 + * Can be called for any router. Initializes the current CL state by 182 + * reading it from the hardware. 183 + * 184 + * Returns %0 in case of success and negative errno in case of failure. 185 + */ 186 + int tb_switch_clx_init(struct tb_switch *sw) 187 + { 188 + struct tb_port *up, *down; 189 + unsigned int clx, tmp; 190 + 191 + if (tb_switch_is_icm(sw)) 192 + return 0; 193 + 194 + if (!tb_route(sw)) 195 + return 0; 196 + 197 + if (!tb_switch_clx_is_supported(sw)) 198 + return 0; 199 + 200 + up = tb_upstream_port(sw); 201 + down = tb_switch_downstream_port(sw); 202 + 203 + clx = tb_port_clx(up); 204 + tmp = tb_port_clx(down); 205 + if (clx != tmp) 206 + tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n", 207 + clx, tmp); 208 + 209 + tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx)); 210 + 211 + sw->clx = clx; 212 + return 0; 213 + } 214 + 215 + static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) 216 + { 217 + struct tb_port *up, *down; 218 + int ret; 219 + 220 + if (!tb_route(sw)) 221 + return 0; 222 + 223 + up = tb_upstream_port(sw); 224 + down = tb_switch_downstream_port(sw); 225 + ret = tb_port_pm_secondary_enable(up); 226 + if (ret) 227 + return ret; 228 + 229 + return tb_port_pm_secondary_disable(down); 230 + } 231 + 232 + static int tb_switch_mask_clx_objections(struct tb_switch *sw) 233 + { 234 + int up_port = 
sw->config.upstream_port_number; 235 + u32 offset, val[2], mask_obj, unmask_obj; 236 + int ret, i; 237 + 238 + /* Only Titan Ridge of pre-USB4 devices support CLx states */ 239 + if (!tb_switch_is_titan_ridge(sw)) 240 + return 0; 241 + 242 + if (!tb_route(sw)) 243 + return 0; 244 + 245 + /* 246 + * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: 247 + * Port A consists of lane adapters 1,2 and 248 + * Port B consists of lane adapters 3,4 249 + * If upstream port is A, (lanes are 1,2), we mask objections from 250 + * port B (lanes 3,4) and unmask objections from Port A and vice-versa. 251 + */ 252 + if (up_port == 1) { 253 + mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 254 + unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 255 + offset = TB_LOW_PWR_C1_CL1; 256 + } else { 257 + mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 258 + unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 259 + offset = TB_LOW_PWR_C3_CL1; 260 + } 261 + 262 + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 263 + sw->cap_lp + offset, ARRAY_SIZE(val)); 264 + if (ret) 265 + return ret; 266 + 267 + for (i = 0; i < ARRAY_SIZE(val); i++) { 268 + val[i] |= mask_obj; 269 + val[i] &= ~unmask_obj; 270 + } 271 + 272 + return tb_sw_write(sw, &val, TB_CFG_SWITCH, 273 + sw->cap_lp + offset, ARRAY_SIZE(val)); 274 + } 275 + 276 + /** 277 + * tb_switch_clx_is_supported() - Is CLx supported on this type of router 278 + * @sw: The router to check CLx support for 279 + */ 280 + bool tb_switch_clx_is_supported(const struct tb_switch *sw) 281 + { 282 + if (!clx_enabled) 283 + return false; 284 + 285 + if (sw->quirks & QUIRK_NO_CLX) 286 + return false; 287 + 288 + /* 289 + * CLx is not enabled and validated on Intel USB4 platforms 290 + * before Alder Lake. 
291 + */ 292 + if (tb_switch_is_tiger_lake(sw)) 293 + return false; 294 + 295 + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); 296 + } 297 + 298 + static bool validate_mask(unsigned int clx) 299 + { 300 + /* Previous states need to be enabled */ 301 + if (clx & TB_CL1) 302 + return (clx & TB_CL0S) == TB_CL0S; 303 + return true; 304 + } 305 + 306 + /** 307 + * tb_switch_clx_enable() - Enable CLx on upstream port of specified router 308 + * @sw: Router to enable CLx for 309 + * @clx: The CLx state to enable 310 + * 311 + * CLx is enabled only if both sides of the link support CLx, and if both sides 312 + * of the link are not configured as two single lane links and only if the link 313 + * is not inter-domain link. The complete set of conditions is described in CM 314 + * Guide 1.0 section 8.1. 315 + * 316 + * Returns %0 on success or an error code on failure. 317 + */ 318 + int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx) 319 + { 320 + bool up_clx_support, down_clx_support; 321 + struct tb_switch *parent_sw; 322 + struct tb_port *up, *down; 323 + int ret; 324 + 325 + if (!clx || sw->clx == clx) 326 + return 0; 327 + 328 + if (!validate_mask(clx)) 329 + return -EINVAL; 330 + 331 + parent_sw = tb_switch_parent(sw); 332 + if (!parent_sw) 333 + return 0; 334 + 335 + if (!tb_switch_clx_is_supported(parent_sw) || 336 + !tb_switch_clx_is_supported(sw)) 337 + return 0; 338 + 339 + /* Only support CL2 for v2 routers */ 340 + if ((clx & TB_CL2) && 341 + (usb4_switch_version(parent_sw) < 2 || 342 + usb4_switch_version(sw) < 2)) 343 + return -EOPNOTSUPP; 344 + 345 + ret = tb_switch_pm_secondary_resolve(sw); 346 + if (ret) 347 + return ret; 348 + 349 + up = tb_upstream_port(sw); 350 + down = tb_switch_downstream_port(sw); 351 + 352 + up_clx_support = tb_port_clx_supported(up, clx); 353 + down_clx_support = tb_port_clx_supported(down, clx); 354 + 355 + tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx), 356 + up_clx_support ? 
"" : "not "); 357 + tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx), 358 + down_clx_support ? "" : "not "); 359 + 360 + if (!up_clx_support || !down_clx_support) 361 + return -EOPNOTSUPP; 362 + 363 + ret = tb_port_clx_enable(up, clx); 364 + if (ret) 365 + return ret; 366 + 367 + ret = tb_port_clx_enable(down, clx); 368 + if (ret) { 369 + tb_port_clx_disable(up, clx); 370 + return ret; 371 + } 372 + 373 + ret = tb_switch_mask_clx_objections(sw); 374 + if (ret) { 375 + tb_port_clx_disable(up, clx); 376 + tb_port_clx_disable(down, clx); 377 + return ret; 378 + } 379 + 380 + sw->clx |= clx; 381 + 382 + tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx)); 383 + return 0; 384 + } 385 + 386 + /** 387 + * tb_switch_clx_disable() - Disable CLx on upstream port of specified router 388 + * @sw: Router to disable CLx for 389 + * 390 + * Disables all CL states of the given router. Can be called on any 391 + * router and if the states were not enabled already does nothing. 392 + * 393 + * Returns the CL states that were disabled or negative errno in case of 394 + * failure. 395 + */ 396 + int tb_switch_clx_disable(struct tb_switch *sw) 397 + { 398 + unsigned int clx = sw->clx; 399 + struct tb_port *up, *down; 400 + int ret; 401 + 402 + if (!tb_switch_clx_is_supported(sw)) 403 + return 0; 404 + 405 + if (!clx) 406 + return 0; 407 + 408 + up = tb_upstream_port(sw); 409 + down = tb_switch_downstream_port(sw); 410 + 411 + ret = tb_port_clx_disable(up, clx); 412 + if (ret) 413 + return ret; 414 + 415 + ret = tb_port_clx_disable(down, clx); 416 + if (ret) 417 + return ret; 418 + 419 + sw->clx = 0; 420 + 421 + tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx)); 422 + return clx; 423 + }
+28
drivers/thunderbolt/ctl.c
··· 409 409 case TB_CFG_ERROR_HEC_ERROR_DETECTED: 410 410 case TB_CFG_ERROR_FLOW_CONTROL_ERROR: 411 411 case TB_CFG_ERROR_DP_BW: 412 + case TB_CFG_ERROR_ROP_CMPLT: 413 + case TB_CFG_ERROR_POP_CMPLT: 414 + case TB_CFG_ERROR_PCIE_WAKE: 415 + case TB_CFG_ERROR_DP_CON_CHANGE: 416 + case TB_CFG_ERROR_DPTX_DISCOVERY: 417 + case TB_CFG_ERROR_LINK_RECOVERY: 418 + case TB_CFG_ERROR_ASYM_LINK: 412 419 return true; 413 420 414 421 default: ··· 764 757 break; 765 758 case TB_CFG_ERROR_DP_BW: 766 759 name = "DP_BW"; 760 + break; 761 + case TB_CFG_ERROR_ROP_CMPLT: 762 + name = "router operation completion"; 763 + break; 764 + case TB_CFG_ERROR_POP_CMPLT: 765 + name = "port operation completion"; 766 + break; 767 + case TB_CFG_ERROR_PCIE_WAKE: 768 + name = "PCIe wake"; 769 + break; 770 + case TB_CFG_ERROR_DP_CON_CHANGE: 771 + name = "DP connector change"; 772 + break; 773 + case TB_CFG_ERROR_DPTX_DISCOVERY: 774 + name = "DPTX discovery"; 775 + break; 776 + case TB_CFG_ERROR_LINK_RECOVERY: 777 + name = "link recovery"; 778 + break; 779 + case TB_CFG_ERROR_ASYM_LINK: 780 + name = "asymmetric link"; 767 781 break; 768 782 default: 769 783 name = "unknown";
+44 -20
drivers/thunderbolt/debugfs.c
··· 14 14 #include "tb.h" 15 15 #include "sb_regs.h" 16 16 17 - #define PORT_CAP_PCIE_LEN 1 17 + #define PORT_CAP_V1_PCIE_LEN 1 18 + #define PORT_CAP_V2_PCIE_LEN 2 18 19 #define PORT_CAP_POWER_LEN 2 19 20 #define PORT_CAP_LANE_LEN 3 20 21 #define PORT_CAP_USB3_LEN 5 21 - #define PORT_CAP_DP_LEN 8 22 - #define PORT_CAP_TMU_LEN 8 22 + #define PORT_CAP_DP_V1_LEN 9 23 + #define PORT_CAP_DP_V2_LEN 14 24 + #define PORT_CAP_TMU_V1_LEN 8 25 + #define PORT_CAP_TMU_V2_LEN 10 23 26 #define PORT_CAP_BASIC_LEN 9 24 27 #define PORT_CAP_USB4_LEN 20 25 28 ··· 556 553 struct usb4_port *usb4 = port->usb4; 557 554 struct tb_switch *sw = port->sw; 558 555 struct tb_margining *margining; 556 + struct tb_switch *down_sw; 559 557 struct tb *tb = sw->tb; 560 - int ret; 558 + int ret, clx; 561 559 562 560 if (val != 1) 563 561 return -EINVAL; ··· 570 566 goto out_rpm_put; 571 567 } 572 568 573 - /* 574 - * CL states may interfere with lane margining so inform the user know 575 - * and bail out. 576 - */ 577 - if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) { 578 - tb_port_warn(port, 579 - "CL states are enabled, Disable them with clx=0 and re-connect\n"); 580 - ret = -EINVAL; 581 - goto out_unlock; 569 + if (tb_is_upstream_port(port)) 570 + down_sw = sw; 571 + else if (port->remote) 572 + down_sw = port->remote->sw; 573 + else 574 + down_sw = NULL; 575 + 576 + if (down_sw) { 577 + /* 578 + * CL states may interfere with lane margining so 579 + * disable them temporarily now. 
580 + */ 581 + ret = tb_switch_clx_disable(down_sw); 582 + if (ret < 0) { 583 + tb_sw_warn(down_sw, "failed to disable CL states\n"); 584 + goto out_unlock; 585 + } 586 + clx = ret; 582 587 } 583 588 584 589 margining = usb4->margining; ··· 599 586 margining->right_high, 600 587 USB4_MARGIN_SW_COUNTER_CLEAR); 601 588 if (ret) 602 - goto out_unlock; 589 + goto out_clx; 603 590 604 591 ret = usb4_port_sw_margin_errors(port, &margining->results[0]); 605 592 } else { ··· 613 600 margining->right_high, margining->results); 614 601 } 615 602 603 + out_clx: 604 + if (down_sw) 605 + tb_switch_clx_enable(down_sw, clx); 616 606 out_unlock: 617 607 mutex_unlock(&tb->lock); 618 608 out_rpm_put: ··· 1164 1148 break; 1165 1149 1166 1150 case TB_PORT_CAP_TIME1: 1167 - length = PORT_CAP_TMU_LEN; 1151 + if (usb4_switch_version(port->sw) < 2) 1152 + length = PORT_CAP_TMU_V1_LEN; 1153 + else 1154 + length = PORT_CAP_TMU_V2_LEN; 1168 1155 break; 1169 1156 1170 1157 case TB_PORT_CAP_POWER: ··· 1176 1157 1177 1158 case TB_PORT_CAP_ADAP: 1178 1159 if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) { 1179 - length = PORT_CAP_PCIE_LEN; 1180 - } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) { 1181 - if (usb4_dp_port_bw_mode_supported(port)) 1182 - length = PORT_CAP_DP_LEN + 1; 1160 + if (usb4_switch_version(port->sw) < 2) 1161 + length = PORT_CAP_V1_PCIE_LEN; 1183 1162 else 1184 - length = PORT_CAP_DP_LEN; 1163 + length = PORT_CAP_V2_PCIE_LEN; 1164 + } else if (tb_port_is_dpin(port)) { 1165 + if (usb4_switch_version(port->sw) < 2) 1166 + length = PORT_CAP_DP_V1_LEN; 1167 + else 1168 + length = PORT_CAP_DP_V2_LEN; 1169 + } else if (tb_port_is_dpout(port)) { 1170 + length = PORT_CAP_DP_V1_LEN; 1185 1171 } else if (tb_port_is_usb3_down(port) || 1186 1172 tb_port_is_usb3_up(port)) { 1187 1173 length = PORT_CAP_USB3_LEN;
+12 -8
drivers/thunderbolt/dma_test.c
··· 192 192 } 193 193 194 194 ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid, 195 - dt->tx_ring ? dt->tx_ring->hop : 0, 195 + dt->tx_ring ? dt->tx_ring->hop : -1, 196 196 dt->rx_hopid, 197 - dt->rx_ring ? dt->rx_ring->hop : 0); 197 + dt->rx_ring ? dt->rx_ring->hop : -1); 198 198 if (ret) { 199 199 dma_test_free_rings(dt); 200 200 return ret; ··· 218 218 tb_ring_stop(dt->tx_ring); 219 219 220 220 ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, 221 - dt->tx_ring ? dt->tx_ring->hop : 0, 221 + dt->tx_ring ? dt->tx_ring->hop : -1, 222 222 dt->rx_hopid, 223 - dt->rx_ring ? dt->rx_ring->hop : 0); 223 + dt->rx_ring ? dt->rx_ring->hop : -1); 224 224 if (ret) 225 225 dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); 226 226 ··· 412 412 static int speed_validate(u64 val) 413 413 { 414 414 switch (val) { 415 + case 40: 415 416 case 20: 416 417 case 10: 417 418 case 0: ··· 490 489 if (!dt->error_code) { 491 490 if (dt->link_speed && dt->xd->link_speed != dt->link_speed) { 492 491 dt->error_code = DMA_TEST_SPEED_ERROR; 493 - } else if (dt->link_width && 494 - dt->xd->link_width != dt->link_width) { 495 - dt->error_code = DMA_TEST_WIDTH_ERROR; 492 + } else if (dt->link_width) { 493 + const struct tb_xdomain *xd = dt->xd; 494 + 495 + if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) || 496 + (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL)) 497 + dt->error_code = DMA_TEST_WIDTH_ERROR; 496 498 } else if (dt->packets_to_send != dt->packets_sent || 497 499 dt->packets_to_receive != dt->packets_received || 498 500 dt->crc_errors || dt->buffer_overflow_errors) { ··· 760 756 761 757 MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>"); 762 758 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 763 - MODULE_DESCRIPTION("DMA traffic test driver"); 759 + MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver"); 764 760 MODULE_LICENSE("GPL v2");
+1 -2
drivers/thunderbolt/eeprom.c
··· 605 605 crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); 606 606 if (crc != header->data_crc32) { 607 607 tb_sw_warn(sw, 608 - "DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n", 608 + "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n", 609 609 header->data_crc32, crc); 610 - return -EINVAL; 611 610 } 612 611 613 612 return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
+14 -16
drivers/thunderbolt/icm.c
··· 644 644 return ret; 645 645 } 646 646 647 - static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, 648 - u64 route, u8 connection_id, u8 connection_key, 649 - u8 link, u8 depth, bool boot) 647 + static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id, 648 + u8 connection_key, u8 link, u8 depth, bool boot) 650 649 { 650 + struct tb_switch *parent_sw = tb_switch_parent(sw); 651 + 651 652 /* Disconnect from parent */ 652 - tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 653 - /* Re-connect via updated port*/ 653 + tb_switch_downstream_port(sw)->remote = NULL; 654 + /* Re-connect via updated port */ 654 655 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 655 656 656 657 /* Update with the new addressing information */ ··· 672 671 673 672 static void remove_switch(struct tb_switch *sw) 674 673 { 675 - struct tb_switch *parent_sw; 676 - 677 - parent_sw = tb_to_switch(sw->dev.parent); 678 - tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 674 + tb_switch_downstream_port(sw)->remote = NULL; 679 675 tb_switch_remove(sw); 680 676 } 681 677 ··· 753 755 if (sw) { 754 756 u8 phy_port, sw_phy_port; 755 757 756 - parent_sw = tb_to_switch(sw->dev.parent); 757 758 sw_phy_port = tb_phy_port_from_link(sw->link); 758 759 phy_port = tb_phy_port_from_link(link); 759 760 ··· 782 785 route = tb_route(sw); 783 786 } 784 787 785 - update_switch(parent_sw, sw, route, pkg->connection_id, 788 + update_switch(sw, route, pkg->connection_id, 786 789 pkg->connection_key, link, depth, boot); 787 790 tb_switch_put(sw); 788 791 return; ··· 850 853 sw->security_level = security_level; 851 854 sw->boot = boot; 852 855 sw->link_speed = speed_gen3 ? 20 : 10; 853 - sw->link_width = dual_lane ? 2 : 1; 856 + sw->link_width = dual_lane ? 
TB_LINK_WIDTH_DUAL : 857 + TB_LINK_WIDTH_SINGLE; 854 858 sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); 855 859 856 860 if (add_switch(parent_sw, sw)) ··· 1234 1236 if (sw) { 1235 1237 /* Update the switch if it is still in the same place */ 1236 1238 if (tb_route(sw) == route && !!sw->authorized == authorized) { 1237 - parent_sw = tb_to_switch(sw->dev.parent); 1238 - update_switch(parent_sw, sw, route, pkg->connection_id, 1239 - 0, 0, 0, boot); 1239 + update_switch(sw, route, pkg->connection_id, 0, 0, 0, 1240 + boot); 1240 1241 tb_switch_put(sw); 1241 1242 return; 1242 1243 } ··· 1273 1276 sw->security_level = security_level; 1274 1277 sw->boot = boot; 1275 1278 sw->link_speed = speed_gen3 ? 20 : 10; 1276 - sw->link_width = dual_lane ? 2 : 1; 1279 + sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL : 1280 + TB_LINK_WIDTH_SINGLE; 1277 1281 sw->rpm = force_rtd3; 1278 1282 if (!sw->rpm) 1279 1283 sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+49 -4
drivers/thunderbolt/nhi.c
··· 46 46 #define QUIRK_AUTO_CLEAR_INT BIT(0) 47 47 #define QUIRK_E2E BIT(1) 48 48 49 + static bool host_reset = true; 50 + module_param(host_reset, bool, 0444); 51 + MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)"); 52 + 49 53 static int ring_interrupt_index(const struct tb_ring *ring) 50 54 { 51 55 int bit = ring->hop; ··· 60 56 61 57 static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) 62 58 { 63 - if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) 64 - return; 65 - iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); 59 + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) { 60 + u32 val; 61 + 62 + val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); 63 + iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); 64 + } else { 65 + iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); 66 + } 66 67 } 67 68 68 69 static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) ··· 1221 1212 str_enabled_disabled(port_ok)); 1222 1213 } 1223 1214 1215 + static void nhi_reset(struct tb_nhi *nhi) 1216 + { 1217 + ktime_t timeout; 1218 + u32 val; 1219 + 1220 + val = ioread32(nhi->iobase + REG_CAPS); 1221 + /* Reset only v2 and later routers */ 1222 + if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2) 1223 + return; 1224 + 1225 + if (!host_reset) { 1226 + dev_dbg(&nhi->pdev->dev, "skipping host router reset\n"); 1227 + return; 1228 + } 1229 + 1230 + iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET); 1231 + msleep(100); 1232 + 1233 + timeout = ktime_add_ms(ktime_get(), 500); 1234 + do { 1235 + val = ioread32(nhi->iobase + REG_RESET); 1236 + if (!(val & REG_RESET_HRR)) { 1237 + dev_warn(&nhi->pdev->dev, "host router reset successful\n"); 1238 + return; 1239 + } 1240 + usleep_range(10, 20); 1241 + } while (ktime_before(ktime_get(), timeout)); 1242 + 1243 + dev_warn(&nhi->pdev->dev, "timeout resetting host router\n"); 1244 + } 1245 + 1224 1246 static int nhi_init_msi(struct tb_nhi *nhi) 
1225 1247 { 1226 1248 struct pci_dev *pdev = nhi->pdev; ··· 1352 1312 nhi->ops = (const struct tb_nhi_ops *)id->driver_data; 1353 1313 /* cannot fail - table is allocated in pcim_iomap_regions */ 1354 1314 nhi->iobase = pcim_iomap_table(pdev)[0]; 1355 - nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; 1315 + nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff; 1356 1316 dev_dbg(dev, "total paths: %d\n", nhi->hop_count); 1357 1317 1358 1318 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, ··· 1364 1324 1365 1325 nhi_check_quirks(nhi); 1366 1326 nhi_check_iommu(nhi); 1327 + 1328 + nhi_reset(nhi); 1367 1329 1368 1330 res = nhi_init_msi(nhi); 1369 1331 if (res) ··· 1517 1475 .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 1518 1476 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), 1519 1477 .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 1478 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) }, 1479 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) }, 1520 1480 1521 1481 /* Any USB4 compliant host */ 1522 1482 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, ··· 1527 1483 }; 1528 1484 1529 1485 MODULE_DEVICE_TABLE(pci, nhi_ids); 1486 + MODULE_DESCRIPTION("Thunderbolt/USB4 core driver"); 1530 1487 MODULE_LICENSE("GPL"); 1531 1488 1532 1489 static struct pci_driver nhi_driver = {
+4
drivers/thunderbolt/nhi.h
··· 75 75 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef 76 76 #define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e 77 77 #define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d 78 + #define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781 79 + #define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784 80 + #define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786 81 + #define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4 78 82 #define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2 79 83 #define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2 80 84 #define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
+12 -7
drivers/thunderbolt/nhi_regs.h
··· 37 37 /* NHI registers in bar 0 */ 38 38 39 39 /* 40 - * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT) 40 + * 16 bytes per entry, one entry for every hop (REG_CAPS) 41 41 * 00: physical pointer to an array of struct ring_desc 42 42 * 08: ring tail (set by NHI) 43 43 * 10: ring head (index of first non posted descriptor) ··· 46 46 #define REG_TX_RING_BASE 0x00000 47 47 48 48 /* 49 - * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT) 49 + * 16 bytes per entry, one entry for every hop (REG_CAPS) 50 50 * 00: physical pointer to an array of struct ring_desc 51 51 * 08: ring head (index of first not posted descriptor) 52 52 * 10: ring tail (set by NHI) ··· 56 56 #define REG_RX_RING_BASE 0x08000 57 57 58 58 /* 59 - * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT) 59 + * 32 bytes per entry, one entry for every hop (REG_CAPS) 60 60 * 00: enum_ring_flags 61 61 * 04: isoch time stamp ?? (write 0) 62 62 * ..: unknown ··· 64 64 #define REG_TX_OPTIONS_BASE 0x19800 65 65 66 66 /* 67 - * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT) 67 + * 32 bytes per entry, one entry for every hop (REG_CAPS) 68 68 * 00: enum ring_flags 69 69 * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to 70 70 * the corresponding TX hop id. ··· 77 77 78 78 /* 79 79 * three bitfields: tx, rx, rx overflow 80 - * Every bitfield contains one bit for every hop (REG_HOP_COUNT). 80 + * Every bitfield contains one bit for every hop (REG_CAPS). 81 81 * New interrupts are fired only after ALL registers have been 82 82 * read (even those containing only disabled rings). 83 83 */ ··· 87 87 88 88 /* 89 89 * two bitfields: rx, tx 90 - * Both bitfields contains one bit for every hop (REG_HOP_COUNT). To 90 + * Both bitfields contains one bit for every hop (REG_CAPS). To 91 91 * enable/disable interrupts set/clear the corresponding bits. 
92 92 */ 93 93 #define REG_RING_INTERRUPT_BASE 0x38200 ··· 104 104 #define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS) 105 105 106 106 /* The last 11 bits contain the number of hops supported by the NHI port. */ 107 - #define REG_HOP_COUNT 0x39640 107 + #define REG_CAPS 0x39640 108 + #define REG_CAPS_VERSION_MASK GENMASK(23, 16) 109 + #define REG_CAPS_VERSION_2 0x40 108 110 109 111 #define REG_DMA_MISC 0x39864 110 112 #define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2) 111 113 #define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17) 114 + 115 + #define REG_RESET 0x39898 116 + #define REG_RESET_HRR BIT(0) 112 117 113 118 #define REG_INMAIL_DATA 0x39900 114 119
+4
drivers/thunderbolt/nvm.c
··· 12 12 13 13 #include "tb.h" 14 14 15 + #define NVM_MIN_SIZE SZ_32K 16 + #define NVM_MAX_SIZE SZ_1M 17 + #define NVM_DATA_DWORDS 16 18 + 15 19 /* Intel specific NVM offsets */ 16 20 #define INTEL_NVM_DEVID 0x05 17 21 #define INTEL_NVM_VERSION 0x08
+10
drivers/thunderbolt/quirks.c
··· 10 10 static void quirk_force_power_link(struct tb_switch *sw) 11 11 { 12 12 sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER; 13 + tb_sw_dbg(sw, "forcing power to link controller\n"); 13 14 } 14 15 15 16 static void quirk_dp_credit_allocation(struct tb_switch *sw) ··· 75 74 quirk_usb3_maximum_bandwidth }, 76 75 { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000, 77 76 quirk_usb3_maximum_bandwidth }, 77 + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000, 78 + quirk_usb3_maximum_bandwidth }, 79 + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000, 80 + quirk_usb3_maximum_bandwidth }, 81 + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000, 82 + quirk_usb3_maximum_bandwidth }, 83 + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000, 84 + quirk_usb3_maximum_bandwidth }, 78 85 /* 79 86 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms. 80 87 */ ··· 114 105 if (q->device && q->device != sw->device) 115 106 continue; 116 107 108 + tb_sw_dbg(sw, "running %ps\n", q->hook); 117 109 q->hook(sw); 118 110 } 119 111 }
+49 -15
drivers/thunderbolt/retimer.c
··· 187 187 return ret; 188 188 } 189 189 190 + static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status) 191 + { 192 + int i; 193 + 194 + tb_port_dbg(port, "reading NVM authentication status of retimers\n"); 195 + 196 + /* 197 + * Before doing anything else, read the authentication status. 198 + * If the retimer has it set, store it for the new retimer 199 + * device instance. 200 + */ 201 + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) 202 + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); 203 + } 204 + 190 205 static void tb_retimer_set_inbound_sbtx(struct tb_port *port) 191 206 { 192 207 int i; 208 + 209 + /* 210 + * When USB4 port is online sideband communications are 211 + * already up. 212 + */ 213 + if (!usb4_port_device_is_offline(port->usb4)) 214 + return; 215 + 216 + tb_port_dbg(port, "enabling sideband transactions\n"); 193 217 194 218 for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) 195 219 usb4_port_retimer_set_inbound_sbtx(port, i); ··· 222 198 static void tb_retimer_unset_inbound_sbtx(struct tb_port *port) 223 199 { 224 200 int i; 201 + 202 + /* 203 + * When USB4 port is offline we need to keep the sideband 204 + * communications up to make it possible to communicate with 205 + * the connected retimers. 206 + */ 207 + if (usb4_port_device_is_offline(port->usb4)) 208 + return; 209 + 210 + tb_port_dbg(port, "disabling sideband transactions\n"); 225 211 226 212 for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) 227 213 usb4_port_retimer_unset_inbound_sbtx(port, i); ··· 263 229 rt->auth_status = 0; 264 230 265 231 if (val) { 232 + /* 233 + * When NVM authentication starts the retimer is not 234 + * accessible so calling tb_retimer_unset_inbound_sbtx() 235 + * will fail and therefore we do not call it. Exception 236 + * is when the validation fails or we only write the new 237 + * NVM image without authentication. 
238 + */ 266 239 tb_retimer_set_inbound_sbtx(rt->port); 267 240 if (val == AUTHENTICATE_ONLY) { 268 241 ret = tb_retimer_nvm_authenticate(rt, true); ··· 290 249 } 291 250 292 251 exit_unlock: 293 - tb_retimer_unset_inbound_sbtx(rt->port); 252 + if (ret || val == WRITE_ONLY) 253 + tb_retimer_unset_inbound_sbtx(rt->port); 294 254 mutex_unlock(&rt->tb->lock); 295 255 exit_rpm: 296 256 pm_runtime_mark_last_busy(&rt->dev); ··· 381 339 if (ret != -ENODEV) 382 340 tb_port_warn(port, "failed read retimer ProductId: %d\n", ret); 383 341 return ret; 384 - } 385 - 386 - if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) { 387 - tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n", 388 - vendor); 389 - return -EOPNOTSUPP; 390 342 } 391 343 392 344 /* ··· 491 455 return ret; 492 456 493 457 /* 458 + * Immediately after sending enumerate retimers read the 459 + * authentication status of each retimer. 460 + */ 461 + tb_retimer_nvm_authenticate_status(port, status); 462 + 463 + /* 494 464 * Enable sideband channel for each retimer. We can do this 495 465 * regardless whether there is device connected or not. 496 466 */ 497 467 tb_retimer_set_inbound_sbtx(port); 498 - 499 - /* 500 - * Before doing anything else, read the authentication status. 501 - * If the retimer has it set, store it for the new retimer 502 - * device instance. 503 - */ 504 - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) 505 - usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); 506 468 507 469 for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { 508 470 /*
+161 -433
drivers/thunderbolt/switch.c
··· 26 26 u32 status; 27 27 }; 28 28 29 - static bool clx_enabled = true; 30 - module_param_named(clx, clx_enabled, bool, 0444); 31 - MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)"); 32 - 33 29 /* 34 30 * Hold NVM authentication failure status per switch This information 35 31 * needs to stay around even when the switch gets power cycled so we ··· 723 727 * can be read from the path config space. Legacy 724 728 * devices we use hard-coded value. 725 729 */ 726 - if (tb_switch_is_usb4(port->sw)) { 730 + if (port->cap_usb4) { 727 731 struct tb_regs_hop hop; 728 732 729 733 if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2)) ··· 903 907 904 908 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> 905 909 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; 906 - return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; 910 + 911 + switch (speed) { 912 + case LANE_ADP_CS_1_CURRENT_SPEED_GEN4: 913 + return 40; 914 + case LANE_ADP_CS_1_CURRENT_SPEED_GEN3: 915 + return 20; 916 + default: 917 + return 10; 918 + } 907 919 } 908 920 909 921 /** 910 922 * tb_port_get_link_width() - Get current link width 911 923 * @port: Port to check (USB4 or CIO) 912 924 * 913 - * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) 914 - * or negative errno in case of failure. 925 + * Returns link width. Return the link width as encoded in &enum 926 + * tb_link_width or negative errno in case of failure. 
915 927 */ 916 928 int tb_port_get_link_width(struct tb_port *port) 917 929 { ··· 934 930 if (ret) 935 931 return ret; 936 932 933 + /* Matches the values in enum tb_link_width */ 937 934 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> 938 935 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; 939 936 } 940 937 941 - static bool tb_port_is_width_supported(struct tb_port *port, int width) 938 + static bool tb_port_is_width_supported(struct tb_port *port, 939 + unsigned int width_mask) 942 940 { 943 941 u32 phy, widths; 944 942 int ret; ··· 956 950 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> 957 951 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; 958 952 959 - return !!(widths & width); 953 + return widths & width_mask; 954 + } 955 + 956 + static bool is_gen4_link(struct tb_port *port) 957 + { 958 + return tb_port_get_link_speed(port) > 20; 960 959 } 961 960 962 961 /** 963 962 * tb_port_set_link_width() - Set target link width of the lane adapter 964 963 * @port: Lane adapter 965 - * @width: Target link width (%1 or %2) 964 + * @width: Target link width 966 965 * 967 966 * Sets the target link width of the lane adapter to @width. Does not 968 967 * enable/disable lane bonding. For that call tb_port_set_lane_bonding(). 
969 968 * 970 969 * Return: %0 in case of success and negative errno in case of error 971 970 */ 972 - int tb_port_set_link_width(struct tb_port *port, unsigned int width) 971 + int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width) 973 972 { 974 973 u32 val; 975 974 int ret; ··· 989 978 990 979 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; 991 980 switch (width) { 992 - case 1: 981 + case TB_LINK_WIDTH_SINGLE: 982 + /* Gen 4 link cannot be single */ 983 + if (is_gen4_link(port)) 984 + return -EOPNOTSUPP; 993 985 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << 994 986 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 995 987 break; 996 - case 2: 988 + case TB_LINK_WIDTH_DUAL: 997 989 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << 998 990 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; 999 991 break; ··· 1018 1004 * cases one should use tb_port_lane_bonding_enable() instead to enable 1019 1005 * lane bonding. 1020 1006 * 1021 - * As a side effect sets @port->bonding accordingly (and does the same 1022 - * for lane 1 too). 1023 - * 1024 1007 * Return: %0 in case of success and negative errno in case of error 1025 1008 */ 1026 - int tb_port_set_lane_bonding(struct tb_port *port, bool bonding) 1009 + static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding) 1027 1010 { 1028 1011 u32 val; 1029 1012 int ret; ··· 1038 1027 else 1039 1028 val &= ~LANE_ADP_CS_1_LB; 1040 1029 1041 - ret = tb_port_write(port, &val, TB_CFG_PORT, 1042 - port->cap_phy + LANE_ADP_CS_1, 1); 1043 - if (ret) 1044 - return ret; 1045 - 1046 - /* 1047 - * When lane 0 bonding is set it will affect lane 1 too so 1048 - * update both. 
1049 - */ 1050 - port->bonded = bonding; 1051 - port->dual_link_port->bonded = bonding; 1052 - 1053 - return 0; 1030 + return tb_port_write(port, &val, TB_CFG_PORT, 1031 + port->cap_phy + LANE_ADP_CS_1, 1); 1054 1032 } 1055 1033 1056 1034 /** ··· 1056 1056 */ 1057 1057 int tb_port_lane_bonding_enable(struct tb_port *port) 1058 1058 { 1059 + enum tb_link_width width; 1059 1060 int ret; 1060 1061 1061 1062 /* 1062 1063 * Enable lane bonding for both links if not already enabled by 1063 1064 * for example the boot firmware. 1064 1065 */ 1065 - ret = tb_port_get_link_width(port); 1066 - if (ret == 1) { 1067 - ret = tb_port_set_link_width(port, 2); 1066 + width = tb_port_get_link_width(port); 1067 + if (width == TB_LINK_WIDTH_SINGLE) { 1068 + ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL); 1068 1069 if (ret) 1069 1070 goto err_lane0; 1070 1071 } 1071 1072 1072 - ret = tb_port_get_link_width(port->dual_link_port); 1073 - if (ret == 1) { 1074 - ret = tb_port_set_link_width(port->dual_link_port, 2); 1073 + width = tb_port_get_link_width(port->dual_link_port); 1074 + if (width == TB_LINK_WIDTH_SINGLE) { 1075 + ret = tb_port_set_link_width(port->dual_link_port, 1076 + TB_LINK_WIDTH_DUAL); 1075 1077 if (ret) 1076 1078 goto err_lane0; 1077 1079 } 1078 1080 1079 - ret = tb_port_set_lane_bonding(port, true); 1080 - if (ret) 1081 - goto err_lane1; 1081 + /* 1082 + * Only set bonding if the link was not already bonded. This 1083 + * avoids the lane adapter to re-enter bonding state. 1084 + */ 1085 + if (width == TB_LINK_WIDTH_SINGLE) { 1086 + ret = tb_port_set_lane_bonding(port, true); 1087 + if (ret) 1088 + goto err_lane1; 1089 + } 1090 + 1091 + /* 1092 + * When lane 0 bonding is set it will affect lane 1 too so 1093 + * update both. 
1094 + */ 1095 + port->bonded = true; 1096 + port->dual_link_port->bonded = true; 1082 1097 1083 1098 return 0; 1084 1099 1085 1100 err_lane1: 1086 - tb_port_set_link_width(port->dual_link_port, 1); 1101 + tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE); 1087 1102 err_lane0: 1088 - tb_port_set_link_width(port, 1); 1103 + tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE); 1104 + 1089 1105 return ret; 1090 1106 } 1091 1107 ··· 1115 1099 void tb_port_lane_bonding_disable(struct tb_port *port) 1116 1100 { 1117 1101 tb_port_set_lane_bonding(port, false); 1118 - tb_port_set_link_width(port->dual_link_port, 1); 1119 - tb_port_set_link_width(port, 1); 1102 + tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE); 1103 + tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE); 1104 + port->dual_link_port->bonded = false; 1105 + port->bonded = false; 1120 1106 } 1121 1107 1122 1108 /** 1123 1109 * tb_port_wait_for_link_width() - Wait until link reaches specific width 1124 1110 * @port: Port to wait for 1125 - * @width: Expected link width (%1 or %2) 1111 + * @width_mask: Expected link width mask 1126 1112 * @timeout_msec: Timeout in ms how long to wait 1127 1113 * 1128 1114 * Should be used after both ends of the link have been bonded (or 1129 1115 * bonding has been disabled) to wait until the link actually reaches 1130 - * the expected state. Returns %-ETIMEDOUT if the @width was not reached 1131 - * within the given timeout, %0 if it did. 1116 + * the expected state. Returns %-ETIMEDOUT if the width was not reached 1117 + * within the given timeout, %0 if it did. Can be passed a mask of 1118 + * expected widths and succeeds if any of the widths is reached. 
1132 1119 */ 1133 - int tb_port_wait_for_link_width(struct tb_port *port, int width, 1120 + int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, 1134 1121 int timeout_msec) 1135 1122 { 1136 1123 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); 1137 1124 int ret; 1125 + 1126 + /* Gen 4 link does not support single lane */ 1127 + if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port)) 1128 + return -EOPNOTSUPP; 1138 1129 1139 1130 do { 1140 1131 ret = tb_port_get_link_width(port); ··· 1153 1130 */ 1154 1131 if (ret != -EACCES) 1155 1132 return ret; 1156 - } else if (ret == width) { 1133 + } else if (ret & width_mask) { 1157 1134 return 0; 1158 1135 } 1159 1136 ··· 1204 1181 if (ret) 1205 1182 return ret; 1206 1183 return tb_port_do_update_credits(port->dual_link_port); 1207 - } 1208 - 1209 - static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary) 1210 - { 1211 - u32 phy; 1212 - int ret; 1213 - 1214 - ret = tb_port_read(port, &phy, TB_CFG_PORT, 1215 - port->cap_phy + LANE_ADP_CS_1, 1); 1216 - if (ret) 1217 - return ret; 1218 - 1219 - if (secondary) 1220 - phy |= LANE_ADP_CS_1_PMS; 1221 - else 1222 - phy &= ~LANE_ADP_CS_1_PMS; 1223 - 1224 - return tb_port_write(port, &phy, TB_CFG_PORT, 1225 - port->cap_phy + LANE_ADP_CS_1, 1); 1226 - } 1227 - 1228 - static int tb_port_pm_secondary_enable(struct tb_port *port) 1229 - { 1230 - return __tb_port_pm_secondary_set(port, true); 1231 - } 1232 - 1233 - static int tb_port_pm_secondary_disable(struct tb_port *port) 1234 - { 1235 - return __tb_port_pm_secondary_set(port, false); 1236 - } 1237 - 1238 - /* Called for USB4 or Titan Ridge routers only */ 1239 - static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask) 1240 - { 1241 - u32 val, mask = 0; 1242 - bool ret; 1243 - 1244 - /* Don't enable CLx in case of two single-lane links */ 1245 - if (!port->bonded && port->dual_link_port) 1246 - return false; 1247 - 1248 - /* Don't enable CLx in case of 
inter-domain link */ 1249 - if (port->xdomain) 1250 - return false; 1251 - 1252 - if (tb_switch_is_usb4(port->sw)) { 1253 - if (!usb4_port_clx_supported(port)) 1254 - return false; 1255 - } else if (!tb_lc_is_clx_supported(port)) { 1256 - return false; 1257 - } 1258 - 1259 - if (clx_mask & TB_CL1) { 1260 - /* CL0s and CL1 are enabled and supported together */ 1261 - mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT; 1262 - } 1263 - if (clx_mask & TB_CL2) 1264 - mask |= LANE_ADP_CS_0_CL2_SUPPORT; 1265 - 1266 - ret = tb_port_read(port, &val, TB_CFG_PORT, 1267 - port->cap_phy + LANE_ADP_CS_0, 1); 1268 - if (ret) 1269 - return false; 1270 - 1271 - return !!(val & mask); 1272 - } 1273 - 1274 - static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable) 1275 - { 1276 - u32 phy, mask; 1277 - int ret; 1278 - 1279 - /* CL0s and CL1 are enabled and supported together */ 1280 - if (clx == TB_CL1) 1281 - mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE; 1282 - else 1283 - /* For now we support only CL0s and CL1. Not CL2 */ 1284 - return -EOPNOTSUPP; 1285 - 1286 - ret = tb_port_read(port, &phy, TB_CFG_PORT, 1287 - port->cap_phy + LANE_ADP_CS_1, 1); 1288 - if (ret) 1289 - return ret; 1290 - 1291 - if (enable) 1292 - phy |= mask; 1293 - else 1294 - phy &= ~mask; 1295 - 1296 - return tb_port_write(port, &phy, TB_CFG_PORT, 1297 - port->cap_phy + LANE_ADP_CS_1, 1); 1298 - } 1299 - 1300 - static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx) 1301 - { 1302 - return __tb_port_clx_set(port, clx, false); 1303 - } 1304 - 1305 - static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx) 1306 - { 1307 - return __tb_port_clx_set(port, clx, true); 1308 - } 1309 - 1310 - /** 1311 - * tb_port_is_clx_enabled() - Is given CL state enabled 1312 - * @port: USB4 port to check 1313 - * @clx_mask: Mask of CL states to check 1314 - * 1315 - * Returns true if any of the given CL states is enabled for @port. 
1316 - */ 1317 - bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask) 1318 - { 1319 - u32 val, mask = 0; 1320 - int ret; 1321 - 1322 - if (!tb_port_clx_supported(port, clx_mask)) 1323 - return false; 1324 - 1325 - if (clx_mask & TB_CL1) 1326 - mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE; 1327 - if (clx_mask & TB_CL2) 1328 - mask |= LANE_ADP_CS_1_CL2_ENABLE; 1329 - 1330 - ret = tb_port_read(port, &val, TB_CFG_PORT, 1331 - port->cap_phy + LANE_ADP_CS_1, 1); 1332 - if (ret) 1333 - return false; 1334 - 1335 - return !!(val & mask); 1336 1184 } 1337 1185 1338 1186 static int tb_port_start_lane_initialization(struct tb_port *port) ··· 1805 1911 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); 1806 1912 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); 1807 1913 1808 - static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, 1809 - char *buf) 1914 + static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, 1915 + char *buf) 1810 1916 { 1811 1917 struct tb_switch *sw = tb_to_switch(dev); 1918 + unsigned int width; 1812 1919 1813 - return sysfs_emit(buf, "%u\n", sw->link_width); 1920 + switch (sw->link_width) { 1921 + case TB_LINK_WIDTH_SINGLE: 1922 + case TB_LINK_WIDTH_ASYM_TX: 1923 + width = 1; 1924 + break; 1925 + case TB_LINK_WIDTH_DUAL: 1926 + width = 2; 1927 + break; 1928 + case TB_LINK_WIDTH_ASYM_RX: 1929 + width = 3; 1930 + break; 1931 + default: 1932 + WARN_ON_ONCE(1); 1933 + return -EINVAL; 1934 + } 1935 + 1936 + return sysfs_emit(buf, "%u\n", width); 1814 1937 } 1938 + static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); 1815 1939 1816 - /* 1817 - * Currently link has same amount of lanes both directions (1 or 2) but 1818 - * expose them separately to allow possible asymmetric links in the future. 
1819 - */ 1820 - static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); 1821 - static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); 1940 + static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, 1941 + char *buf) 1942 + { 1943 + struct tb_switch *sw = tb_to_switch(dev); 1944 + unsigned int width; 1945 + 1946 + switch (sw->link_width) { 1947 + case TB_LINK_WIDTH_SINGLE: 1948 + case TB_LINK_WIDTH_ASYM_RX: 1949 + width = 1; 1950 + break; 1951 + case TB_LINK_WIDTH_DUAL: 1952 + width = 2; 1953 + break; 1954 + case TB_LINK_WIDTH_ASYM_TX: 1955 + width = 3; 1956 + break; 1957 + default: 1958 + WARN_ON_ONCE(1); 1959 + return -EINVAL; 1960 + } 1961 + 1962 + return sysfs_emit(buf, "%u\n", width); 1963 + } 1964 + static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); 1822 1965 1823 1966 static ssize_t nvm_authenticate_show(struct device *dev, 1824 1967 struct device_attribute *attr, char *buf) ··· 2120 2189 const struct tb_switch *sw = tb_to_switch(dev); 2121 2190 const char *type; 2122 2191 2123 - if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { 2124 - if (add_uevent_var(env, "USB4_VERSION=1.0")) 2192 + if (tb_switch_is_usb4(sw)) { 2193 + if (add_uevent_var(env, "USB4_VERSION=%u.0", 2194 + usb4_switch_version(sw))) 2125 2195 return -ENOMEM; 2126 2196 } 2127 2197 ··· 2430 2498 /* 2431 2499 * For USB4 devices, we need to program the CM version 2432 2500 * accordingly so that it knows to expose all the 2433 - * additional capabilities. 2501 + * additional capabilities. Program it according to USB4 2502 + * version to avoid changing existing (v1) routers behaviour. 
2434 2503 */ 2435 - sw->config.cmuv = USB4_VERSION_1_0; 2504 + if (usb4_switch_version(sw) < 2) 2505 + sw->config.cmuv = ROUTER_CS_4_CMUV_V1; 2506 + else 2507 + sw->config.cmuv = ROUTER_CS_4_CMUV_V2; 2436 2508 sw->config.plug_events_delay = 0xa; 2437 2509 2438 2510 /* Enumerate the switch */ ··· 2464 2528 return ret; 2465 2529 2466 2530 return tb_plug_events_active(sw, true); 2531 + } 2532 + 2533 + /** 2534 + * tb_switch_configuration_valid() - Set the tunneling configuration to be valid 2535 + * @sw: Router to configure 2536 + * 2537 + * Needs to be called before any tunnels can be setup through the 2538 + * router. Can be called to any router. 2539 + * 2540 + * Returns %0 in success and negative errno otherwise. 2541 + */ 2542 + int tb_switch_configuration_valid(struct tb_switch *sw) 2543 + { 2544 + if (tb_switch_is_usb4(sw)) 2545 + return usb4_switch_configuration_valid(sw); 2546 + return 0; 2467 2547 } 2468 2548 2469 2549 static int tb_switch_set_uuid(struct tb_switch *sw) ··· 2706 2754 */ 2707 2755 int tb_switch_lane_bonding_enable(struct tb_switch *sw) 2708 2756 { 2709 - struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2710 2757 struct tb_port *up, *down; 2711 2758 u64 route = tb_route(sw); 2759 + unsigned int width_mask; 2712 2760 int ret; 2713 2761 2714 2762 if (!route) ··· 2718 2766 return 0; 2719 2767 2720 2768 up = tb_upstream_port(sw); 2721 - down = tb_port_at(route, parent); 2769 + down = tb_switch_downstream_port(sw); 2722 2770 2723 - if (!tb_port_is_width_supported(up, 2) || 2724 - !tb_port_is_width_supported(down, 2)) 2771 + if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) || 2772 + !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL)) 2725 2773 return 0; 2726 2774 2727 2775 ret = tb_port_lane_bonding_enable(up); ··· 2737 2785 return ret; 2738 2786 } 2739 2787 2740 - ret = tb_port_wait_for_link_width(down, 2, 100); 2788 + /* Any of the widths are all bonded */ 2789 + width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | 2790 + 
TB_LINK_WIDTH_ASYM_RX; 2791 + 2792 + ret = tb_port_wait_for_link_width(down, width_mask, 100); 2741 2793 if (ret) { 2742 2794 tb_port_warn(down, "timeout enabling lane bonding\n"); 2743 2795 return ret; ··· 2764 2808 */ 2765 2809 void tb_switch_lane_bonding_disable(struct tb_switch *sw) 2766 2810 { 2767 - struct tb_switch *parent = tb_to_switch(sw->dev.parent); 2768 2811 struct tb_port *up, *down; 2812 + int ret; 2769 2813 2770 2814 if (!tb_route(sw)) 2771 2815 return; ··· 2774 2818 if (!up->bonded) 2775 2819 return; 2776 2820 2777 - down = tb_port_at(tb_route(sw), parent); 2821 + down = tb_switch_downstream_port(sw); 2778 2822 2779 2823 tb_port_lane_bonding_disable(up); 2780 2824 tb_port_lane_bonding_disable(down); ··· 2783 2827 * It is fine if we get other errors as the router might have 2784 2828 * been unplugged. 2785 2829 */ 2786 - if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT) 2830 + ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); 2831 + if (ret == -ETIMEDOUT) 2787 2832 tb_sw_warn(sw, "timeout disabling lane bonding\n"); 2788 2833 2789 2834 tb_port_update_credits(down); ··· 2948 2991 tb_switch_default_link_ports(sw); 2949 2992 2950 2993 ret = tb_switch_update_link_attributes(sw); 2994 + if (ret) 2995 + return ret; 2996 + 2997 + ret = tb_switch_clx_init(sw); 2951 2998 if (ret) 2952 2999 return ret; 2953 3000 ··· 3207 3246 /* 3208 3247 * Actually only needed for Titan Ridge but for simplicity can be 3209 3248 * done for USB4 device too as CLx is re-enabled at resume. 3210 - * CL0s and CL1 are enabled and supported together. 
3211 3249 */ 3212 - if (tb_switch_is_clx_enabled(sw, TB_CL1)) { 3213 - if (tb_switch_disable_clx(sw, TB_CL1)) 3214 - tb_sw_warn(sw, "failed to disable %s on upstream port\n", 3215 - tb_switch_clx_name(TB_CL1)); 3216 - } 3250 + tb_switch_clx_disable(sw); 3217 3251 3218 3252 err = tb_plug_events_active(sw, false); 3219 3253 if (err) ··· 3428 3472 } 3429 3473 3430 3474 return NULL; 3431 - } 3432 - 3433 - static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) 3434 - { 3435 - struct tb_switch *parent = tb_switch_parent(sw); 3436 - struct tb_port *up, *down; 3437 - int ret; 3438 - 3439 - if (!tb_route(sw)) 3440 - return 0; 3441 - 3442 - up = tb_upstream_port(sw); 3443 - down = tb_port_at(tb_route(sw), parent); 3444 - ret = tb_port_pm_secondary_enable(up); 3445 - if (ret) 3446 - return ret; 3447 - 3448 - return tb_port_pm_secondary_disable(down); 3449 - } 3450 - 3451 - static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3452 - { 3453 - struct tb_switch *parent = tb_switch_parent(sw); 3454 - bool up_clx_support, down_clx_support; 3455 - struct tb_port *up, *down; 3456 - int ret; 3457 - 3458 - if (!tb_switch_is_clx_supported(sw)) 3459 - return 0; 3460 - 3461 - /* 3462 - * Enable CLx for host router's downstream port as part of the 3463 - * downstream router enabling procedure. 3464 - */ 3465 - if (!tb_route(sw)) 3466 - return 0; 3467 - 3468 - /* Enable CLx only for first hop router (depth = 1) */ 3469 - if (tb_route(parent)) 3470 - return 0; 3471 - 3472 - ret = tb_switch_pm_secondary_resolve(sw); 3473 - if (ret) 3474 - return ret; 3475 - 3476 - up = tb_upstream_port(sw); 3477 - down = tb_port_at(tb_route(sw), parent); 3478 - 3479 - up_clx_support = tb_port_clx_supported(up, clx); 3480 - down_clx_support = tb_port_clx_supported(down, clx); 3481 - 3482 - tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx), 3483 - up_clx_support ? "" : "not "); 3484 - tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx), 3485 - down_clx_support ? 
"" : "not "); 3486 - 3487 - if (!up_clx_support || !down_clx_support) 3488 - return -EOPNOTSUPP; 3489 - 3490 - ret = tb_port_clx_enable(up, clx); 3491 - if (ret) 3492 - return ret; 3493 - 3494 - ret = tb_port_clx_enable(down, clx); 3495 - if (ret) { 3496 - tb_port_clx_disable(up, clx); 3497 - return ret; 3498 - } 3499 - 3500 - ret = tb_switch_mask_clx_objections(sw); 3501 - if (ret) { 3502 - tb_port_clx_disable(up, clx); 3503 - tb_port_clx_disable(down, clx); 3504 - return ret; 3505 - } 3506 - 3507 - sw->clx = clx; 3508 - 3509 - tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx)); 3510 - return 0; 3511 - } 3512 - 3513 - /** 3514 - * tb_switch_enable_clx() - Enable CLx on upstream port of specified router 3515 - * @sw: Router to enable CLx for 3516 - * @clx: The CLx state to enable 3517 - * 3518 - * Enable CLx state only for first hop router. That is the most common 3519 - * use-case, that is intended for better thermal management, and so helps 3520 - * to improve performance. CLx is enabled only if both sides of the link 3521 - * support CLx, and if both sides of the link are not configured as two 3522 - * single lane links and only if the link is not inter-domain link. The 3523 - * complete set of conditions is described in CM Guide 1.0 section 8.1. 3524 - * 3525 - * Return: Returns 0 on success or an error code on failure. 3526 - */ 3527 - int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) 3528 - { 3529 - struct tb_switch *root_sw = sw->tb->root_switch; 3530 - 3531 - if (!clx_enabled) 3532 - return 0; 3533 - 3534 - /* 3535 - * CLx is not enabled and validated on Intel USB4 platforms before 3536 - * Alder Lake. 
3537 - */ 3538 - if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw)) 3539 - return 0; 3540 - 3541 - switch (clx) { 3542 - case TB_CL1: 3543 - /* CL0s and CL1 are enabled and supported together */ 3544 - return __tb_switch_enable_clx(sw, clx); 3545 - 3546 - default: 3547 - return -EOPNOTSUPP; 3548 - } 3549 - } 3550 - 3551 - static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3552 - { 3553 - struct tb_switch *parent = tb_switch_parent(sw); 3554 - struct tb_port *up, *down; 3555 - int ret; 3556 - 3557 - if (!tb_switch_is_clx_supported(sw)) 3558 - return 0; 3559 - 3560 - /* 3561 - * Disable CLx for host router's downstream port as part of the 3562 - * downstream router enabling procedure. 3563 - */ 3564 - if (!tb_route(sw)) 3565 - return 0; 3566 - 3567 - /* Disable CLx only for first hop router (depth = 1) */ 3568 - if (tb_route(parent)) 3569 - return 0; 3570 - 3571 - up = tb_upstream_port(sw); 3572 - down = tb_port_at(tb_route(sw), parent); 3573 - ret = tb_port_clx_disable(up, clx); 3574 - if (ret) 3575 - return ret; 3576 - 3577 - ret = tb_port_clx_disable(down, clx); 3578 - if (ret) 3579 - return ret; 3580 - 3581 - sw->clx = TB_CLX_DISABLE; 3582 - 3583 - tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx)); 3584 - return 0; 3585 - } 3586 - 3587 - /** 3588 - * tb_switch_disable_clx() - Disable CLx on upstream port of specified router 3589 - * @sw: Router to disable CLx for 3590 - * @clx: The CLx state to disable 3591 - * 3592 - * Return: Returns 0 on success or an error code on failure. 
3593 - */ 3594 - int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) 3595 - { 3596 - if (!clx_enabled) 3597 - return 0; 3598 - 3599 - switch (clx) { 3600 - case TB_CL1: 3601 - /* CL0s and CL1 are enabled and supported together */ 3602 - return __tb_switch_disable_clx(sw, clx); 3603 - 3604 - default: 3605 - return -EOPNOTSUPP; 3606 - } 3607 - } 3608 - 3609 - /** 3610 - * tb_switch_mask_clx_objections() - Mask CLx objections for a router 3611 - * @sw: Router to mask objections for 3612 - * 3613 - * Mask the objections coming from the second depth routers in order to 3614 - * stop these objections from interfering with the CLx states of the first 3615 - * depth link. 3616 - */ 3617 - int tb_switch_mask_clx_objections(struct tb_switch *sw) 3618 - { 3619 - int up_port = sw->config.upstream_port_number; 3620 - u32 offset, val[2], mask_obj, unmask_obj; 3621 - int ret, i; 3622 - 3623 - /* Only Titan Ridge of pre-USB4 devices support CLx states */ 3624 - if (!tb_switch_is_titan_ridge(sw)) 3625 - return 0; 3626 - 3627 - if (!tb_route(sw)) 3628 - return 0; 3629 - 3630 - /* 3631 - * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: 3632 - * Port A consists of lane adapters 1,2 and 3633 - * Port B consists of lane adapters 3,4 3634 - * If upstream port is A, (lanes are 1,2), we mask objections from 3635 - * port B (lanes 3,4) and unmask objections from Port A and vice-versa. 
3636 - */ 3637 - if (up_port == 1) { 3638 - mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3639 - unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3640 - offset = TB_LOW_PWR_C1_CL1; 3641 - } else { 3642 - mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; 3643 - unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; 3644 - offset = TB_LOW_PWR_C3_CL1; 3645 - } 3646 - 3647 - ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 3648 - sw->cap_lp + offset, ARRAY_SIZE(val)); 3649 - if (ret) 3650 - return ret; 3651 - 3652 - for (i = 0; i < ARRAY_SIZE(val); i++) { 3653 - val[i] |= mask_obj; 3654 - val[i] &= ~unmask_obj; 3655 - } 3656 - 3657 - return tb_sw_write(sw, &val, TB_CFG_SWITCH, 3658 - sw->cap_lp + offset, ARRAY_SIZE(val)); 3659 3475 } 3660 3476 3661 3477 /*
+243 -89
drivers/thunderbolt/tb.c
··· 131 131 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, 132 132 struct tb_port *out) 133 133 { 134 - if (usb4_dp_port_bw_mode_enabled(in)) { 134 + if (usb4_dp_port_bandwidth_mode_enabled(in)) { 135 135 int index, i; 136 136 137 137 index = usb4_dp_port_group_id(in); ··· 240 240 } 241 241 } 242 242 243 + /* Enables CL states up to host router */ 244 + static int tb_enable_clx(struct tb_switch *sw) 245 + { 246 + struct tb_cm *tcm = tb_priv(sw->tb); 247 + unsigned int clx = TB_CL0S | TB_CL1; 248 + const struct tb_tunnel *tunnel; 249 + int ret; 250 + 251 + /* 252 + * Currently only enable CLx for the first link. This is enough 253 + * to allow the CPU to save energy at least on Intel hardware 254 + * and makes it slightly simpler to implement. We may change 255 + * this in the future to cover the whole topology if it turns 256 + * out to be beneficial. 257 + */ 258 + while (sw && sw->config.depth > 1) 259 + sw = tb_switch_parent(sw); 260 + 261 + if (!sw) 262 + return 0; 263 + 264 + if (sw->config.depth != 1) 265 + return 0; 266 + 267 + /* 268 + * If we are re-enabling then check if there is an active DMA 269 + * tunnel and in that case bail out. 270 + */ 271 + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { 272 + if (tb_tunnel_is_dma(tunnel)) { 273 + if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) 274 + return 0; 275 + } 276 + } 277 + 278 + /* 279 + * Initially try with CL2. If that's not supported by the 280 + * topology try with CL0s and CL1 and then give up. 281 + */ 282 + ret = tb_switch_clx_enable(sw, clx | TB_CL2); 283 + if (ret == -EOPNOTSUPP) 284 + ret = tb_switch_clx_enable(sw, clx); 285 + return ret == -EOPNOTSUPP ? 
0 : ret; 286 + } 287 + 288 + /* Disables CL states up to the host router */ 289 + static void tb_disable_clx(struct tb_switch *sw) 290 + { 291 + do { 292 + if (tb_switch_clx_disable(sw) < 0) 293 + tb_sw_warn(sw, "failed to disable CL states\n"); 294 + sw = tb_switch_parent(sw); 295 + } while (sw); 296 + } 297 + 298 + static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) 299 + { 300 + struct tb_switch *sw; 301 + 302 + sw = tb_to_switch(dev); 303 + if (!sw) 304 + return 0; 305 + 306 + if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { 307 + enum tb_switch_tmu_mode mode; 308 + int ret; 309 + 310 + if (tb_switch_clx_is_enabled(sw, TB_CL1)) 311 + mode = TB_SWITCH_TMU_MODE_HIFI_UNI; 312 + else 313 + mode = TB_SWITCH_TMU_MODE_HIFI_BI; 314 + 315 + ret = tb_switch_tmu_configure(sw, mode); 316 + if (ret) 317 + return ret; 318 + 319 + return tb_switch_tmu_enable(sw); 320 + } 321 + 322 + return 0; 323 + } 324 + 325 + static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) 326 + { 327 + struct tb_switch *sw; 328 + 329 + if (!tunnel) 330 + return; 331 + 332 + /* 333 + * Once first DP tunnel is established we change the TMU 334 + * accuracy of first depth child routers (and the host router) 335 + * to the highest. This is needed for the DP tunneling to work 336 + * but also allows CL0s. 337 + * 338 + * If both routers are v2 then we don't need to do anything as 339 + * they are using enhanced TMU mode that allows all CLx. 340 + */ 341 + sw = tunnel->tb->root_switch; 342 + device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); 343 + } 344 + 345 + static int tb_enable_tmu(struct tb_switch *sw) 346 + { 347 + int ret; 348 + 349 + /* 350 + * If both routers at the end of the link are v2 we simply 351 + * enable the enhanched uni-directional mode. That covers all 352 + * the CL states. For v1 and before we need to use the normal 353 + * rate to allow CL1 (when supported). 
Otherwise we keep the TMU 354 + * running at the highest accuracy. 355 + */ 356 + ret = tb_switch_tmu_configure(sw, 357 + TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI); 358 + if (ret == -EOPNOTSUPP) { 359 + if (tb_switch_clx_is_enabled(sw, TB_CL1)) 360 + ret = tb_switch_tmu_configure(sw, 361 + TB_SWITCH_TMU_MODE_LOWRES); 362 + else 363 + ret = tb_switch_tmu_configure(sw, 364 + TB_SWITCH_TMU_MODE_HIFI_BI); 365 + } 366 + if (ret) 367 + return ret; 368 + 369 + /* If it is already enabled in correct mode, don't touch it */ 370 + if (tb_switch_tmu_is_enabled(sw)) 371 + return 0; 372 + 373 + ret = tb_switch_tmu_disable(sw); 374 + if (ret) 375 + return ret; 376 + 377 + ret = tb_switch_tmu_post_time(sw); 378 + if (ret) 379 + return ret; 380 + 381 + return tb_switch_tmu_enable(sw); 382 + } 383 + 243 384 static void tb_switch_discover_tunnels(struct tb_switch *sw, 244 385 struct list_head *list, 245 386 bool alloc_hopids) ··· 394 253 switch (port->config.type) { 395 254 case TB_TYPE_DP_HDMI_IN: 396 255 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); 397 - /* 398 - * In case of DP tunnel exists, change host router's 399 - * 1st children TMU mode to HiFi for CL0s to work. 
400 - */ 401 - if (tunnel) 402 - tb_switch_enable_tmu_1st_child(tb->root_switch, 403 - TB_SWITCH_TMU_RATE_HIFI); 256 + tb_increase_tmu_accuracy(tunnel); 404 257 break; 405 258 406 259 case TB_TYPE_PCIE_DOWN: ··· 490 355 tb_port_configure_xdomain(port, xd); 491 356 tb_xdomain_add(xd); 492 357 } 493 - } 494 - 495 - static int tb_enable_tmu(struct tb_switch *sw) 496 - { 497 - int ret; 498 - 499 - /* If it is already enabled in correct mode, don't touch it */ 500 - if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request)) 501 - return 0; 502 - 503 - ret = tb_switch_tmu_disable(sw); 504 - if (ret) 505 - return ret; 506 - 507 - ret = tb_switch_tmu_post_time(sw); 508 - if (ret) 509 - return ret; 510 - 511 - return tb_switch_tmu_enable(sw); 512 358 } 513 359 514 360 /** ··· 596 480 usb3_consumed_down = 0; 597 481 } 598 482 599 - *available_up = *available_down = 40000; 483 + /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ 484 + *available_up = *available_down = 120000; 600 485 601 486 /* Find the minimum available bandwidth over all links */ 602 487 tb_for_each_port_on_path(src_port, dst_port, port) { ··· 608 491 609 492 if (tb_is_upstream_port(port)) { 610 493 link_speed = port->sw->link_speed; 494 + /* 495 + * sw->link_width is from upstream perspective 496 + * so we use the opposite for downstream of the 497 + * host router. 
498 + */ 499 + if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { 500 + up_bw = link_speed * 3 * 1000; 501 + down_bw = link_speed * 1 * 1000; 502 + } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { 503 + up_bw = link_speed * 1 * 1000; 504 + down_bw = link_speed * 3 * 1000; 505 + } else { 506 + up_bw = link_speed * port->sw->link_width * 1000; 507 + down_bw = up_bw; 508 + } 611 509 } else { 612 510 link_speed = tb_port_get_link_speed(port); 613 511 if (link_speed < 0) 614 512 return link_speed; 513 + 514 + link_width = tb_port_get_link_width(port); 515 + if (link_width < 0) 516 + return link_width; 517 + 518 + if (link_width == TB_LINK_WIDTH_ASYM_TX) { 519 + up_bw = link_speed * 1 * 1000; 520 + down_bw = link_speed * 3 * 1000; 521 + } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { 522 + up_bw = link_speed * 3 * 1000; 523 + down_bw = link_speed * 1 * 1000; 524 + } else { 525 + up_bw = link_speed * link_width * 1000; 526 + down_bw = up_bw; 527 + } 615 528 } 616 529 617 - link_width = port->bonded ? 2 : 1; 618 - 619 - up_bw = link_speed * link_width * 1000; /* Mb/s */ 620 530 /* Leave 10% guard band */ 621 531 up_bw -= up_bw / 10; 622 - down_bw = up_bw; 532 + down_bw -= down_bw / 10; 623 533 624 534 tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw, 625 535 down_bw); ··· 772 628 * Look up available down port. Since we are chaining it should 773 629 * be found right above this switch. 774 630 */ 775 - port = tb_port_at(tb_route(sw), parent); 631 + port = tb_switch_downstream_port(sw); 776 632 down = tb_find_usb3_down(parent, port); 777 633 if (!down) 778 634 return 0; ··· 881 737 { 882 738 struct tb_cm *tcm = tb_priv(port->sw->tb); 883 739 struct tb_port *upstream_port; 740 + bool discovery = false; 884 741 struct tb_switch *sw; 885 - int ret; 886 742 887 743 if (tb_is_upstream_port(port)) 888 744 return; ··· 948 804 * tunnels and know which switches were authorized already by 949 805 * the boot firmware. 
950 806 */ 951 - if (!tcm->hotplug_active) 807 + if (!tcm->hotplug_active) { 952 808 dev_set_uevent_suppress(&sw->dev, true); 809 + discovery = true; 810 + } 953 811 954 812 /* 955 813 * At the moment Thunderbolt 2 and beyond (devices with LC) we ··· 981 835 * CL0s and CL1 are enabled and supported together. 982 836 * Silently ignore CLx enabling in case CLx is not supported. 983 837 */ 984 - ret = tb_switch_enable_clx(sw, TB_CL1); 985 - if (ret && ret != -EOPNOTSUPP) 986 - tb_sw_warn(sw, "failed to enable %s on upstream port\n", 987 - tb_switch_clx_name(TB_CL1)); 988 - 989 - if (tb_switch_is_clx_enabled(sw, TB_CL1)) 990 - /* 991 - * To support highest CLx state, we set router's TMU to 992 - * Normal-Uni mode. 993 - */ 994 - tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true); 995 - else 996 - /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/ 997 - tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false); 838 + if (discovery) 839 + tb_sw_dbg(sw, "discovery, not touching CL states\n"); 840 + else if (tb_enable_clx(sw)) 841 + tb_sw_warn(sw, "failed to enable CL states\n"); 998 842 999 843 if (tb_enable_tmu(sw)) 1000 844 tb_sw_warn(sw, "failed to enable TMU\n"); 845 + 846 + /* 847 + * Configuration valid needs to be set after the TMU has been 848 + * enabled for the upstream port of the router so we do it here. 
849 + */ 850 + tb_switch_configuration_valid(sw); 1001 851 1002 852 /* Scan upstream retimers */ 1003 853 tb_retimer_scan(upstream_port, true); ··· 1169 1027 struct tb_tunnel *tunnel; 1170 1028 struct tb_port *out; 1171 1029 1172 - if (!usb4_dp_port_bw_mode_enabled(in)) 1030 + if (!usb4_dp_port_bandwidth_mode_enabled(in)) 1173 1031 continue; 1174 1032 1175 1033 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); ··· 1217 1075 else 1218 1076 estimated_bw = estimated_up; 1219 1077 1220 - if (usb4_dp_port_set_estimated_bw(in, estimated_bw)) 1078 + if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw)) 1221 1079 tb_port_warn(in, "failed to update estimated bandwidth\n"); 1222 1080 } 1223 1081 ··· 1398 1256 * In case of DP tunnel exists, change host router's 1st children 1399 1257 * TMU mode to HiFi for CL0s to work. 1400 1258 */ 1401 - tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI); 1402 - 1259 + tb_increase_tmu_accuracy(tunnel); 1403 1260 return; 1404 1261 1405 1262 err_free: ··· 1512 1371 { 1513 1372 struct tb_port *up, *down, *port; 1514 1373 struct tb_cm *tcm = tb_priv(tb); 1515 - struct tb_switch *parent_sw; 1516 1374 struct tb_tunnel *tunnel; 1517 1375 1518 1376 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); ··· 1522 1382 * Look up available down port. Since we are chaining it should 1523 1383 * be found right above this switch. 
1524 1384 */ 1525 - parent_sw = tb_to_switch(sw->dev.parent); 1526 - port = tb_port_at(tb_route(sw), parent_sw); 1527 - down = tb_find_pcie_down(parent_sw, port); 1385 + port = tb_switch_downstream_port(sw); 1386 + down = tb_find_pcie_down(tb_switch_parent(sw), port); 1528 1387 if (!down) 1529 1388 return 0; 1530 1389 ··· 1560 1421 struct tb_port *nhi_port, *dst_port; 1561 1422 struct tb_tunnel *tunnel; 1562 1423 struct tb_switch *sw; 1424 + int ret; 1563 1425 1564 1426 sw = tb_to_switch(xd->dev.parent); 1565 1427 dst_port = tb_port_at(xd->route, sw); 1566 1428 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); 1567 1429 1568 1430 mutex_lock(&tb->lock); 1431 + 1432 + /* 1433 + * When tunneling DMA paths the link should not enter CL states 1434 + * so disable them now. 1435 + */ 1436 + tb_disable_clx(sw); 1437 + 1569 1438 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, 1570 1439 transmit_ring, receive_path, receive_ring); 1571 1440 if (!tunnel) { 1572 - mutex_unlock(&tb->lock); 1573 - return -ENOMEM; 1441 + ret = -ENOMEM; 1442 + goto err_clx; 1574 1443 } 1575 1444 1576 1445 if (tb_tunnel_activate(tunnel)) { 1577 1446 tb_port_info(nhi_port, 1578 1447 "DMA tunnel activation failed, aborting\n"); 1579 - tb_tunnel_free(tunnel); 1580 - mutex_unlock(&tb->lock); 1581 - return -EIO; 1448 + ret = -EIO; 1449 + goto err_free; 1582 1450 } 1583 1451 1584 1452 list_add_tail(&tunnel->list, &tcm->tunnel_list); 1585 1453 mutex_unlock(&tb->lock); 1586 1454 return 0; 1455 + 1456 + err_free: 1457 + tb_tunnel_free(tunnel); 1458 + err_clx: 1459 + tb_enable_clx(sw); 1460 + mutex_unlock(&tb->lock); 1461 + 1462 + return ret; 1587 1463 } 1588 1464 1589 1465 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, ··· 1624 1470 receive_path, receive_ring)) 1625 1471 tb_deactivate_and_free_tunnel(tunnel); 1626 1472 } 1473 + 1474 + /* 1475 + * Try to re-enable CL states now, it is OK if this fails 1476 + * because we may still have another 
DMA tunnel active through 1477 + * the same host router USB4 downstream port. 1478 + */ 1479 + tb_enable_clx(sw); 1627 1480 } 1628 1481 1629 1482 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, ··· 1912 1751 1913 1752 tb_port_dbg(in, "handling bandwidth allocation request\n"); 1914 1753 1915 - if (!usb4_dp_port_bw_mode_enabled(in)) { 1754 + if (!usb4_dp_port_bandwidth_mode_enabled(in)) { 1916 1755 tb_port_warn(in, "bandwidth allocation mode not enabled\n"); 1917 1756 goto unlock; 1918 1757 } 1919 1758 1920 - ret = usb4_dp_port_requested_bw(in); 1759 + ret = usb4_dp_port_requested_bandwidth(in); 1921 1760 if (ret < 0) { 1922 1761 if (ret == -ENODATA) 1923 1762 tb_port_dbg(in, "no bandwidth request active\n"); ··· 1984 1823 static void tb_handle_notification(struct tb *tb, u64 route, 1985 1824 const struct cfg_error_pkg *error) 1986 1825 { 1987 - if (tb_cfg_ack_notification(tb->ctl, route, error)) 1988 - tb_warn(tb, "could not ack notification on %llx\n", route); 1989 1826 1990 1827 switch (error->error) { 1828 + case TB_CFG_ERROR_PCIE_WAKE: 1829 + case TB_CFG_ERROR_DP_CON_CHANGE: 1830 + case TB_CFG_ERROR_DPTX_DISCOVERY: 1831 + if (tb_cfg_ack_notification(tb->ctl, route, error)) 1832 + tb_warn(tb, "could not ack notification on %llx\n", 1833 + route); 1834 + break; 1835 + 1991 1836 case TB_CFG_ERROR_DP_BW: 1837 + if (tb_cfg_ack_notification(tb->ctl, route, error)) 1838 + tb_warn(tb, "could not ack notification on %llx\n", 1839 + route); 1992 1840 tb_queue_dp_bandwidth_request(tb, route, error->port); 1993 1841 break; 1994 1842 1995 1843 default: 1996 - /* Ack is enough */ 1997 - return; 1844 + /* Ignore for now */ 1845 + break; 1998 1846 } 1999 1847 } 2000 1848 ··· 2118 1948 * To support highest CLx state, we set host router's TMU to 2119 1949 * Normal mode. 
2120 1950 */ 2121 - tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL, 2122 - false); 1951 + tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); 2123 1952 /* Enable TMU if it is off */ 2124 1953 tb_switch_tmu_enable(tb->root_switch); 2125 1954 /* Full scan to discover devices added before the driver was loaded. */ ··· 2159 1990 static void tb_restore_children(struct tb_switch *sw) 2160 1991 { 2161 1992 struct tb_port *port; 2162 - int ret; 2163 1993 2164 1994 /* No need to restore if the router is already unplugged */ 2165 1995 if (sw->is_unplugged) 2166 1996 return; 2167 1997 2168 - /* 2169 - * CL0s and CL1 are enabled and supported together. 2170 - * Silently ignore CLx re-enabling in case CLx is not supported. 2171 - */ 2172 - ret = tb_switch_enable_clx(sw, TB_CL1); 2173 - if (ret && ret != -EOPNOTSUPP) 2174 - tb_sw_warn(sw, "failed to re-enable %s on upstream port\n", 2175 - tb_switch_clx_name(TB_CL1)); 2176 - 2177 - if (tb_switch_is_clx_enabled(sw, TB_CL1)) 2178 - /* 2179 - * To support highest CLx state, we set router's TMU to 2180 - * Normal-Uni mode. 2181 - */ 2182 - tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true); 2183 - else 2184 - /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/ 2185 - tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false); 1998 + if (tb_enable_clx(sw)) 1999 + tb_sw_warn(sw, "failed to re-enable CL states\n"); 2186 2000 2187 2001 if (tb_enable_tmu(sw)) 2188 2002 tb_sw_warn(sw, "failed to restore TMU configuration\n"); 2003 + 2004 + tb_switch_configuration_valid(sw); 2189 2005 2190 2006 tb_switch_for_each_port(sw, port) { 2191 2007 if (!tb_port_has_remote(port) && !port->xdomain)
+135 -108
drivers/thunderbolt/tb.h
··· 19 19 #include "ctl.h" 20 20 #include "dma_port.h" 21 21 22 - #define NVM_MIN_SIZE SZ_32K 23 - #define NVM_MAX_SIZE SZ_512K 24 - #define NVM_DATA_DWORDS 16 25 - 26 22 /* Keep link controller awake during update */ 27 23 #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) 28 24 /* Disable CLx if not supported */ ··· 73 77 #define USB4_SWITCH_MAX_DEPTH 5 74 78 75 79 /** 76 - * enum tb_switch_tmu_rate - TMU refresh rate 77 - * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake) 78 - * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive 79 - * transmission of the Delay Request TSNOS 80 - * (Time Sync Notification Ordered Set) on a Link 81 - * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive 82 - * transmission of the Delay Request TSNOS on 83 - * a Link 80 + * enum tb_switch_tmu_mode - TMU mode 81 + * @TB_SWITCH_TMU_MODE_OFF: TMU is off 82 + * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode 83 + * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode 84 + * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode 85 + * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode 86 + * 87 + * Ordering is based on TMU accuracy level (highest last). 84 88 */ 85 - enum tb_switch_tmu_rate { 86 - TB_SWITCH_TMU_RATE_OFF = 0, 87 - TB_SWITCH_TMU_RATE_HIFI = 16, 88 - TB_SWITCH_TMU_RATE_NORMAL = 1000, 89 + enum tb_switch_tmu_mode { 90 + TB_SWITCH_TMU_MODE_OFF, 91 + TB_SWITCH_TMU_MODE_LOWRES, 92 + TB_SWITCH_TMU_MODE_HIFI_UNI, 93 + TB_SWITCH_TMU_MODE_HIFI_BI, 94 + TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI, 89 95 }; 90 96 91 97 /** 92 - * struct tb_switch_tmu - Structure holding switch TMU configuration 98 + * struct tb_switch_tmu - Structure holding router TMU configuration 93 99 * @cap: Offset to the TMU capability (%0 if not found) 94 100 * @has_ucap: Does the switch support uni-directional mode 95 - * @rate: TMU refresh rate related to upstream switch. In case of root 96 - * switch this holds the domain rate. 
Reflects the HW setting. 97 - * @unidirectional: Is the TMU in uni-directional or bi-directional mode 98 - * related to upstream switch. Don't care for root switch. 99 - * Reflects the HW setting. 100 - * @unidirectional_request: Is the new TMU mode: uni-directional or bi-directional 101 - * that is requested to be set. Related to upstream switch. 102 - * Don't care for root switch. 103 - * @rate_request: TMU new refresh rate related to upstream switch that is 104 - * requested to be set. In case of root switch, this holds 105 - * the new domain rate that is requested to be set. 101 + * @mode: TMU mode related to the upstream router. Reflects the HW 102 + * setting. Don't care for host router. 103 + * @mode_request: TMU mode requested to set. Related to upstream router. 104 + * Don't care for host router. 106 105 */ 107 106 struct tb_switch_tmu { 108 107 int cap; 109 108 bool has_ucap; 110 - enum tb_switch_tmu_rate rate; 111 - bool unidirectional; 112 - bool unidirectional_request; 113 - enum tb_switch_tmu_rate rate_request; 114 - }; 115 - 116 - enum tb_clx { 117 - TB_CLX_DISABLE, 118 - /* CL0s and CL1 are enabled and supported together */ 119 - TB_CL1 = BIT(0), 120 - TB_CL2 = BIT(1), 109 + enum tb_switch_tmu_mode mode; 110 + enum tb_switch_tmu_mode mode_request; 121 111 }; 122 112 123 113 /** ··· 124 142 * @vendor_name: Name of the vendor (or %NULL if not known) 125 143 * @device_name: Name of the device (or %NULL if not known) 126 144 * @link_speed: Speed of the link in Gb/s 127 - * @link_width: Width of the link (1 or 2) 145 + * @link_width: Width of the upstream facing link 128 146 * @link_usb4: Upstream link is USB4 129 147 * @generation: Switch Thunderbolt generation 130 148 * @cap_plug_events: Offset to the plug events capability (%0 if not found) ··· 156 174 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN 157 175 * @max_pcie_credits: Router preferred number of buffers for PCIe 158 176 * @max_dma_credits: Router preferred 
number of buffers for DMA/P2P 159 - * @clx: CLx state on the upstream link of the router 177 + * @clx: CLx states on the upstream link of the router 160 178 * 161 179 * When the switch is being added or removed to the domain (other 162 180 * switches) you need to have domain lock held. 163 181 * 164 182 * In USB4 terminology this structure represents a router. 183 + * 184 + * Note @link_width is not the same as whether link is bonded or not. 185 + * For Gen 4 links the link is also bonded when it is asymmetric. The 186 + * correct way to find out whether the link is bonded or not is to look 187 + * @bonded field of the upstream port. 165 188 */ 166 189 struct tb_switch { 167 190 struct device dev; ··· 182 195 const char *vendor_name; 183 196 const char *device_name; 184 197 unsigned int link_speed; 185 - unsigned int link_width; 198 + enum tb_link_width link_width; 186 199 bool link_usb4; 187 200 unsigned int generation; 188 201 int cap_plug_events; ··· 212 225 unsigned int min_dp_main_credits; 213 226 unsigned int max_pcie_credits; 214 227 unsigned int max_dma_credits; 215 - enum tb_clx clx; 228 + unsigned int clx; 216 229 }; 217 230 218 231 /** ··· 441 454 #define TB_WAKE_ON_USB3 BIT(3) 442 455 #define TB_WAKE_ON_PCIE BIT(4) 443 456 #define TB_WAKE_ON_DP BIT(5) 457 + 458 + /* CL states */ 459 + #define TB_CL0S BIT(0) 460 + #define TB_CL1 BIT(1) 461 + #define TB_CL2 BIT(2) 444 462 445 463 /** 446 464 * struct tb_cm_ops - Connection manager specific operations vector ··· 794 802 struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, 795 803 struct device *parent, u64 route); 796 804 int tb_switch_configure(struct tb_switch *sw); 805 + int tb_switch_configuration_valid(struct tb_switch *sw); 797 806 int tb_switch_add(struct tb_switch *sw); 798 807 void tb_switch_remove(struct tb_switch *sw); 799 808 void tb_switch_suspend(struct tb_switch *sw, bool runtime); ··· 848 855 static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) 849 856 { 850 857 
return tb_to_switch(sw->dev.parent); 858 + } 859 + 860 + /** 861 + * tb_switch_downstream_port() - Return downstream facing port of parent router 862 + * @sw: Device router pointer 863 + * 864 + * Only call for device routers. Returns the downstream facing port of 865 + * the parent router. 866 + */ 867 + static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) 868 + { 869 + if (WARN_ON(!tb_route(sw))) 870 + return NULL; 871 + return tb_port_at(tb_route(sw), tb_switch_parent(sw)); 851 872 } 852 873 853 874 static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) ··· 943 936 } 944 937 945 938 /** 946 - * tb_switch_is_usb4() - Is the switch USB4 compliant 947 - * @sw: Switch to check 948 - * 949 - * Returns true if the @sw is USB4 compliant router, false otherwise. 950 - */ 951 - static inline bool tb_switch_is_usb4(const struct tb_switch *sw) 952 - { 953 - return sw->config.thunderbolt_version == USB4_VERSION_1_0; 954 - } 955 - 956 - /** 957 939 * tb_switch_is_icm() - Is the switch handled by ICM firmware 958 940 * @sw: Switch to check 959 941 * ··· 969 973 int tb_switch_tmu_post_time(struct tb_switch *sw); 970 974 int tb_switch_tmu_disable(struct tb_switch *sw); 971 975 int tb_switch_tmu_enable(struct tb_switch *sw); 972 - void tb_switch_tmu_configure(struct tb_switch *sw, 973 - enum tb_switch_tmu_rate rate, 974 - bool unidirectional); 975 - void tb_switch_enable_tmu_1st_child(struct tb_switch *sw, 976 - enum tb_switch_tmu_rate rate); 976 + int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode); 977 + 978 + /** 979 + * tb_switch_tmu_is_configured() - Is given TMU mode configured 980 + * @sw: Router whose mode to check 981 + * @mode: Mode to check 982 + * 983 + * Checks if given router TMU mode is configured to @mode. Note the 984 + * router TMU might not be enabled to this mode. 
985 + */ 986 + static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw, 987 + enum tb_switch_tmu_mode mode) 988 + { 989 + return sw->tmu.mode_request == mode; 990 + } 991 + 977 992 /** 978 993 * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled 979 994 * @sw: Router whose TMU mode to check 980 - * @unidirectional: If uni-directional (bi-directional otherwise) 981 995 * 982 - * Return true if hardware TMU configuration matches the one passed in 983 - * as parameter. That is HiFi/Normal and either uni-directional or bi-directional. 996 + * Return true if hardware TMU configuration matches the requested 997 + * configuration (and is not %TB_SWITCH_TMU_MODE_OFF). 984 998 */ 985 - static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw, 986 - bool unidirectional) 999 + static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) 987 1000 { 988 - return sw->tmu.rate == sw->tmu.rate_request && 989 - sw->tmu.unidirectional == unidirectional; 1001 + return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF && 1002 + sw->tmu.mode == sw->tmu.mode_request; 990 1003 } 991 1004 992 - static inline const char *tb_switch_clx_name(enum tb_clx clx) 993 - { 994 - switch (clx) { 995 - /* CL0s and CL1 are enabled and supported together */ 996 - case TB_CL1: 997 - return "CL0s/CL1"; 998 - default: 999 - return "unknown"; 1000 - } 1001 - } 1005 + bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx); 1002 1006 1003 - int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx); 1004 - int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx); 1007 + int tb_switch_clx_init(struct tb_switch *sw); 1008 + bool tb_switch_clx_is_supported(const struct tb_switch *sw); 1009 + int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx); 1010 + int tb_switch_clx_disable(struct tb_switch *sw); 1005 1011 1006 1012 /** 1007 - * tb_switch_is_clx_enabled() - Checks if the CLx is enabled 1013 + * 
tb_switch_clx_is_enabled() - Checks if the CLx is enabled 1008 1014 * @sw: Router to check for the CLx 1009 - * @clx: The CLx state to check for 1015 + * @clx: The CLx states to check for 1010 1016 * 1011 1017 * Checks if the specified CLx is enabled on the router upstream link. 1018 + * Returns true if any of the given states is enabled. 1019 + * 1012 1020 * Not applicable for a host router. 1013 1021 */ 1014 - static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw, 1015 - enum tb_clx clx) 1022 + static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw, 1023 + unsigned int clx) 1016 1024 { 1017 - return sw->clx == clx; 1025 + return sw->clx & clx; 1018 1026 } 1019 - 1020 - /** 1021 - * tb_switch_is_clx_supported() - Is CLx supported on this type of router 1022 - * @sw: The router to check CLx support for 1023 - */ 1024 - static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw) 1025 - { 1026 - if (sw->quirks & QUIRK_NO_CLX) 1027 - return false; 1028 - 1029 - return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); 1030 - } 1031 - 1032 - int tb_switch_mask_clx_objections(struct tb_switch *sw); 1033 1027 1034 1028 int tb_switch_pcie_l1_enable(struct tb_switch *sw); 1035 1029 ··· 1059 1073 1060 1074 int tb_port_get_link_speed(struct tb_port *port); 1061 1075 int tb_port_get_link_width(struct tb_port *port); 1062 - int tb_port_set_link_width(struct tb_port *port, unsigned int width); 1063 - int tb_port_set_lane_bonding(struct tb_port *port, bool bonding); 1076 + int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); 1064 1077 int tb_port_lane_bonding_enable(struct tb_port *port); 1065 1078 void tb_port_lane_bonding_disable(struct tb_port *port); 1066 - int tb_port_wait_for_link_width(struct tb_port *port, int width, 1079 + int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, 1067 1080 int timeout_msec); 1068 1081 int tb_port_update_credits(struct tb_port *port); 1069 - 
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx); 1070 1082 1071 1083 int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); 1072 1084 int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); ··· 1167 1183 return tb_to_switch(xd->dev.parent); 1168 1184 } 1169 1185 1186 + /** 1187 + * tb_xdomain_downstream_port() - Return downstream facing port of parent router 1188 + * @xd: Xdomain pointer 1189 + * 1190 + * Returns the downstream port the XDomain is connected to. 1191 + */ 1192 + static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd) 1193 + { 1194 + return tb_port_at(xd->route, tb_xdomain_parent(xd)); 1195 + } 1196 + 1170 1197 int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, 1171 1198 size_t size); 1172 1199 int tb_retimer_scan(struct tb_port *port, bool add); ··· 1195 1200 return NULL; 1196 1201 } 1197 1202 1203 + /** 1204 + * usb4_switch_version() - Returns USB4 version of the router 1205 + * @sw: Router to check 1206 + * 1207 + * Returns major version of USB4 router (%1 for v1, %2 for v2 and so 1208 + * on). Can be called to pre-USB4 router too and in that case returns %0. 1209 + */ 1210 + static inline unsigned int usb4_switch_version(const struct tb_switch *sw) 1211 + { 1212 + return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version); 1213 + } 1214 + 1215 + /** 1216 + * tb_switch_is_usb4() - Is the switch USB4 compliant 1217 + * @sw: Switch to check 1218 + * 1219 + * Returns true if the @sw is USB4 compliant router, false otherwise. 
1220 + */ 1221 + static inline bool tb_switch_is_usb4(const struct tb_switch *sw) 1222 + { 1223 + return usb4_switch_version(sw) > 0; 1224 + } 1225 + 1198 1226 int usb4_switch_setup(struct tb_switch *sw); 1227 + int usb4_switch_configuration_valid(struct tb_switch *sw); 1199 1228 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); 1200 1229 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, 1201 1230 size_t size); ··· 1292 1273 int *downstream_bw); 1293 1274 1294 1275 int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id); 1295 - bool usb4_dp_port_bw_mode_supported(struct tb_port *port); 1296 - bool usb4_dp_port_bw_mode_enabled(struct tb_port *port); 1297 - int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported); 1276 + bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port); 1277 + bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port); 1278 + int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, 1279 + bool supported); 1298 1280 int usb4_dp_port_group_id(struct tb_port *port); 1299 1281 int usb4_dp_port_set_group_id(struct tb_port *port, int group_id); 1300 1282 int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes); 1301 1283 int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes); 1302 1284 int usb4_dp_port_granularity(struct tb_port *port); 1303 1285 int usb4_dp_port_set_granularity(struct tb_port *port, int granularity); 1304 - int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw); 1305 - int usb4_dp_port_allocated_bw(struct tb_port *port); 1306 - int usb4_dp_port_allocate_bw(struct tb_port *port, int bw); 1307 - int usb4_dp_port_requested_bw(struct tb_port *port); 1286 + int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw); 1287 + int usb4_dp_port_allocated_bandwidth(struct tb_port *port); 1288 + int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw); 1289 + int 
usb4_dp_port_requested_bandwidth(struct tb_port *port); 1290 + 1291 + int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable); 1308 1292 1309 1293 static inline bool tb_is_usb4_port_device(const struct device *dev) 1310 1294 { ··· 1324 1302 struct usb4_port *usb4_port_device_add(struct tb_port *port); 1325 1303 void usb4_port_device_remove(struct usb4_port *usb4); 1326 1304 int usb4_port_device_resume(struct usb4_port *usb4); 1305 + 1306 + static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4) 1307 + { 1308 + return usb4->offline; 1309 + } 1327 1310 1328 1311 void tb_check_quirks(struct tb_switch *sw); 1329 1312
+7
drivers/thunderbolt/tb_msgs.h
··· 30 30 TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13, 31 31 TB_CFG_ERROR_LOCK = 15, 32 32 TB_CFG_ERROR_DP_BW = 32, 33 + TB_CFG_ERROR_ROP_CMPLT = 33, 34 + TB_CFG_ERROR_POP_CMPLT = 34, 35 + TB_CFG_ERROR_PCIE_WAKE = 35, 36 + TB_CFG_ERROR_DP_CON_CHANGE = 36, 37 + TB_CFG_ERROR_DPTX_DISCOVERY = 37, 38 + TB_CFG_ERROR_LINK_RECOVERY = 38, 39 + TB_CFG_ERROR_ASYM_LINK = 39, 33 40 }; 34 41 35 42 /* common header */
+22 -3
drivers/thunderbolt/tb_regs.h
··· 190 190 u32 thunderbolt_version:8; 191 191 } __packed; 192 192 193 - /* USB4 version 1.0 */ 194 - #define USB4_VERSION_1_0 0x20 193 + /* Used with the router thunderbolt_version */ 194 + #define USB4_VERSION_MAJOR_MASK GENMASK(7, 5) 195 195 196 196 #define ROUTER_CS_1 0x01 197 197 #define ROUTER_CS_4 0x04 198 + /* Used with the router cmuv field */ 199 + #define ROUTER_CS_4_CMUV_V1 0x10 200 + #define ROUTER_CS_4_CMUV_V2 0x20 198 201 #define ROUTER_CS_5 0x05 199 202 #define ROUTER_CS_5_SLP BIT(0) 200 203 #define ROUTER_CS_5_WOP BIT(1) ··· 252 249 #define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0) 253 250 #define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16) 254 251 #define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16 255 - #define TMU_RTR_CS_15 0xf 252 + #define TMU_RTR_CS_15 0x0f 256 253 #define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0) 257 254 #define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6) 258 255 #define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12) 259 256 #define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18) 257 + #define TMU_RTR_CS_18 0x12 258 + #define TMU_RTR_CS_18_DELTA_AVG_CONST_MASK GENMASK(23, 16) 260 259 #define TMU_RTR_CS_22 0x16 261 260 #define TMU_RTR_CS_24 0x18 262 261 #define TMU_RTR_CS_25 0x19 ··· 324 319 #define TMU_ADP_CS_3_UDM BIT(29) 325 320 #define TMU_ADP_CS_6 0x06 326 321 #define TMU_ADP_CS_6_DTS BIT(1) 322 + #define TMU_ADP_CS_8 0x08 323 + #define TMU_ADP_CS_8_REPL_TIMEOUT_MASK GENMASK(14, 0) 324 + #define TMU_ADP_CS_8_EUDM BIT(15) 325 + #define TMU_ADP_CS_8_REPL_THRESHOLD_MASK GENMASK(25, 16) 326 + #define TMU_ADP_CS_9 0x09 327 + #define TMU_ADP_CS_9_REPL_N_MASK GENMASK(7, 0) 328 + #define TMU_ADP_CS_9_DIRSWITCH_N_MASK GENMASK(15, 8) 329 + #define TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK GENMASK(31, 16) 327 330 328 331 /* Lane adapter registers */ 329 332 #define LANE_ADP_CS_0 0x00 ··· 359 346 #define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 360 347 #define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8 361 348 #define 
LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4 349 + #define LANE_ADP_CS_1_CURRENT_SPEED_GEN4 0x2 362 350 #define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) 363 351 #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 364 352 #define LANE_ADP_CS_1_PMS BIT(30) ··· 450 436 #define DP_COMMON_CAP_1_LANE 0x0 451 437 #define DP_COMMON_CAP_2_LANES 0x1 452 438 #define DP_COMMON_CAP_4_LANES 0x2 439 + #define DP_COMMON_CAP_UHBR10 BIT(17) 440 + #define DP_COMMON_CAP_UHBR20 BIT(18) 441 + #define DP_COMMON_CAP_UHBR13_5 BIT(19) 453 442 #define DP_COMMON_CAP_LTTPR_NS BIT(27) 454 443 #define DP_COMMON_CAP_BW_MODE BIT(28) 455 444 #define DP_COMMON_CAP_DPRX_DONE BIT(31) ··· 464 447 /* PCIe adapter registers */ 465 448 #define ADP_PCIE_CS_0 0x00 466 449 #define ADP_PCIE_CS_0_PE BIT(31) 450 + #define ADP_PCIE_CS_1 0x01 451 + #define ADP_PCIE_CS_1_EE BIT(0) 467 452 468 453 /* USB adapter registers */ 469 454 #define ADP_USB3_CS_0 0x00
+83
drivers/thunderbolt/test.c
··· 170 170 return sw; 171 171 } 172 172 173 + static struct tb_switch *alloc_host_br(struct kunit *test) 174 + { 175 + struct tb_switch *sw; 176 + 177 + sw = alloc_host_usb4(test); 178 + if (!sw) 179 + return NULL; 180 + 181 + sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN; 182 + sw->ports[10].config.max_in_hop_id = 9; 183 + sw->ports[10].config.max_out_hop_id = 9; 184 + sw->ports[10].cap_adap = -1; 185 + sw->ports[10].disabled = false; 186 + 187 + return sw; 188 + } 189 + 173 190 static struct tb_switch *alloc_dev_default(struct kunit *test, 174 191 struct tb_switch *parent, 175 192 u64 route, bool bonded) ··· 1600 1583 tb_tunnel_free(tunnel); 1601 1584 } 1602 1585 1586 + static void tb_test_tunnel_3dp(struct kunit *test) 1587 + { 1588 + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5; 1589 + struct tb_port *in1, *in2, *in3, *out1, *out2, *out3; 1590 + struct tb_tunnel *tunnel1, *tunnel2, *tunnel3; 1591 + 1592 + /* 1593 + * Create 3 DP tunnels from Host to Devices #2, #5 and #4. 
1594 + * 1595 + * [Host] 1596 + * 3 | 1597 + * 1 | 1598 + * [Device #1] 1599 + * 3 / | 5 \ 7 1600 + * 1 / | \ 1 1601 + * [Device #2] | [Device #4] 1602 + * | 1 1603 + * [Device #3] 1604 + * | 5 1605 + * | 1 1606 + * [Device #5] 1607 + */ 1608 + host = alloc_host_br(test); 1609 + dev1 = alloc_dev_default(test, host, 0x3, true); 1610 + dev2 = alloc_dev_default(test, dev1, 0x303, true); 1611 + dev3 = alloc_dev_default(test, dev1, 0x503, true); 1612 + dev4 = alloc_dev_default(test, dev1, 0x703, true); 1613 + dev5 = alloc_dev_default(test, dev3, 0x50503, true); 1614 + 1615 + in1 = &host->ports[5]; 1616 + in2 = &host->ports[6]; 1617 + in3 = &host->ports[10]; 1618 + 1619 + out1 = &dev2->ports[13]; 1620 + out2 = &dev5->ports[13]; 1621 + out3 = &dev4->ports[14]; 1622 + 1623 + tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0); 1624 + KUNIT_ASSERT_TRUE(test, tunnel1 != NULL); 1625 + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP); 1626 + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1); 1627 + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1); 1628 + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3); 1629 + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3); 1630 + 1631 + tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0); 1632 + KUNIT_ASSERT_TRUE(test, tunnel2 != NULL); 1633 + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP); 1634 + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2); 1635 + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2); 1636 + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3); 1637 + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4); 1638 + 1639 + tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0); 1640 + KUNIT_ASSERT_TRUE(test, tunnel3 != NULL); 1641 + KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP); 1642 + KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3); 1643 + KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3); 1644 + KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3); 1645 + KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3); 1646 
+ 1647 + tb_tunnel_free(tunnel2); 1648 + tb_tunnel_free(tunnel1); 1649 + } 1650 + 1603 1651 static void tb_test_tunnel_usb3(struct kunit *test) 1604 1652 { 1605 1653 struct tb_switch *host, *dev1, *dev2; ··· 2872 2790 KUNIT_CASE(tb_test_tunnel_dp_chain), 2873 2791 KUNIT_CASE(tb_test_tunnel_dp_tree), 2874 2792 KUNIT_CASE(tb_test_tunnel_dp_max_length), 2793 + KUNIT_CASE(tb_test_tunnel_3dp), 2875 2794 KUNIT_CASE(tb_test_tunnel_port_on_path), 2876 2795 KUNIT_CASE(tb_test_tunnel_usb3), 2877 2796 KUNIT_CASE(tb_test_tunnel_dma),
+501 -214
drivers/thunderbolt/tmu.c
··· 11 11 12 12 #include "tb.h" 13 13 14 - static int tb_switch_set_tmu_mode_params(struct tb_switch *sw, 15 - enum tb_switch_tmu_rate rate) 14 + static const unsigned int tmu_rates[] = { 15 + [TB_SWITCH_TMU_MODE_OFF] = 0, 16 + [TB_SWITCH_TMU_MODE_LOWRES] = 1000, 17 + [TB_SWITCH_TMU_MODE_HIFI_UNI] = 16, 18 + [TB_SWITCH_TMU_MODE_HIFI_BI] = 16, 19 + [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16, 20 + }; 21 + 22 + const struct { 23 + unsigned int freq_meas_window; 24 + unsigned int avg_const; 25 + unsigned int delta_avg_const; 26 + unsigned int repl_timeout; 27 + unsigned int repl_threshold; 28 + unsigned int repl_n; 29 + unsigned int dirswitch_n; 30 + } tmu_params[] = { 31 + [TB_SWITCH_TMU_MODE_OFF] = { }, 32 + [TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, }, 33 + [TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, }, 34 + [TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, }, 35 + [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = { 36 + 800, 4, 0, 3125, 25, 128, 255, 37 + }, 38 + }; 39 + 40 + static const char *tmu_mode_name(enum tb_switch_tmu_mode mode) 16 41 { 17 - u32 freq_meas_wind[2] = { 30, 800 }; 18 - u32 avg_const[2] = { 4, 8 }; 42 + switch (mode) { 43 + case TB_SWITCH_TMU_MODE_OFF: 44 + return "off"; 45 + case TB_SWITCH_TMU_MODE_LOWRES: 46 + return "uni-directional, LowRes"; 47 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 48 + return "uni-directional, HiFi"; 49 + case TB_SWITCH_TMU_MODE_HIFI_BI: 50 + return "bi-directional, HiFi"; 51 + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: 52 + return "enhanced uni-directional, MedRes"; 53 + default: 54 + return "unknown"; 55 + } 56 + } 57 + 58 + static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw) 59 + { 60 + return usb4_switch_version(sw) > 1; 61 + } 62 + 63 + static int tb_switch_set_tmu_mode_params(struct tb_switch *sw, 64 + enum tb_switch_tmu_mode mode) 65 + { 19 66 u32 freq, avg, val; 20 67 int ret; 21 68 22 - if (rate == TB_SWITCH_TMU_RATE_NORMAL) { 23 - freq = freq_meas_wind[0]; 24 - avg = avg_const[0]; 25 - } else if (rate == 
TB_SWITCH_TMU_RATE_HIFI) { 26 - freq = freq_meas_wind[1]; 27 - avg = avg_const[1]; 28 - } else { 29 - return 0; 30 - } 69 + freq = tmu_params[mode].freq_meas_window; 70 + avg = tmu_params[mode].avg_const; 31 71 32 72 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 33 73 sw->tmu.cap + TMU_RTR_CS_0, 1); ··· 96 56 FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) | 97 57 FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg); 98 58 99 - return tb_sw_write(sw, &val, TB_CFG_SWITCH, 100 - sw->tmu.cap + TMU_RTR_CS_15, 1); 101 - } 59 + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, 60 + sw->tmu.cap + TMU_RTR_CS_15, 1); 61 + if (ret) 62 + return ret; 102 63 103 - static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw) 104 - { 105 - bool root_switch = !tb_route(sw); 64 + if (tb_switch_tmu_enhanced_is_supported(sw)) { 65 + u32 delta_avg = tmu_params[mode].delta_avg_const; 106 66 107 - switch (sw->tmu.rate) { 108 - case TB_SWITCH_TMU_RATE_OFF: 109 - return "off"; 67 + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, 68 + sw->tmu.cap + TMU_RTR_CS_18, 1); 69 + if (ret) 70 + return ret; 110 71 111 - case TB_SWITCH_TMU_RATE_HIFI: 112 - /* Root switch does not have upstream directionality */ 113 - if (root_switch) 114 - return "HiFi"; 115 - if (sw->tmu.unidirectional) 116 - return "uni-directional, HiFi"; 117 - return "bi-directional, HiFi"; 72 + val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK; 73 + val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg); 118 74 119 - case TB_SWITCH_TMU_RATE_NORMAL: 120 - if (root_switch) 121 - return "normal"; 122 - return "uni-directional, normal"; 123 - 124 - default: 125 - return "unknown"; 75 + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, 76 + sw->tmu.cap + TMU_RTR_CS_18, 1); 126 77 } 78 + 79 + return ret; 127 80 } 128 81 129 - static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw) 82 + static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw) 130 83 { 131 84 int ret; 132 85 u32 val; ··· 215 182 return val & TMU_ADP_CS_3_UDM; 216 183 } 217 
184 185 + static bool tb_port_tmu_is_enhanced(struct tb_port *port) 186 + { 187 + int ret; 188 + u32 val; 189 + 190 + ret = tb_port_read(port, &val, TB_CFG_PORT, 191 + port->cap_tmu + TMU_ADP_CS_8, 1); 192 + if (ret) 193 + return false; 194 + 195 + return val & TMU_ADP_CS_8_EUDM; 196 + } 197 + 198 + /* Can be called to non-v2 lane adapters too */ 199 + static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable) 200 + { 201 + int ret; 202 + u32 val; 203 + 204 + if (!tb_switch_tmu_enhanced_is_supported(port->sw)) 205 + return 0; 206 + 207 + ret = tb_port_read(port, &val, TB_CFG_PORT, 208 + port->cap_tmu + TMU_ADP_CS_8, 1); 209 + if (ret) 210 + return ret; 211 + 212 + if (enable) 213 + val |= TMU_ADP_CS_8_EUDM; 214 + else 215 + val &= ~TMU_ADP_CS_8_EUDM; 216 + 217 + return tb_port_write(port, &val, TB_CFG_PORT, 218 + port->cap_tmu + TMU_ADP_CS_8, 1); 219 + } 220 + 221 + static int tb_port_set_tmu_mode_params(struct tb_port *port, 222 + enum tb_switch_tmu_mode mode) 223 + { 224 + u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val; 225 + int ret; 226 + 227 + repl_timeout = tmu_params[mode].repl_timeout; 228 + repl_threshold = tmu_params[mode].repl_threshold; 229 + repl_n = tmu_params[mode].repl_n; 230 + dirswitch_n = tmu_params[mode].dirswitch_n; 231 + 232 + ret = tb_port_read(port, &val, TB_CFG_PORT, 233 + port->cap_tmu + TMU_ADP_CS_8, 1); 234 + if (ret) 235 + return ret; 236 + 237 + val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK; 238 + val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK; 239 + val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout); 240 + val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold); 241 + 242 + ret = tb_port_write(port, &val, TB_CFG_PORT, 243 + port->cap_tmu + TMU_ADP_CS_8, 1); 244 + if (ret) 245 + return ret; 246 + 247 + ret = tb_port_read(port, &val, TB_CFG_PORT, 248 + port->cap_tmu + TMU_ADP_CS_9, 1); 249 + if (ret) 250 + return ret; 251 + 252 + val &= ~TMU_ADP_CS_9_REPL_N_MASK; 253 + val &= 
~TMU_ADP_CS_9_DIRSWITCH_N_MASK; 254 + val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n); 255 + val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n); 256 + 257 + return tb_port_write(port, &val, TB_CFG_PORT, 258 + port->cap_tmu + TMU_ADP_CS_9, 1); 259 + } 260 + 261 + /* Can be called to non-v2 lane adapters too */ 262 + static int tb_port_tmu_rate_write(struct tb_port *port, int rate) 263 + { 264 + int ret; 265 + u32 val; 266 + 267 + if (!tb_switch_tmu_enhanced_is_supported(port->sw)) 268 + return 0; 269 + 270 + ret = tb_port_read(port, &val, TB_CFG_PORT, 271 + port->cap_tmu + TMU_ADP_CS_9, 1); 272 + if (ret) 273 + return ret; 274 + 275 + val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK; 276 + val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate); 277 + 278 + return tb_port_write(port, &val, TB_CFG_PORT, 279 + port->cap_tmu + TMU_ADP_CS_9, 1); 280 + } 281 + 218 282 static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync) 219 283 { 220 284 u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0; ··· 354 224 return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); 355 225 } 356 226 227 + static int tmu_mode_init(struct tb_switch *sw) 228 + { 229 + bool enhanced, ucap; 230 + int ret, rate; 231 + 232 + ucap = tb_switch_tmu_ucap_is_supported(sw); 233 + if (ucap) 234 + tb_sw_dbg(sw, "TMU: supports uni-directional mode\n"); 235 + enhanced = tb_switch_tmu_enhanced_is_supported(sw); 236 + if (enhanced) 237 + tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n"); 238 + 239 + ret = tb_switch_tmu_rate_read(sw); 240 + if (ret < 0) 241 + return ret; 242 + rate = ret; 243 + 244 + /* Off by default */ 245 + sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF; 246 + 247 + if (tb_route(sw)) { 248 + struct tb_port *up = tb_upstream_port(sw); 249 + 250 + if (enhanced && tb_port_tmu_is_enhanced(up)) { 251 + sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI; 252 + } else if (ucap && tb_port_tmu_is_unidirectional(up)) { 253 + if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate) 254 + 
sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES; 255 + else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate) 256 + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI; 257 + } else if (rate) { 258 + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI; 259 + } 260 + } else if (rate) { 261 + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI; 262 + } 263 + 264 + /* Update the initial request to match the current mode */ 265 + sw->tmu.mode_request = sw->tmu.mode; 266 + sw->tmu.has_ucap = ucap; 267 + 268 + return 0; 269 + } 270 + 357 271 /** 358 272 * tb_switch_tmu_init() - Initialize switch TMU structures 359 273 * @sw: Switch to initialized ··· 426 252 port->cap_tmu = cap; 427 253 } 428 254 429 - ret = tb_switch_tmu_rate_read(sw); 430 - if (ret < 0) 255 + ret = tmu_mode_init(sw); 256 + if (ret) 431 257 return ret; 432 258 433 - sw->tmu.rate = ret; 434 - 435 - sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw); 436 - if (sw->tmu.has_ucap) { 437 - tb_sw_dbg(sw, "TMU: supports uni-directional mode\n"); 438 - 439 - if (tb_route(sw)) { 440 - struct tb_port *up = tb_upstream_port(sw); 441 - 442 - sw->tmu.unidirectional = 443 - tb_port_tmu_is_unidirectional(up); 444 - } 445 - } else { 446 - sw->tmu.unidirectional = false; 447 - } 448 - 449 - tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw)); 259 + tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode)); 450 260 return 0; 451 261 } 452 262 ··· 466 308 return ret; 467 309 468 310 for (i = 0; i < ARRAY_SIZE(gm_local_time); i++) 469 - tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i, 311 + tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i, 470 312 gm_local_time[i]); 471 313 472 314 /* Convert to nanoseconds (drop fractional part) */ ··· 533 375 return ret; 534 376 } 535 377 378 + static int disable_enhanced(struct tb_port *up, struct tb_port *down) 379 + { 380 + int ret; 381 + 382 + /* 383 + * Router may already been disconnected so ignore errors on the 384 + * upstream port. 
385 + */ 386 + tb_port_tmu_rate_write(up, 0); 387 + tb_port_tmu_enhanced_enable(up, false); 388 + 389 + ret = tb_port_tmu_rate_write(down, 0); 390 + if (ret) 391 + return ret; 392 + return tb_port_tmu_enhanced_enable(down, false); 393 + } 394 + 536 395 /** 537 396 * tb_switch_tmu_disable() - Disable TMU of a switch 538 397 * @sw: Switch whose TMU to disable ··· 558 383 */ 559 384 int tb_switch_tmu_disable(struct tb_switch *sw) 560 385 { 561 - /* 562 - * No need to disable TMU on devices that don't support CLx since 563 - * on these devices e.g. Alpine Ridge and earlier, the TMU mode 564 - * HiFi bi-directional is enabled by default and we don't change it. 565 - */ 566 - if (!tb_switch_is_clx_supported(sw)) 567 - return 0; 568 - 569 386 /* Already disabled? */ 570 - if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) 387 + if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) 571 388 return 0; 572 - 573 389 574 390 if (tb_route(sw)) { 575 - bool unidirectional = sw->tmu.unidirectional; 576 - struct tb_switch *parent = tb_switch_parent(sw); 577 391 struct tb_port *down, *up; 578 392 int ret; 579 393 580 - down = tb_port_at(tb_route(sw), parent); 394 + down = tb_switch_downstream_port(sw); 581 395 up = tb_upstream_port(sw); 582 396 /* 583 397 * In case of uni-directional time sync, TMU handshake is ··· 579 415 * uni-directional mode and we don't want to change it's TMU 580 416 * mode. 
581 417 */ 582 - tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); 418 + tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); 583 419 584 420 tb_port_tmu_time_sync_disable(up); 585 421 ret = tb_port_tmu_time_sync_disable(down); 586 422 if (ret) 587 423 return ret; 588 424 589 - if (unidirectional) { 425 + switch (sw->tmu.mode) { 426 + case TB_SWITCH_TMU_MODE_LOWRES: 427 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 590 428 /* The switch may be unplugged so ignore any errors */ 591 429 tb_port_tmu_unidirectional_disable(up); 592 430 ret = tb_port_tmu_unidirectional_disable(down); 593 431 if (ret) 594 432 return ret; 433 + break; 434 + 435 + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: 436 + ret = disable_enhanced(up, down); 437 + if (ret) 438 + return ret; 439 + break; 440 + 441 + default: 442 + break; 595 443 } 596 444 } else { 597 - tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); 445 + tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); 598 446 } 599 447 600 - sw->tmu.unidirectional = false; 601 - sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF; 448 + sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF; 602 449 603 450 tb_sw_dbg(sw, "TMU: disabled\n"); 604 451 return 0; 605 452 } 606 453 607 - static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional) 454 + /* Called only when there is failure enabling requested mode */ 455 + static void tb_switch_tmu_off(struct tb_switch *sw) 608 456 { 609 - struct tb_switch *parent = tb_switch_parent(sw); 457 + unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF]; 610 458 struct tb_port *down, *up; 611 459 612 - down = tb_port_at(tb_route(sw), parent); 460 + down = tb_switch_downstream_port(sw); 613 461 up = tb_upstream_port(sw); 614 462 /* 615 463 * In case of any failure in one of the steps when setting ··· 632 456 */ 633 457 tb_port_tmu_time_sync_disable(down); 634 458 tb_port_tmu_time_sync_disable(up); 635 - if (unidirectional) 636 - tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF); 637 - else 638 - 
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); 639 459 640 - tb_switch_set_tmu_mode_params(sw, sw->tmu.rate); 460 + switch (sw->tmu.mode_request) { 461 + case TB_SWITCH_TMU_MODE_LOWRES: 462 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 463 + tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); 464 + break; 465 + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: 466 + disable_enhanced(up, down); 467 + break; 468 + default: 469 + break; 470 + } 471 + 472 + /* Always set the rate to 0 */ 473 + tb_switch_tmu_rate_write(sw, rate); 474 + 475 + tb_switch_set_tmu_mode_params(sw, sw->tmu.mode); 641 476 tb_port_tmu_unidirectional_disable(down); 642 477 tb_port_tmu_unidirectional_disable(up); 643 478 } 644 479 645 480 /* 646 481 * This function is called when the previous TMU mode was 647 - * TB_SWITCH_TMU_RATE_OFF. 482 + * TB_SWITCH_TMU_MODE_OFF. 648 483 */ 649 - static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw) 484 + static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw) 650 485 { 651 - struct tb_switch *parent = tb_switch_parent(sw); 652 486 struct tb_port *up, *down; 653 487 int ret; 654 488 655 489 up = tb_upstream_port(sw); 656 - down = tb_port_at(tb_route(sw), parent); 490 + down = tb_switch_downstream_port(sw); 657 491 658 492 ret = tb_port_tmu_unidirectional_disable(up); 659 493 if (ret) ··· 673 487 if (ret) 674 488 goto out; 675 489 676 - ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI); 490 + ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]); 677 491 if (ret) 678 492 goto out; 679 493 ··· 688 502 return 0; 689 503 690 504 out: 691 - __tb_switch_tmu_off(sw, false); 505 + tb_switch_tmu_off(sw); 692 506 return ret; 693 507 } 694 508 695 - static int tb_switch_tmu_objection_mask(struct tb_switch *sw) 509 + /* Only needed for Titan Ridge */ 510 + static int tb_switch_tmu_disable_objections(struct tb_switch *sw) 696 511 { 512 + struct tb_port *up = tb_upstream_port(sw); 697 513 u32 val; 698 514 int ret; 699 515 
··· 706 518 707 519 val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK; 708 520 709 - return tb_sw_write(sw, &val, TB_CFG_SWITCH, 710 - sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1); 711 - } 712 - 713 - static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw) 714 - { 715 - struct tb_port *up = tb_upstream_port(sw); 521 + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, 522 + sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1); 523 + if (ret) 524 + return ret; 716 525 717 526 return tb_port_tmu_write(up, TMU_ADP_CS_6, 718 527 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK, 719 - TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK); 528 + TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 | 529 + TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2); 720 530 } 721 531 722 532 /* 723 533 * This function is called when the previous TMU mode was 724 - * TB_SWITCH_TMU_RATE_OFF. 534 + * TB_SWITCH_TMU_MODE_OFF. 725 535 */ 726 - static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw) 536 + static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw) 727 537 { 728 - struct tb_switch *parent = tb_switch_parent(sw); 729 538 struct tb_port *up, *down; 730 539 int ret; 731 540 732 541 up = tb_upstream_port(sw); 733 - down = tb_port_at(tb_route(sw), parent); 734 - ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request); 542 + down = tb_switch_downstream_port(sw); 543 + ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), 544 + tmu_rates[sw->tmu.mode_request]); 735 545 if (ret) 736 546 return ret; 737 547 738 - ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request); 548 + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); 739 549 if (ret) 740 550 return ret; 741 551 ··· 756 570 return 0; 757 571 758 572 out: 759 - __tb_switch_tmu_off(sw, true); 573 + tb_switch_tmu_off(sw); 760 574 return ret; 761 575 } 762 576 763 - static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw) 577 + /* 578 + * This function is called when the previous TMU mode was 579 + * TB_SWITCH_TMU_RATE_OFF. 
580 + */ 581 + static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw) 764 582 { 765 - struct tb_switch *parent = tb_switch_parent(sw); 583 + unsigned int rate = tmu_rates[sw->tmu.mode_request]; 584 + struct tb_port *up, *down; 585 + int ret; 586 + 587 + /* Router specific parameters first */ 588 + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); 589 + if (ret) 590 + return ret; 591 + 592 + up = tb_upstream_port(sw); 593 + down = tb_switch_downstream_port(sw); 594 + 595 + ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request); 596 + if (ret) 597 + goto out; 598 + 599 + ret = tb_port_tmu_rate_write(up, rate); 600 + if (ret) 601 + goto out; 602 + 603 + ret = tb_port_tmu_enhanced_enable(up, true); 604 + if (ret) 605 + goto out; 606 + 607 + ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request); 608 + if (ret) 609 + goto out; 610 + 611 + ret = tb_port_tmu_rate_write(down, rate); 612 + if (ret) 613 + goto out; 614 + 615 + ret = tb_port_tmu_enhanced_enable(down, true); 616 + if (ret) 617 + goto out; 618 + 619 + return 0; 620 + 621 + out: 622 + tb_switch_tmu_off(sw); 623 + return ret; 624 + } 625 + 626 + static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw) 627 + { 628 + unsigned int rate = tmu_rates[sw->tmu.mode]; 766 629 struct tb_port *down, *up; 767 630 768 - down = tb_port_at(tb_route(sw), parent); 631 + down = tb_switch_downstream_port(sw); 769 632 up = tb_upstream_port(sw); 770 633 /* 771 634 * In case of any failure in one of the steps when change mode, ··· 822 587 * In case of additional failures in the functions below, 823 588 * ignore them since the caller shall already report a failure. 
824 589 */ 825 - tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional); 826 - if (sw->tmu.unidirectional_request) 827 - tb_switch_tmu_rate_write(parent, sw->tmu.rate); 828 - else 829 - tb_switch_tmu_rate_write(sw, sw->tmu.rate); 590 + switch (sw->tmu.mode) { 591 + case TB_SWITCH_TMU_MODE_LOWRES: 592 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 593 + tb_port_tmu_set_unidirectional(down, true); 594 + tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); 595 + break; 830 596 831 - tb_switch_set_tmu_mode_params(sw, sw->tmu.rate); 832 - tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional); 597 + case TB_SWITCH_TMU_MODE_HIFI_BI: 598 + tb_port_tmu_set_unidirectional(down, false); 599 + tb_switch_tmu_rate_write(sw, rate); 600 + break; 601 + 602 + default: 603 + break; 604 + } 605 + 606 + tb_switch_set_tmu_mode_params(sw, sw->tmu.mode); 607 + 608 + switch (sw->tmu.mode) { 609 + case TB_SWITCH_TMU_MODE_LOWRES: 610 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 611 + tb_port_tmu_set_unidirectional(up, true); 612 + break; 613 + 614 + case TB_SWITCH_TMU_MODE_HIFI_BI: 615 + tb_port_tmu_set_unidirectional(up, false); 616 + break; 617 + 618 + default: 619 + break; 620 + } 833 621 } 834 622 835 - static int __tb_switch_tmu_change_mode(struct tb_switch *sw) 623 + static int tb_switch_tmu_change_mode(struct tb_switch *sw) 836 624 { 837 - struct tb_switch *parent = tb_switch_parent(sw); 625 + unsigned int rate = tmu_rates[sw->tmu.mode_request]; 838 626 struct tb_port *up, *down; 839 627 int ret; 840 628 841 629 up = tb_upstream_port(sw); 842 - down = tb_port_at(tb_route(sw), parent); 843 - ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request); 844 - if (ret) 845 - goto out; 630 + down = tb_switch_downstream_port(sw); 846 631 847 - if (sw->tmu.unidirectional_request) 848 - ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request); 849 - else 850 - ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request); 632 + /* Program the upstream router downstream facing lane 
adapter */ 633 + switch (sw->tmu.mode_request) { 634 + case TB_SWITCH_TMU_MODE_LOWRES: 635 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 636 + ret = tb_port_tmu_set_unidirectional(down, true); 637 + if (ret) 638 + goto out; 639 + ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); 640 + if (ret) 641 + goto out; 642 + break; 643 + 644 + case TB_SWITCH_TMU_MODE_HIFI_BI: 645 + ret = tb_port_tmu_set_unidirectional(down, false); 646 + if (ret) 647 + goto out; 648 + ret = tb_switch_tmu_rate_write(sw, rate); 649 + if (ret) 650 + goto out; 651 + break; 652 + 653 + default: 654 + /* Not allowed to change modes from other than above */ 655 + return -EINVAL; 656 + } 657 + 658 + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); 851 659 if (ret) 852 660 return ret; 853 661 854 - ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request); 855 - if (ret) 856 - return ret; 662 + /* Program the new mode and the downstream router lane adapter */ 663 + switch (sw->tmu.mode_request) { 664 + case TB_SWITCH_TMU_MODE_LOWRES: 665 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 666 + ret = tb_port_tmu_set_unidirectional(up, true); 667 + if (ret) 668 + goto out; 669 + break; 857 670 858 - ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request); 859 - if (ret) 860 - goto out; 671 + case TB_SWITCH_TMU_MODE_HIFI_BI: 672 + ret = tb_port_tmu_set_unidirectional(up, false); 673 + if (ret) 674 + goto out; 675 + break; 676 + 677 + default: 678 + /* Not allowed to change modes from other than above */ 679 + return -EINVAL; 680 + } 861 681 862 682 ret = tb_port_tmu_time_sync_enable(down); 863 683 if (ret) ··· 925 635 return 0; 926 636 927 637 out: 928 - __tb_switch_tmu_change_mode_prev(sw); 638 + tb_switch_tmu_change_mode_prev(sw); 929 639 return ret; 930 640 } 931 641 ··· 933 643 * tb_switch_tmu_enable() - Enable TMU on a router 934 644 * @sw: Router whose TMU to enable 935 645 * 936 - * Enables TMU of a router to be in uni-directional Normal/HiFi 937 - * or bi-directional HiFi 
mode. Calling tb_switch_tmu_configure() is required 938 - * before calling this function, to select the mode Normal/HiFi and 939 - * directionality (uni-directional/bi-directional). 940 - * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't 941 - * work. Uni-directional mode is required for CLx (Link Low-Power) to work. 646 + * Enables TMU of a router to be in uni-directional Normal/HiFi or 647 + * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is 648 + * required before calling this function. 942 649 */ 943 650 int tb_switch_tmu_enable(struct tb_switch *sw) 944 651 { 945 - bool unidirectional = sw->tmu.unidirectional_request; 946 652 int ret; 947 653 948 - if (unidirectional && !sw->tmu.has_ucap) 949 - return -EOPNOTSUPP; 950 - 951 - /* 952 - * No need to enable TMU on devices that don't support CLx since on 953 - * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi 954 - * bi-directional is enabled by default. 955 - */ 956 - if (!tb_switch_is_clx_supported(sw)) 654 + if (tb_switch_tmu_is_enabled(sw)) 957 655 return 0; 958 656 959 - if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request)) 960 - return 0; 961 - 962 - if (tb_switch_is_titan_ridge(sw) && unidirectional) { 963 - /* 964 - * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are 965 - * enabled and supported together. 966 - */ 967 - if (!tb_switch_is_clx_enabled(sw, TB_CL1)) 968 - return -EOPNOTSUPP; 969 - 970 - ret = tb_switch_tmu_objection_mask(sw); 971 - if (ret) 972 - return ret; 973 - 974 - ret = tb_switch_tmu_unidirectional_enable(sw); 657 + if (tb_switch_is_titan_ridge(sw) && 658 + (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES || 659 + sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) { 660 + ret = tb_switch_tmu_disable_objections(sw); 975 661 if (ret) 976 662 return ret; 977 663 } ··· 962 696 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to 963 697 * HiFi-Uni. 
964 698 */ 965 - if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) { 966 - if (unidirectional) 967 - ret = __tb_switch_tmu_enable_unidirectional(sw); 968 - else 969 - ret = __tb_switch_tmu_enable_bidirectional(sw); 970 - if (ret) 971 - return ret; 972 - } else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) { 973 - ret = __tb_switch_tmu_change_mode(sw); 974 - if (ret) 975 - return ret; 699 + if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) { 700 + switch (sw->tmu.mode_request) { 701 + case TB_SWITCH_TMU_MODE_LOWRES: 702 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 703 + ret = tb_switch_tmu_enable_unidirectional(sw); 704 + break; 705 + 706 + case TB_SWITCH_TMU_MODE_HIFI_BI: 707 + ret = tb_switch_tmu_enable_bidirectional(sw); 708 + break; 709 + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: 710 + ret = tb_switch_tmu_enable_enhanced(sw); 711 + break; 712 + default: 713 + ret = -EINVAL; 714 + break; 715 + } 716 + } else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES || 717 + sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI || 718 + sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) { 719 + ret = tb_switch_tmu_change_mode(sw); 720 + } else { 721 + ret = -EINVAL; 976 722 } 977 - sw->tmu.unidirectional = unidirectional; 978 723 } else { 979 724 /* 980 725 * Host router port configurations are written as ··· 993 716 * of the child node - see above. 994 717 * Here only the host router' rate configuration is written. 
995 718 */ 996 - ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request); 997 - if (ret) 998 - return ret; 719 + ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]); 999 720 } 1000 721 1001 - sw->tmu.rate = sw->tmu.rate_request; 722 + if (ret) { 723 + tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n", 724 + tmu_mode_name(sw->tmu.mode_request), ret); 725 + } else { 726 + sw->tmu.mode = sw->tmu.mode_request; 727 + tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode)); 728 + } 1002 729 1003 - tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw)); 1004 730 return tb_switch_tmu_set_time_disruption(sw, false); 1005 731 } 1006 732 1007 733 /** 1008 - * tb_switch_tmu_configure() - Configure the TMU rate and directionality 734 + * tb_switch_tmu_configure() - Configure the TMU mode 1009 735 * @sw: Router whose mode to change 1010 - * @rate: Rate to configure Off/Normal/HiFi 1011 - * @unidirectional: If uni-directional (bi-directional otherwise) 736 + * @mode: Mode to configure 1012 737 * 1013 - * Selects the rate of the TMU and directionality (uni-directional or 1014 - * bi-directional). Must be called before tb_switch_tmu_enable(). 738 + * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is 739 + * next called. 740 + * 741 + * Returns %0 in success and negative errno otherwise. Specifically 742 + * returns %-EOPNOTSUPP if the requested mode is not possible (not 743 + * supported by the router and/or topology). 
1015 744 */ 1016 - void tb_switch_tmu_configure(struct tb_switch *sw, 1017 - enum tb_switch_tmu_rate rate, bool unidirectional) 745 + int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode) 1018 746 { 1019 - sw->tmu.unidirectional_request = unidirectional; 1020 - sw->tmu.rate_request = rate; 1021 - } 747 + switch (mode) { 748 + case TB_SWITCH_TMU_MODE_OFF: 749 + break; 1022 750 1023 - static int tb_switch_tmu_config_enable(struct device *dev, void *rate) 1024 - { 1025 - if (tb_is_switch(dev)) { 1026 - struct tb_switch *sw = tb_to_switch(dev); 751 + case TB_SWITCH_TMU_MODE_LOWRES: 752 + case TB_SWITCH_TMU_MODE_HIFI_UNI: 753 + if (!sw->tmu.has_ucap) 754 + return -EOPNOTSUPP; 755 + break; 1027 756 1028 - tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate, 1029 - tb_switch_is_clx_enabled(sw, TB_CL1)); 1030 - if (tb_switch_tmu_enable(sw)) 1031 - tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n"); 757 + case TB_SWITCH_TMU_MODE_HIFI_BI: 758 + break; 759 + 760 + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: { 761 + const struct tb_switch *parent_sw = tb_switch_parent(sw); 762 + 763 + if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw)) 764 + return -EOPNOTSUPP; 765 + if (!tb_switch_tmu_enhanced_is_supported(sw)) 766 + return -EOPNOTSUPP; 767 + 768 + break; 769 + } 770 + 771 + default: 772 + tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode); 773 + return -EINVAL; 774 + } 775 + 776 + if (sw->tmu.mode_request != mode) { 777 + tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n", 778 + tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode)); 779 + sw->tmu.mode_request = mode; 1032 780 } 1033 781 1034 782 return 0; 1035 - } 1036 - 1037 - /** 1038 - * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st chidren 1039 - * @sw: The router to configure and enable it's children TMU 1040 - * @rate: Rate of the TMU to configure the router's chidren to 1041 - * 1042 - * Configures and enables the TMU mode of 1st depth 
children of the specified 1043 - * router to the specified rate. 1044 - */ 1045 - void tb_switch_enable_tmu_1st_child(struct tb_switch *sw, 1046 - enum tb_switch_tmu_rate rate) 1047 - { 1048 - device_for_each_child(&sw->dev, &rate, 1049 - tb_switch_tmu_config_enable); 1050 783 }
+197 -44
drivers/thunderbolt/tunnel.c
··· 10 10 #include <linux/slab.h> 11 11 #include <linux/list.h> 12 12 #include <linux/ktime.h> 13 + #include <linux/string_helpers.h> 13 14 14 15 #include "tunnel.h" 15 16 #include "tb.h" ··· 42 41 * Number of credits we try to allocate for each DMA path if not limited 43 42 * by the host router baMaxHI. 44 43 */ 45 - #define TB_DMA_CREDITS 14U 44 + #define TB_DMA_CREDITS 14 46 45 /* Minimum number of credits for DMA path */ 47 - #define TB_MIN_DMA_CREDITS 1U 46 + #define TB_MIN_DMA_CREDITS 1 47 + 48 + static unsigned int dma_credits = TB_DMA_CREDITS; 49 + module_param(dma_credits, uint, 0444); 50 + MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " 51 + __MODULE_STRING(TB_DMA_CREDITS) ")"); 48 52 49 53 static bool bw_alloc_mode = true; 50 54 module_param(bw_alloc_mode, bool, 0444); ··· 101 95 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; 102 96 103 97 if (tb_acpi_is_xdomain_allowed()) { 104 - spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS); 98 + spare = min_not_zero(sw->max_dma_credits, dma_credits); 105 99 /* Add some credits for potential second DMA tunnel */ 106 100 spare += TB_MIN_DMA_CREDITS; 107 101 } else { ··· 154 148 return tunnel; 155 149 } 156 150 151 + static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) 152 + { 153 + int ret; 154 + 155 + /* Only supported of both routers are at least USB4 v2 */ 156 + if (usb4_switch_version(tunnel->src_port->sw) < 2 || 157 + usb4_switch_version(tunnel->dst_port->sw) < 2) 158 + return 0; 159 + 160 + ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); 161 + if (ret) 162 + return ret; 163 + 164 + ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); 165 + if (ret) 166 + return ret; 167 + 168 + tb_tunnel_dbg(tunnel, "extended encapsulation %s\n", 169 + str_enabled_disabled(enable)); 170 + return 0; 171 + } 172 + 157 173 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) 158 174 { 159 175 int 
res; 176 + 177 + if (activate) { 178 + res = tb_pci_set_ext_encapsulation(tunnel, activate); 179 + if (res) 180 + return res; 181 + } 160 182 161 183 res = tb_pci_port_enable(tunnel->src_port, activate); 162 184 if (res) 163 185 return res; 164 186 165 - if (tb_port_is_pcie_up(tunnel->dst_port)) 166 - return tb_pci_port_enable(tunnel->dst_port, activate); 187 + if (tb_port_is_pcie_up(tunnel->dst_port)) { 188 + res = tb_pci_port_enable(tunnel->dst_port, activate); 189 + if (res) 190 + return res; 191 + } 167 192 168 - return 0; 193 + return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate); 169 194 } 170 195 171 196 static int tb_pci_init_credits(struct tb_path_hop *hop) ··· 418 381 return -ETIMEDOUT; 419 382 } 420 383 384 + /* 385 + * Returns maximum possible rate from capability supporting only DP 2.0 386 + * and below. Used when DP BW allocation mode is not enabled. 387 + */ 421 388 static inline u32 tb_dp_cap_get_rate(u32 val) 422 389 { 423 390 u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT; ··· 438 397 default: 439 398 return 0; 440 399 } 400 + } 401 + 402 + /* 403 + * Returns maximum possible rate from capability supporting DP 2.1 404 + * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation 405 + * mode is enabled. 
406 + */ 407 + static inline u32 tb_dp_cap_get_rate_ext(u32 val) 408 + { 409 + if (val & DP_COMMON_CAP_UHBR20) 410 + return 20000; 411 + else if (val & DP_COMMON_CAP_UHBR13_5) 412 + return 13500; 413 + else if (val & DP_COMMON_CAP_UHBR10) 414 + return 10000; 415 + 416 + return tb_dp_cap_get_rate(val); 417 + } 418 + 419 + static inline bool tb_dp_is_uhbr_rate(unsigned int rate) 420 + { 421 + return rate >= 10000; 441 422 } 442 423 443 424 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) ··· 524 461 525 462 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes) 526 463 { 527 - /* Tunneling removes the DP 8b/10b encoding */ 464 + /* Tunneling removes the DP 8b/10b 128/132b encoding */ 465 + if (tb_dp_is_uhbr_rate(rate)) 466 + return rate * lanes * 128 / 132; 528 467 return rate * lanes * 8 / 10; 529 468 } 530 469 ··· 591 526 * Perform connection manager handshake between IN and OUT ports 592 527 * before capabilities exchange can take place. 593 528 */ 594 - ret = tb_dp_cm_handshake(in, out, 1500); 529 + ret = tb_dp_cm_handshake(in, out, 3000); 595 530 if (ret) 596 531 return ret; 597 532 ··· 669 604 in->cap_adap + DP_REMOTE_CAP, 1); 670 605 } 671 606 672 - static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel) 607 + static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) 673 608 { 674 609 int ret, estimated_bw, granularity, tmp; 675 610 struct tb_port *out = tunnel->dst_port; ··· 681 616 if (!bw_alloc_mode) 682 617 return 0; 683 618 684 - ret = usb4_dp_port_set_cm_bw_mode_supported(in, true); 619 + ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true); 685 620 if (ret) 686 621 return ret; 687 622 ··· 719 654 if (ret) 720 655 return ret; 721 656 657 + /* 658 + * Pick up granularity that supports maximum possible bandwidth. 659 + * For that we use the UHBR rates too. 
660 + */ 661 + in_rate = tb_dp_cap_get_rate_ext(in_dp_cap); 662 + out_rate = tb_dp_cap_get_rate_ext(out_dp_cap); 663 + rate = min(in_rate, out_rate); 664 + tmp = tb_dp_bandwidth(rate, lanes); 665 + 666 + tb_port_dbg(in, 667 + "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", 668 + rate, lanes, tmp); 669 + 722 670 for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; 723 671 granularity *= 2) 724 672 ; ··· 758 680 759 681 tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw); 760 682 761 - ret = usb4_dp_port_set_estimated_bw(in, estimated_bw); 683 + ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); 762 684 if (ret) 763 685 return ret; 764 686 765 687 /* Initial allocation should be 0 according the spec */ 766 - ret = usb4_dp_port_allocate_bw(in, 0); 688 + ret = usb4_dp_port_allocate_bandwidth(in, 0); 767 689 if (ret) 768 690 return ret; 769 691 ··· 785 707 if (!tb_switch_is_usb4(sw)) 786 708 return 0; 787 709 788 - if (!usb4_dp_port_bw_mode_supported(in)) 710 + if (!usb4_dp_port_bandwidth_mode_supported(in)) 789 711 return 0; 790 712 791 713 tb_port_dbg(in, "bandwidth allocation mode supported\n"); ··· 794 716 if (ret) 795 717 return ret; 796 718 797 - return tb_dp_bw_alloc_mode_enable(tunnel); 719 + return tb_dp_bandwidth_alloc_mode_enable(tunnel); 798 720 } 799 721 800 722 static void tb_dp_deinit(struct tb_tunnel *tunnel) 801 723 { 802 724 struct tb_port *in = tunnel->src_port; 803 725 804 - if (!usb4_dp_port_bw_mode_supported(in)) 726 + if (!usb4_dp_port_bandwidth_mode_supported(in)) 805 727 return; 806 - if (usb4_dp_port_bw_mode_enabled(in)) { 807 - usb4_dp_port_set_cm_bw_mode_supported(in, false); 728 + if (usb4_dp_port_bandwidth_mode_enabled(in)) { 729 + usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); 808 730 tb_port_dbg(in, "bandwidth allocation mode disabled\n"); 809 731 } 810 732 } ··· 847 769 } 848 770 849 771 /* max_bw is rounded up to next granularity */ 850 - static int 
tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw) 772 + static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, 773 + int *max_bw) 851 774 { 852 775 struct tb_port *in = tunnel->src_port; 853 776 int ret, rate, lanes, nrd_bw; 777 + u32 cap; 854 778 855 - ret = usb4_dp_port_nrd(in, &rate, &lanes); 779 + /* 780 + * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX 781 + * read parameter values so this so we can use this to determine 782 + * the maximum possible bandwidth over this link. 783 + * 784 + * See USB4 v2 spec 1.0 10.4.4.5. 785 + */ 786 + ret = tb_port_read(in, &cap, TB_CFG_PORT, 787 + in->cap_adap + DP_LOCAL_CAP, 1); 856 788 if (ret) 857 789 return ret; 790 + 791 + rate = tb_dp_cap_get_rate_ext(cap); 792 + if (tb_dp_is_uhbr_rate(rate)) { 793 + /* 794 + * When UHBR is used there is no reduction in lanes so 795 + * we can use this directly. 796 + */ 797 + lanes = tb_dp_cap_get_lanes(cap); 798 + } else { 799 + /* 800 + * If there is no UHBR supported then check the 801 + * non-reduced rate and lanes. 
802 + */ 803 + ret = usb4_dp_port_nrd(in, &rate, &lanes); 804 + if (ret) 805 + return ret; 806 + } 858 807 859 808 nrd_bw = tb_dp_bandwidth(rate, lanes); 860 809 ··· 895 790 return nrd_bw; 896 791 } 897 792 898 - static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel, 899 - int *consumed_up, int *consumed_down) 793 + static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, 794 + int *consumed_up, 795 + int *consumed_down) 900 796 { 901 797 struct tb_port *out = tunnel->dst_port; 902 798 struct tb_port *in = tunnel->src_port; 903 799 int ret, allocated_bw, max_bw; 904 800 905 - if (!usb4_dp_port_bw_mode_enabled(in)) 801 + if (!usb4_dp_port_bandwidth_mode_enabled(in)) 906 802 return -EOPNOTSUPP; 907 803 908 804 if (!tunnel->bw_mode) 909 805 return -EOPNOTSUPP; 910 806 911 807 /* Read what was allocated previously if any */ 912 - ret = usb4_dp_port_allocated_bw(in); 808 + ret = usb4_dp_port_allocated_bandwidth(in); 913 809 if (ret < 0) 914 810 return ret; 915 811 allocated_bw = ret; 916 812 917 - ret = tb_dp_nrd_bandwidth(tunnel, &max_bw); 813 + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); 918 814 if (ret < 0) 919 815 return ret; 920 816 if (allocated_bw == max_bw) ··· 945 839 * If we have already set the allocated bandwidth then use that. 946 840 * Otherwise we read it from the DPRX. 
947 841 */ 948 - if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) { 842 + if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { 949 843 int ret, allocated_bw, max_bw; 950 844 951 - ret = usb4_dp_port_allocated_bw(in); 845 + ret = usb4_dp_port_allocated_bandwidth(in); 952 846 if (ret < 0) 953 847 return ret; 954 848 allocated_bw = ret; 955 849 956 - ret = tb_dp_nrd_bandwidth(tunnel, &max_bw); 850 + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); 957 851 if (ret < 0) 958 852 return ret; 959 853 if (allocated_bw == max_bw) ··· 980 874 struct tb_port *in = tunnel->src_port; 981 875 int max_bw, ret, tmp; 982 876 983 - if (!usb4_dp_port_bw_mode_enabled(in)) 877 + if (!usb4_dp_port_bandwidth_mode_enabled(in)) 984 878 return -EOPNOTSUPP; 985 879 986 - ret = tb_dp_nrd_bandwidth(tunnel, &max_bw); 880 + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); 987 881 if (ret < 0) 988 882 return ret; 989 883 990 884 if (in->sw->config.depth < out->sw->config.depth) { 991 885 tmp = min(*alloc_down, max_bw); 992 - ret = usb4_dp_port_allocate_bw(in, tmp); 886 + ret = usb4_dp_port_allocate_bandwidth(in, tmp); 993 887 if (ret) 994 888 return ret; 995 889 *alloc_down = tmp; 996 890 *alloc_up = 0; 997 891 } else { 998 892 tmp = min(*alloc_up, max_bw); 999 - ret = usb4_dp_port_allocate_bw(in, tmp); 893 + ret = usb4_dp_port_allocate_bandwidth(in, tmp); 1000 894 if (ret) 1001 895 return ret; 1002 896 *alloc_down = 0; ··· 1006 900 /* Now we can use BW mode registers to figure out the bandwidth */ 1007 901 /* TODO: need to handle discovery too */ 1008 902 tunnel->bw_mode = true; 903 + 904 + tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n", 905 + tmp); 1009 906 return 0; 1010 907 } 1011 908 ··· 1083 974 int *max_down) 1084 975 { 1085 976 struct tb_port *in = tunnel->src_port; 1086 - u32 rate, lanes; 1087 977 int ret; 1088 978 1089 - /* 1090 - * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read 1091 - * parameter 
values so this so we can use this to determine the 1092 - * maximum possible bandwidth over this link. 1093 - */ 1094 - ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes); 1095 - if (ret) 979 + if (!usb4_dp_port_bandwidth_mode_enabled(in)) 980 + return -EOPNOTSUPP; 981 + 982 + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); 983 + if (ret < 0) 1096 984 return ret; 1097 985 1098 986 if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { 1099 987 *max_up = 0; 1100 - *max_down = tb_dp_bandwidth(rate, lanes); 988 + *max_down = ret; 1101 989 } else { 1102 - *max_up = tb_dp_bandwidth(rate, lanes); 990 + *max_up = ret; 1103 991 *max_down = 0; 1104 992 } 1105 993 ··· 1117 1011 * mode is enabled first and then read the bandwidth 1118 1012 * through those registers. 1119 1013 */ 1120 - ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up, 1121 - consumed_down); 1014 + ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, 1015 + consumed_down); 1122 1016 if (ret < 0) { 1123 1017 if (ret != -EOPNOTSUPP) 1124 1018 return ret; ··· 1238 1132 return 0; 1239 1133 } 1240 1134 1135 + static void tb_dp_dump(struct tb_tunnel *tunnel) 1136 + { 1137 + struct tb_port *in, *out; 1138 + u32 dp_cap, rate, lanes; 1139 + 1140 + in = tunnel->src_port; 1141 + out = tunnel->dst_port; 1142 + 1143 + if (tb_port_read(in, &dp_cap, TB_CFG_PORT, 1144 + in->cap_adap + DP_LOCAL_CAP, 1)) 1145 + return; 1146 + 1147 + rate = tb_dp_cap_get_rate(dp_cap); 1148 + lanes = tb_dp_cap_get_lanes(dp_cap); 1149 + 1150 + tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 1151 + rate, lanes, tb_dp_bandwidth(rate, lanes)); 1152 + 1153 + out = tunnel->dst_port; 1154 + 1155 + if (tb_port_read(out, &dp_cap, TB_CFG_PORT, 1156 + out->cap_adap + DP_LOCAL_CAP, 1)) 1157 + return; 1158 + 1159 + rate = tb_dp_cap_get_rate(dp_cap); 1160 + lanes = tb_dp_cap_get_lanes(dp_cap); 1161 + 1162 + tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", 1163 + 
rate, lanes, tb_dp_bandwidth(rate, lanes)); 1164 + 1165 + if (tb_port_read(in, &dp_cap, TB_CFG_PORT, 1166 + in->cap_adap + DP_REMOTE_CAP, 1)) 1167 + return; 1168 + 1169 + rate = tb_dp_cap_get_rate(dp_cap); 1170 + lanes = tb_dp_cap_get_lanes(dp_cap); 1171 + 1172 + tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", 1173 + rate, lanes, tb_dp_bandwidth(rate, lanes)); 1174 + } 1175 + 1241 1176 /** 1242 1177 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels 1243 1178 * @tb: Pointer to the domain structure ··· 1355 1208 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); 1356 1209 goto err_deactivate; 1357 1210 } 1211 + 1212 + tb_dp_dump(tunnel); 1358 1213 1359 1214 tb_tunnel_dbg(tunnel, "discovered\n"); 1360 1215 return tunnel; ··· 1601 1452 struct tb_path *path; 1602 1453 int credits; 1603 1454 1455 + /* Ring 0 is reserved for control channel */ 1456 + if (WARN_ON(!receive_ring || !transmit_ring)) 1457 + return NULL; 1458 + 1604 1459 if (receive_ring > 0) 1605 1460 npaths++; 1606 1461 if (transmit_ring > 0) ··· 1621 1468 tunnel->dst_port = dst; 1622 1469 tunnel->deinit = tb_dma_deinit; 1623 1470 1624 - credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits); 1471 + credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits); 1625 1472 1626 1473 if (receive_ring > 0) { 1627 1474 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
+89 -25
drivers/thunderbolt/usb4.c
··· 15 15 #include "tb.h" 16 16 17 17 #define USB4_DATA_RETRIES 3 18 + #define USB4_DATA_DWORDS 16 18 19 19 20 enum usb4_sb_target { 20 21 USB4_SB_TARGET_ROUTER, ··· 113 112 { 114 113 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 115 114 116 - if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS) 115 + if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS) 117 116 return -EINVAL; 118 117 119 118 /* ··· 232 231 * is not available for some reason (like that there is Thunderbolt 3 233 232 * switch upstream) then the internal xHCI controller is enabled 234 233 * instead. 234 + * 235 + * This does not set the configuration valid bit of the router. To do 236 + * that call usb4_switch_configuration_valid(). 235 237 */ 236 238 int usb4_switch_setup(struct tb_switch *sw) 237 239 { 238 - struct tb_port *downstream_port; 239 - struct tb_switch *parent; 240 + struct tb_switch *parent = tb_switch_parent(sw); 241 + struct tb_port *down; 240 242 bool tbt3, xhci; 241 243 u32 val = 0; 242 244 int ret; ··· 253 249 if (ret) 254 250 return ret; 255 251 256 - parent = tb_switch_parent(sw); 257 - downstream_port = tb_port_at(tb_route(sw), parent); 258 - sw->link_usb4 = link_is_usb4(downstream_port); 252 + down = tb_switch_downstream_port(sw); 253 + sw->link_usb4 = link_is_usb4(down); 259 254 tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT"); 260 255 261 256 xhci = val & ROUTER_CS_6_HCI; ··· 291 288 292 289 /* TBT3 supported by the CM */ 293 290 val |= ROUTER_CS_5_C3S; 294 - /* Tunneling configuration is ready now */ 291 + 292 + return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 293 + } 294 + 295 + /** 296 + * usb4_switch_configuration_valid() - Set tunneling configuration to be valid 297 + * @sw: USB4 router 298 + * 299 + * Sets configuration valid bit for the router. Must be called before 300 + * any tunnels can be set through the router and after 301 + * usb4_switch_setup() has been called. 
Can be called to host and device 302 + * routers (does nothing for the latter). 303 + * 304 + * Returns %0 in success and negative errno otherwise. 305 + */ 306 + int usb4_switch_configuration_valid(struct tb_switch *sw) 307 + { 308 + u32 val; 309 + int ret; 310 + 311 + if (!tb_route(sw)) 312 + return 0; 313 + 314 + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 315 + if (ret) 316 + return ret; 317 + 295 318 val |= ROUTER_CS_5_CV; 296 319 297 320 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); ··· 732 703 int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma; 733 704 int ret, length, i, nports; 734 705 const struct tb_port *port; 735 - u32 data[NVM_DATA_DWORDS]; 706 + u32 data[USB4_DATA_DWORDS]; 736 707 u32 metadata = 0; 737 708 u8 status = 0; 738 709 ··· 1228 1199 1229 1200 static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords) 1230 1201 { 1231 - if (dwords > NVM_DATA_DWORDS) 1202 + if (dwords > USB4_DATA_DWORDS) 1232 1203 return -EINVAL; 1233 1204 1234 1205 return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, ··· 1238 1209 static int usb4_port_write_data(struct tb_port *port, const void *data, 1239 1210 size_t dwords) 1240 1211 { 1241 - if (dwords > NVM_DATA_DWORDS) 1212 + if (dwords > USB4_DATA_DWORDS) 1242 1213 return -EINVAL; 1243 1214 1244 1215 return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, ··· 1874 1845 int ret; 1875 1846 1876 1847 metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT; 1877 - if (dwords < NVM_DATA_DWORDS) 1848 + if (dwords < USB4_DATA_DWORDS) 1878 1849 metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT; 1879 1850 1880 1851 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, ··· 2294 2265 } 2295 2266 2296 2267 /** 2297 - * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported 2268 + * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode 2269 + * supported 2298 2270 * @port: DP IN 
adapter to check 2299 2271 * 2300 2272 * Can be called to any DP IN adapter. Returns true if the adapter 2301 2273 * supports USB4 bandwidth allocation mode, false otherwise. 2302 2274 */ 2303 - bool usb4_dp_port_bw_mode_supported(struct tb_port *port) 2275 + bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port) 2304 2276 { 2305 2277 int ret; 2306 2278 u32 val; ··· 2318 2288 } 2319 2289 2320 2290 /** 2321 - * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled 2291 + * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode 2292 + * enabled 2322 2293 * @port: DP IN adapter to check 2323 2294 * 2324 2295 * Can be called to any DP IN adapter. Returns true if the bandwidth 2325 2296 * allocation mode has been enabled, false otherwise. 2326 2297 */ 2327 - bool usb4_dp_port_bw_mode_enabled(struct tb_port *port) 2298 + bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port) 2328 2299 { 2329 2300 int ret; 2330 2301 u32 val; ··· 2342 2311 } 2343 2312 2344 2313 /** 2345 - * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode 2314 + * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for 2315 + * bandwidth allocation mode 2346 2316 * @port: DP IN adapter 2347 2317 * @supported: Does the CM support bandwidth allocation mode 2348 2318 * ··· 2352 2320 * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter 2353 2321 * does not support this. 2354 2322 */ 2355 - int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported) 2323 + int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, 2324 + bool supported) 2356 2325 { 2357 2326 u32 val; 2358 2327 int ret; ··· 2627 2594 } 2628 2595 2629 2596 /** 2630 - * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth 2597 + * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth 2631 2598 * @port: DP IN adapter 2632 2599 * @bw: Estimated bandwidth in Mb/s. 
2633 2600 * ··· 2637 2604 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if 2638 2605 * the adapter does not support this. 2639 2606 */ 2640 - int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw) 2607 + int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw) 2641 2608 { 2642 2609 u32 val, granularity; 2643 2610 int ret; ··· 2663 2630 } 2664 2631 2665 2632 /** 2666 - * usb4_dp_port_allocated_bw() - Return allocated bandwidth 2633 + * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth 2667 2634 * @port: DP IN adapter 2668 2635 * 2669 2636 * Reads and returns allocated bandwidth for @port in Mb/s (taking into 2670 2637 * account the programmed granularity). Returns negative errno in case 2671 2638 * of error. 2672 2639 */ 2673 - int usb4_dp_port_allocated_bw(struct tb_port *port) 2640 + int usb4_dp_port_allocated_bandwidth(struct tb_port *port) 2674 2641 { 2675 2642 u32 val, granularity; 2676 2643 int ret; ··· 2756 2723 } 2757 2724 2758 2725 /** 2759 - * usb4_dp_port_allocate_bw() - Set allocated bandwidth 2726 + * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth 2760 2727 * @port: DP IN adapter 2761 2728 * @bw: New allocated bandwidth in Mb/s 2762 2729 * ··· 2764 2731 * driver). Takes into account the programmed granularity. Returns %0 in 2765 2732 * success and negative errno in case of error. 
2766 2733 */ 2767 - int usb4_dp_port_allocate_bw(struct tb_port *port, int bw) 2734 + int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw) 2768 2735 { 2769 2736 u32 val, granularity; 2770 2737 int ret; ··· 2798 2765 } 2799 2766 2800 2767 /** 2801 - * usb4_dp_port_requested_bw() - Read requested bandwidth 2768 + * usb4_dp_port_requested_bandwidth() - Read requested bandwidth 2802 2769 * @port: DP IN adapter 2803 2770 * 2804 2771 * Reads the DPCD (graphics driver) requested bandwidth and returns it ··· 2807 2774 * the adapter does not support bandwidth allocation mode, and %ENODATA 2808 2775 * if there is no active bandwidth request from the graphics driver. 2809 2776 */ 2810 - int usb4_dp_port_requested_bw(struct tb_port *port) 2777 + int usb4_dp_port_requested_bandwidth(struct tb_port *port) 2811 2778 { 2812 2779 u32 val, granularity; 2813 2780 int ret; ··· 2829 2796 return -ENODATA; 2830 2797 2831 2798 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity; 2799 + } 2800 + 2801 + /** 2802 + * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation 2803 + * @port: PCIe adapter 2804 + * @enable: Enable/disable extended encapsulation 2805 + * 2806 + * Enables or disables extended encapsulation used in PCIe tunneling. Caller 2807 + * needs to make sure both adapters support this before enabling. Returns %0 on 2808 + * success and negative errno otherwise. 2809 + */ 2810 + int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable) 2811 + { 2812 + u32 val; 2813 + int ret; 2814 + 2815 + if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port)) 2816 + return -EINVAL; 2817 + 2818 + ret = tb_port_read(port, &val, TB_CFG_PORT, 2819 + port->cap_adap + ADP_PCIE_CS_1, 1); 2820 + if (ret) 2821 + return ret; 2822 + 2823 + if (enable) 2824 + val |= ADP_PCIE_CS_1_EE; 2825 + else 2826 + val &= ~ADP_PCIE_CS_1_EE; 2827 + 2828 + return tb_port_write(port, &val, TB_CFG_PORT, 2829 + port->cap_adap + ADP_PCIE_CS_1, 1); 2832 2830 }
+75 -23
drivers/thunderbolt/xdomain.c
··· 537 537 static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl, 538 538 struct tb_xdomain *xd, u8 sequence) 539 539 { 540 - struct tb_switch *sw = tb_to_switch(xd->dev.parent); 541 540 struct tb_xdp_link_state_status_response res; 542 - struct tb_port *port = tb_port_at(xd->route, sw); 541 + struct tb_port *port = tb_xdomain_downstream_port(xd); 543 542 u32 val[2]; 544 543 int ret; 545 544 ··· 1136 1137 struct tb_port *port; 1137 1138 int ret; 1138 1139 1139 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1140 + port = tb_xdomain_downstream_port(xd); 1140 1141 1141 1142 ret = tb_port_get_link_speed(port); 1142 1143 if (ret < 0) ··· 1250 1251 static int tb_xdomain_link_state_change(struct tb_xdomain *xd, 1251 1252 unsigned int width) 1252 1253 { 1253 - struct tb_switch *sw = tb_to_switch(xd->dev.parent); 1254 - struct tb_port *port = tb_port_at(xd->route, sw); 1254 + struct tb_port *port = tb_xdomain_downstream_port(xd); 1255 1255 struct tb *tb = xd->tb; 1256 1256 u8 tlw, tls; 1257 1257 u32 val; ··· 1290 1292 1291 1293 static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd) 1292 1294 { 1295 + unsigned int width, width_mask; 1293 1296 struct tb_port *port; 1294 - int ret, width; 1297 + int ret; 1295 1298 1296 1299 if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) { 1297 - width = 1; 1300 + width = TB_LINK_WIDTH_SINGLE; 1301 + width_mask = width; 1298 1302 } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) { 1299 - width = 2; 1303 + width = TB_LINK_WIDTH_DUAL; 1304 + width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX; 1300 1305 } else { 1301 1306 if (xd->state_retries-- > 0) { 1302 1307 dev_dbg(&xd->dev, ··· 1310 1309 return -ETIMEDOUT; 1311 1310 } 1312 1311 1313 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1312 + port = tb_xdomain_downstream_port(xd); 1314 1313 1315 1314 /* 1316 1315 * We can't use tb_xdomain_lane_bonding_enable() here because it ··· 1331 1330 
return ret; 1332 1331 } 1333 1332 1334 - ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT); 1333 + ret = tb_port_wait_for_link_width(port, width_mask, 1334 + XDOMAIN_BONDING_TIMEOUT); 1335 1335 if (ret) { 1336 1336 dev_warn(&xd->dev, "error waiting for link width to become %d\n", 1337 - width); 1337 + width_mask); 1338 1338 return ret; 1339 1339 } 1340 1340 1341 - port->bonded = width == 2; 1342 - port->dual_link_port->bonded = width == 2; 1341 + port->bonded = width > TB_LINK_WIDTH_SINGLE; 1342 + port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE; 1343 1343 1344 1344 tb_port_update_credits(port); 1345 1345 tb_xdomain_update_link_attributes(xd); ··· 1427 1425 if (xd->bonding_possible) { 1428 1426 struct tb_port *port; 1429 1427 1430 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1428 + port = tb_xdomain_downstream_port(xd); 1431 1429 if (!port->bonded) 1432 1430 tb_port_disable(port->dual_link_port); 1433 1431 } ··· 1739 1737 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); 1740 1738 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); 1741 1739 1742 - static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, 1743 - char *buf) 1740 + static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, 1741 + char *buf) 1744 1742 { 1745 1743 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); 1744 + unsigned int width; 1746 1745 1747 - return sysfs_emit(buf, "%u\n", xd->link_width); 1746 + switch (xd->link_width) { 1747 + case TB_LINK_WIDTH_SINGLE: 1748 + case TB_LINK_WIDTH_ASYM_RX: 1749 + width = 1; 1750 + break; 1751 + case TB_LINK_WIDTH_DUAL: 1752 + width = 2; 1753 + break; 1754 + case TB_LINK_WIDTH_ASYM_TX: 1755 + width = 3; 1756 + break; 1757 + default: 1758 + WARN_ON_ONCE(1); 1759 + return -EINVAL; 1760 + } 1761 + 1762 + return sysfs_emit(buf, "%u\n", width); 1748 1763 } 1764 + static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); 1749 1765 1750 - static 
DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL); 1751 - static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); 1766 + static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, 1767 + char *buf) 1768 + { 1769 + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); 1770 + unsigned int width; 1771 + 1772 + switch (xd->link_width) { 1773 + case TB_LINK_WIDTH_SINGLE: 1774 + case TB_LINK_WIDTH_ASYM_TX: 1775 + width = 1; 1776 + break; 1777 + case TB_LINK_WIDTH_DUAL: 1778 + width = 2; 1779 + break; 1780 + case TB_LINK_WIDTH_ASYM_RX: 1781 + width = 3; 1782 + break; 1783 + default: 1784 + WARN_ON_ONCE(1); 1785 + return -EINVAL; 1786 + } 1787 + 1788 + return sysfs_emit(buf, "%u\n", width); 1789 + } 1790 + static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); 1752 1791 1753 1792 static struct attribute *xdomain_attrs[] = { 1754 1793 &dev_attr_device.attr, ··· 2019 1976 */ 2020 1977 int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd) 2021 1978 { 1979 + unsigned int width_mask; 2022 1980 struct tb_port *port; 2023 1981 int ret; 2024 1982 2025 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1983 + port = tb_xdomain_downstream_port(xd); 2026 1984 if (!port->dual_link_port) 2027 1985 return -ENODEV; 2028 1986 ··· 2043 1999 return ret; 2044 2000 } 2045 2001 2046 - ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT); 2002 + /* Any of the widths are all bonded */ 2003 + width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | 2004 + TB_LINK_WIDTH_ASYM_RX; 2005 + 2006 + ret = tb_port_wait_for_link_width(port, width_mask, 2007 + XDOMAIN_BONDING_TIMEOUT); 2047 2008 if (ret) { 2048 2009 tb_port_warn(port, "failed to enable lane bonding\n"); 2049 2010 return ret; ··· 2073 2024 { 2074 2025 struct tb_port *port; 2075 2026 2076 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 2027 + port = tb_xdomain_downstream_port(xd); 2077 2028 if (port->dual_link_port) { 2029 + int ret; 2030 + 2078 2031 
tb_port_lane_bonding_disable(port); 2079 - if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT) 2032 + ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100); 2033 + if (ret == -ETIMEDOUT) 2080 2034 tb_port_warn(port, "timeout disabling lane bonding\n"); 2081 2035 tb_port_disable(port->dual_link_port); 2082 2036 tb_port_update_credits(port);
+16 -2
include/linux/thunderbolt.h
··· 172 172 void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); 173 173 174 174 /** 175 + * enum tb_link_width - Thunderbolt/USB4 link width 176 + * @TB_LINK_WIDTH_SINGLE: Single lane link 177 + * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link 178 + * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 trasmitters 179 + * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers 180 + */ 181 + enum tb_link_width { 182 + TB_LINK_WIDTH_SINGLE = BIT(0), 183 + TB_LINK_WIDTH_DUAL = BIT(1), 184 + TB_LINK_WIDTH_ASYM_TX = BIT(2), 185 + TB_LINK_WIDTH_ASYM_RX = BIT(3), 186 + }; 187 + 188 + /** 175 189 * struct tb_xdomain - Cross-domain (XDomain) connection 176 190 * @dev: XDomain device 177 191 * @tb: Pointer to the domain ··· 200 186 * @vendor_name: Name of the vendor (or %NULL if not known) 201 187 * @device_name: Name of the device (or %NULL if not known) 202 188 * @link_speed: Speed of the link in Gb/s 203 - * @link_width: Width of the link (1 or 2) 189 + * @link_width: Width of the downstream facing link 204 190 * @link_usb4: Downstream link is USB4 205 191 * @is_unplugged: The XDomain is unplugged 206 192 * @needs_uuid: If the XDomain does not have @remote_uuid it will be ··· 248 234 const char *vendor_name; 249 235 const char *device_name; 250 236 unsigned int link_speed; 251 - unsigned int link_width; 237 + enum tb_link_width link_width; 252 238 bool link_usb4; 253 239 bool is_unplugged; 254 240 bool needs_uuid;