// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS		0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

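/*
 * Look up the cached NVM authentication status for @sw. Caller must
 * hold nvm_auth_status_lock.
 */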
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

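/*
 * Validate the NVM image cached in sw->nvm->buf (size limits, FARB
 * pointer, digital section and device ID) and write it to the
 * non-active NVM of the router.
 */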
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * by themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

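/*
 * Start NVM authentication. USB4 routers use the router NVM_AUTH
 * operation; older routers go through the DMA port. With @auth_only
 * set only the authentication is performed without writing the image
 * first (supported on USB4 routers only).
 */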
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	} else if (auth_only) {
		return -EOPNOTSUPP;
	}

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

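/* NVMem read callback for the active NVM portion of the router */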
static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

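/*
 * NVMem write callback for the non-active NVM portion. The image is
 * only cached here; it is written to the hardware when the user
 * triggers authentication.
 */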
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so we
	 * currently restrict NVM upgrade to Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

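/* Set or clear the lane disable bit (LANE_ADP_CS_1_LD) of a lane adapter */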
static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

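/*
 * Allocate a HopID from the input or output HopID space of @port,
 * clamping the requested range to what the adapter supports.
 */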
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}

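/*
 * Returns true if @sw is in the subtree below @parent, i.e. the route
 * string of @parent is a prefix of the route string of @sw (each hop
 * in the route string is 8 bits).
 */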
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual link
 * port, the function follows that link and returns the other end of
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}

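/*
 * Program the target link width of a lane adapter and set the lane
 * bonding request bit. Does not wait for the link to actually change
 * width; use tb_port_wait_for_link_width() for that.
 */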
static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled
	 * by, for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it did.
 */
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0)
			return ret;
		else if (ret == width)
			return 0;

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

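/*
 * Re-read the adapter NFC credit register and refresh the cached
 * total buffer count if it has changed (this can happen after lane
 * bonding is enabled or disabled).
 */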
static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HDP (hot plug detect) bit
 * already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			data |= 4;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

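/*
 * Handle writes to the authorized attribute: 0 disapproves the router
 * (children first), 1 approves it, 2 challenges it with the stored
 * key. A KOBJ_CHANGE uevent is sent when the value changes.
 */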
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose both
 * directions here to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);
}

/*
 * Currently the link has the same number of lanes in both directions
 * (1 or 2) but we expose them separately to allow possible asymmetric
 * links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
}

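/*
 * Common handler for the nvm_authenticate and
 * nvm_authenticate_on_disconnect attributes: writes the cached image
 * to the non-active NVM if needed and starts the requested
 * authentication.
 */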
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
		if (add_uevent_var(env, "USB4_VERSION=1.0"))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;
	return 0;
}

/*
 * Currently we only need to provide the callbacks. Everything else is
 * handled in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

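/* Map the config space device ID of the router to a Thunderbolt generation */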
2092static int tb_switch_get_generation(struct tb_switch *sw)
2093{
2094 switch (sw->config.device_id) {
2095 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2096 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2097 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2098 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2099 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2100 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2101 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2102 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2103 return 1;
2104
2105 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2106 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2107 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2108 return 2;
2109
2110 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2111 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2112 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2113 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2114 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2115 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2116 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2117 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2118 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2119 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2120 return 3;
2121
2122 default:
2123 if (tb_switch_is_usb4(sw))
2124 return 4;
2125
2126 /*
2127		 * For unknown switches, assume generation 1 to be
2128		 * on the safe side.
2129 */
2130 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2131 sw->config.device_id);
2132 return 1;
2133 }
2134}
2135
2136static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2137{
2138 int max_depth;
2139
2140 if (tb_switch_is_usb4(sw) ||
2141 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2142 max_depth = USB4_SWITCH_MAX_DEPTH;
2143 else
2144 max_depth = TB_SWITCH_MAX_DEPTH;
2145
2146 return depth > max_depth;
2147}
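
/*
 * Note that the depth is simply the number of hops in the route
 * string: for example, a device three links away from the host router
 * has depth 3 and is rejected only if that exceeds the generation
 * specific maximum above.
 */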
2148
2149/**
2150 * tb_switch_alloc() - allocate a switch
2151 * @tb: Pointer to the owning domain
2152 * @parent: Parent device for this switch
2153 * @route: Route string for this switch
2154 *
2155 * Allocates and initializes a switch. Will not upload configuration to
2156 * the switch. For that you need to call tb_switch_configure()
2157 * separately. The returned switch should be released by calling
2158 * tb_switch_put().
2159 *
2160 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2161 * failure.
2162 */
2163struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2164 u64 route)
2165{
2166 struct tb_switch *sw;
2167 int upstream_port;
2168 int i, ret, depth;
2169
2170 /* Unlock the downstream port so we can access the switch below */
2171 if (route) {
2172 struct tb_switch *parent_sw = tb_to_switch(parent);
2173 struct tb_port *down;
2174
2175 down = tb_port_at(route, parent_sw);
2176 tb_port_unlock(down);
2177 }
2178
2179 depth = tb_route_length(route);
2180
2181 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2182 if (upstream_port < 0)
2183 return ERR_PTR(upstream_port);
2184
2185 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2186 if (!sw)
2187 return ERR_PTR(-ENOMEM);
2188
2189 sw->tb = tb;
2190 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2191 if (ret)
2192 goto err_free_sw_ports;
2193
2194 sw->generation = tb_switch_get_generation(sw);
2195
2196 tb_dbg(tb, "current switch config:\n");
2197 tb_dump_switch(tb, sw);
2198
2199 /* configure switch */
2200 sw->config.upstream_port_number = upstream_port;
2201 sw->config.depth = depth;
2202 sw->config.route_hi = upper_32_bits(route);
2203 sw->config.route_lo = lower_32_bits(route);
2204 sw->config.enabled = 0;
2205
2206 /* Make sure we do not exceed maximum topology limit */
2207 if (tb_switch_exceeds_max_depth(sw, depth)) {
2208 ret = -EADDRNOTAVAIL;
2209 goto err_free_sw_ports;
2210 }
2211
2212 /* initialize ports */
2213 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2214 GFP_KERNEL);
2215 if (!sw->ports) {
2216 ret = -ENOMEM;
2217 goto err_free_sw_ports;
2218 }
2219
2220 for (i = 0; i <= sw->config.max_port_number; i++) {
2221 /* minimum setup for tb_find_cap and tb_drom_read to work */
2222 sw->ports[i].sw = sw;
2223 sw->ports[i].port = i;
2224
2225 /* Control port does not need HopID allocation */
2226 if (i) {
2227 ida_init(&sw->ports[i].in_hopids);
2228 ida_init(&sw->ports[i].out_hopids);
2229 }
2230 }
2231
2232 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2233 if (ret > 0)
2234 sw->cap_plug_events = ret;
2235
2236 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2237 if (ret > 0)
2238 sw->cap_vsec_tmu = ret;
2239
2240 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2241 if (ret > 0)
2242 sw->cap_lc = ret;
2243
2244 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2245 if (ret > 0)
2246 sw->cap_lp = ret;
2247
2248 /* Root switch is always authorized */
2249 if (!route)
2250 sw->authorized = true;
2251
2252 device_initialize(&sw->dev);
2253 sw->dev.parent = parent;
2254 sw->dev.bus = &tb_bus_type;
2255 sw->dev.type = &tb_switch_type;
2256 sw->dev.groups = switch_groups;
2257 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2258
2259 return sw;
2260
2261err_free_sw_ports:
2262 kfree(sw->ports);
2263 kfree(sw);
2264
2265 return ERR_PTR(ret);
2266}
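
/*
 * A minimal sketch of the lifecycle described in the kernel-doc above,
 * as a connection manager would drive it on hotplug. The helper name
 * example_enumerate_switch() is made up for illustration and is not
 * part of the driver.
 */
static int __maybe_unused example_enumerate_switch(struct tb *tb,
						   struct device *parent,
						   u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	/* Upload configuration so the connection manager can use it */
	ret = tb_switch_configure(sw);
	if (ret)
		goto err_put;

	/* Expose the switch to userspace */
	ret = tb_switch_add(sw);
	if (ret)
		goto err_put;

	return 0;

err_put:
	tb_switch_put(sw);
	return ret;
}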
2267
2268/**
2269 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2270 * @tb: Pointer to the owning domain
2271 * @parent: Parent device for this switch
2272 * @route: Route string for this switch
2273 *
2274 * This creates a switch in safe mode. This means the switch lacks all
2275 * capabilities except the DMA configuration port, until it is flashed
2276 * with valid NVM firmware.
2277 *
2278 * The returned switch must be released by calling tb_switch_put().
2279 *
2280 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2281 */
2282struct tb_switch *
2283tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2284{
2285 struct tb_switch *sw;
2286
2287 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2288 if (!sw)
2289 return ERR_PTR(-ENOMEM);
2290
2291 sw->tb = tb;
2292 sw->config.depth = tb_route_length(route);
2293 sw->config.route_hi = upper_32_bits(route);
2294 sw->config.route_lo = lower_32_bits(route);
2295 sw->safe_mode = true;
2296
2297 device_initialize(&sw->dev);
2298 sw->dev.parent = parent;
2299 sw->dev.bus = &tb_bus_type;
2300 sw->dev.type = &tb_switch_type;
2301 sw->dev.groups = switch_groups;
2302 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2303
2304 return sw;
2305}
2306
2307/**
2308 * tb_switch_configure() - Uploads configuration to the switch
2309 * @sw: Switch to configure
2310 *
2311 * Call this function before the switch is added to the system. It will
2312 * upload the configuration to the switch and make it available for the
2313 * connection manager to use. Can be called again after the switch
2314 * resumes from low power states to re-initialize it.
2315 *
2316 * Return: %0 in case of success and negative errno in case of failure
2317 */
2318int tb_switch_configure(struct tb_switch *sw)
2319{
2320 struct tb *tb = sw->tb;
2321 u64 route;
2322 int ret;
2323
2324 route = tb_route(sw);
2325
2326 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2327 sw->config.enabled ? "restoring" : "initializing", route,
2328 tb_route_length(route), sw->config.upstream_port_number);
2329
2330 sw->config.enabled = 1;
2331
2332 if (tb_switch_is_usb4(sw)) {
2333 /*
2334		 * For USB4 routers, we need to program the CM version
2335		 * accordingly so that the router knows to expose all the
2336		 * additional capabilities.
2337 */
2338 sw->config.cmuv = USB4_VERSION_1_0;
2339
2340 /* Enumerate the switch */
2341 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2342 ROUTER_CS_1, 4);
2343 if (ret)
2344 return ret;
2345
2346 ret = usb4_switch_setup(sw);
2347 } else {
2348 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2349 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2350 sw->config.vendor_id);
2351
2352 if (!sw->cap_plug_events) {
2353 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2354 return -ENODEV;
2355 }
2356
2357 /* Enumerate the switch */
2358 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2359 ROUTER_CS_1, 3);
2360 }
2361 if (ret)
2362 return ret;
2363
2364 return tb_plug_events_active(sw, true);
2365}
2366
2367static int tb_switch_set_uuid(struct tb_switch *sw)
2368{
2369 bool uid = false;
2370 u32 uuid[4];
2371 int ret;
2372
2373 if (sw->uuid)
2374 return 0;
2375
2376 if (tb_switch_is_usb4(sw)) {
2377 ret = usb4_switch_read_uid(sw, &sw->uid);
2378 if (ret)
2379 return ret;
2380 uid = true;
2381 } else {
2382 /*
2383		 * The newer controllers include a fused UUID as part of
2384		 * the link controller specific registers
2385 */
2386 ret = tb_lc_read_uuid(sw, uuid);
2387 if (ret) {
2388 if (ret != -EINVAL)
2389 return ret;
2390 uid = true;
2391 }
2392 }
2393
2394 if (uid) {
2395 /*
2396		 * The ICM generates the UUID based on the UID and fills
2397		 * the upper two words with ones. This does not strictly
2398		 * follow the UUID format, but we want to be compatible
2399		 * with it, so we do the same here.
2400 */
2401 uuid[0] = sw->uid & 0xffffffff;
2402 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2403 uuid[2] = 0xffffffff;
2404 uuid[3] = 0xffffffff;
2405 }
2406
2407 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2408 if (!sw->uuid)
2409 return -ENOMEM;
2410 return 0;
2411}
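
/*
 * Worked example of the expansion above: a (made up) UID of
 * 0x0123456789abcdef yields the raw UUID words
 * { 0x89abcdef, 0x01234567, 0xffffffff, 0xffffffff }, i.e. the UID in
 * the lower two words and all ones in the upper two.
 */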
2412
2413static int tb_switch_add_dma_port(struct tb_switch *sw)
2414{
2415 u32 status;
2416 int ret;
2417
2418 switch (sw->generation) {
2419 case 2:
2420 /* Only root switch can be upgraded */
2421 if (tb_route(sw))
2422 return 0;
2423
2424 fallthrough;
2425 case 3:
2426 case 4:
2427 ret = tb_switch_set_uuid(sw);
2428 if (ret)
2429 return ret;
2430 break;
2431
2432 default:
2433 /*
2434 * DMA port is the only thing available when the switch
2435 * is in safe mode.
2436 */
2437 if (!sw->safe_mode)
2438 return 0;
2439 break;
2440 }
2441
2442 if (sw->no_nvm_upgrade)
2443 return 0;
2444
2445 if (tb_switch_is_usb4(sw)) {
2446 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2447 if (ret)
2448 return ret;
2449
2450 if (status) {
2451 tb_sw_info(sw, "switch flash authentication failed\n");
2452 nvm_set_auth_status(sw, status);
2453 }
2454
2455 return 0;
2456 }
2457
2458 /* Root switch DMA port requires running firmware */
2459 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2460 return 0;
2461
2462 sw->dma_port = dma_port_alloc(sw);
2463 if (!sw->dma_port)
2464 return 0;
2465
2466 /*
2467	 * If there is a status already set then the authentication failed
2468	 * when dma_port_flash_update_auth() returned. Power cycling
2469	 * is not needed (it was done already) so the only thing we do here
2470	 * is unblock runtime PM of the root port.
2471 */
2472 nvm_get_auth_status(sw, &status);
2473 if (status) {
2474 if (!tb_route(sw))
2475 nvm_authenticate_complete_dma_port(sw);
2476 return 0;
2477 }
2478
2479 /*
2480	 * Check the status of the previous flash authentication. If there
2481	 * was one, we need to power cycle the switch in any case to make
2482	 * it functional again.
2483 */
2484 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2485 if (ret <= 0)
2486 return ret;
2487
2488 /* Now we can allow root port to suspend again */
2489 if (!tb_route(sw))
2490 nvm_authenticate_complete_dma_port(sw);
2491
2492 if (status) {
2493 tb_sw_info(sw, "switch flash authentication failed\n");
2494 nvm_set_auth_status(sw, status);
2495 }
2496
2497 tb_sw_info(sw, "power cycling the switch now\n");
2498 dma_port_power_cycle(sw->dma_port);
2499
2500 /*
2501	 * We return an error here, which causes adding the switch to fail.
2502	 * It should appear back once the power cycle is complete.
2503 */
2504 return -ESHUTDOWN;
2505}
2506
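/*
 * For example, on a router whose lane adapters 1/2 and 3/4 are
 * consecutive null ports, the loop below pairs 1 <-> 2 and 3 <-> 4
 * (lane 0 and lane 1 of each physical port), unless the DROM already
 * provided the dual-link information.
 */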
2507static void tb_switch_default_link_ports(struct tb_switch *sw)
2508{
2509 int i;
2510
2511 for (i = 1; i <= sw->config.max_port_number; i++) {
2512 struct tb_port *port = &sw->ports[i];
2513 struct tb_port *subordinate;
2514
2515 if (!tb_port_is_null(port))
2516 continue;
2517
2518 /* Check for the subordinate port */
2519 if (i == sw->config.max_port_number ||
2520 !tb_port_is_null(&sw->ports[i + 1]))
2521 continue;
2522
2523		/* Link them if not already done (by the DROM) */
2524 subordinate = &sw->ports[i + 1];
2525 if (!port->dual_link_port && !subordinate->dual_link_port) {
2526 port->link_nr = 0;
2527 port->dual_link_port = subordinate;
2528 subordinate->link_nr = 1;
2529 subordinate->dual_link_port = port;
2530
2531 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2532 port->port, subordinate->port);
2533 }
2534 }
2535}
2536
2537static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2538{
2539 const struct tb_port *up = tb_upstream_port(sw);
2540
2541 if (!up->dual_link_port || !up->dual_link_port->remote)
2542 return false;
2543
2544 if (tb_switch_is_usb4(sw))
2545 return usb4_switch_lane_bonding_possible(sw);
2546 return tb_lc_lane_bonding_possible(sw);
2547}
2548
2549static int tb_switch_update_link_attributes(struct tb_switch *sw)
2550{
2551 struct tb_port *up;
2552 bool change = false;
2553 int ret;
2554
2555 if (!tb_route(sw) || tb_switch_is_icm(sw))
2556 return 0;
2557
2558 up = tb_upstream_port(sw);
2559
2560 ret = tb_port_get_link_speed(up);
2561 if (ret < 0)
2562 return ret;
2563 if (sw->link_speed != ret)
2564 change = true;
2565 sw->link_speed = ret;
2566
2567 ret = tb_port_get_link_width(up);
2568 if (ret < 0)
2569 return ret;
2570 if (sw->link_width != ret)
2571 change = true;
2572 sw->link_width = ret;
2573
2574	/* Notify userspace that there is a possible link attribute change */
2575 if (device_is_registered(&sw->dev) && change)
2576 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2577
2578 return 0;
2579}
2580
2581/**
2582 * tb_switch_lane_bonding_enable() - Enable lane bonding
2583 * @sw: Switch to enable lane bonding
2584 *
2585 * The connection manager can call this function to enable lane bonding
2586 * of a switch. If the conditions are correct and both switches support
2587 * the feature, the lanes are bonded. It is safe to call this for any switch.
2588 */
2589int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2590{
2591 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2592 struct tb_port *up, *down;
2593 u64 route = tb_route(sw);
2594 int ret;
2595
2596 if (!route)
2597 return 0;
2598
2599 if (!tb_switch_lane_bonding_possible(sw))
2600 return 0;
2601
2602 up = tb_upstream_port(sw);
2603 down = tb_port_at(route, parent);
2604
2605 if (!tb_port_is_width_supported(up, 2) ||
2606 !tb_port_is_width_supported(down, 2))
2607 return 0;
2608
2609 ret = tb_port_lane_bonding_enable(up);
2610 if (ret) {
2611 tb_port_warn(up, "failed to enable lane bonding\n");
2612 return ret;
2613 }
2614
2615 ret = tb_port_lane_bonding_enable(down);
2616 if (ret) {
2617 tb_port_warn(down, "failed to enable lane bonding\n");
2618 tb_port_lane_bonding_disable(up);
2619 return ret;
2620 }
2621
2622 ret = tb_port_wait_for_link_width(down, 2, 100);
2623 if (ret) {
2624 tb_port_warn(down, "timeout enabling lane bonding\n");
2625 return ret;
2626 }
2627
2628 tb_port_update_credits(down);
2629 tb_port_update_credits(up);
2630 tb_switch_update_link_attributes(sw);
2631
2632 tb_sw_dbg(sw, "lane bonding enabled\n");
2633 return ret;
2634}
2635
2636/**
2637 * tb_switch_lane_bonding_disable() - Disable lane bonding
2638 * @sw: Switch whose lane bonding to disable
2639 *
2640 * Disables lane bonding between @sw and parent. This can be called even
2641 * if lanes were not bonded originally.
2642 */
2643void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2644{
2645 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2646 struct tb_port *up, *down;
2647
2648 if (!tb_route(sw))
2649 return;
2650
2651 up = tb_upstream_port(sw);
2652 if (!up->bonded)
2653 return;
2654
2655 down = tb_port_at(tb_route(sw), parent);
2656
2657 tb_port_lane_bonding_disable(up);
2658 tb_port_lane_bonding_disable(down);
2659
2660 /*
2661 * It is fine if we get other errors as the router might have
2662 * been unplugged.
2663 */
2664 if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2665 tb_sw_warn(sw, "timeout disabling lane bonding\n");
2666
2667 tb_port_update_credits(down);
2668 tb_port_update_credits(up);
2669 tb_switch_update_link_attributes(sw);
2670
2671 tb_sw_dbg(sw, "lane bonding disabled\n");
2672}
2673
2674/**
2675 * tb_switch_configure_link() - Set link configured
2676 * @sw: Switch whose link is configured
2677 *
2678 * Sets the link upstream from @sw configured (from both ends) so that
2679 * it will not be disconnected when the domain exits sleep. Can be
2680 * called for any switch.
2681 *
2682 * It is recommended that this is called after lane bonding is enabled.
2683 *
2684 * Returns %0 on success and negative errno in case of error.
2685 */
2686int tb_switch_configure_link(struct tb_switch *sw)
2687{
2688 struct tb_port *up, *down;
2689 int ret;
2690
2691 if (!tb_route(sw) || tb_switch_is_icm(sw))
2692 return 0;
2693
2694 up = tb_upstream_port(sw);
2695 if (tb_switch_is_usb4(up->sw))
2696 ret = usb4_port_configure(up);
2697 else
2698 ret = tb_lc_configure_port(up);
2699 if (ret)
2700 return ret;
2701
2702 down = up->remote;
2703 if (tb_switch_is_usb4(down->sw))
2704 return usb4_port_configure(down);
2705 return tb_lc_configure_port(down);
2706}
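
/*
 * A minimal sketch (the caller name example_bring_up_link() is made up
 * for illustration) of the recommended ordering from the kernel-doc
 * above: enable lane bonding first, then mark the link configured.
 * Neither failure is treated as fatal here; the link simply stays at
 * one lane or unconfigured.
 */
static void __maybe_unused example_bring_up_link(struct tb_switch *sw)
{
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}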
2707
2708/**
2709 * tb_switch_unconfigure_link() - Unconfigure link
2710 * @sw: Switch whose link is unconfigured
2711 *
2712 * Sets the link unconfigured so that @sw will be disconnected if the
2713 * domain exits sleep.
2714 */
2715void tb_switch_unconfigure_link(struct tb_switch *sw)
2716{
2717 struct tb_port *up, *down;
2718
2719 if (sw->is_unplugged)
2720 return;
2721 if (!tb_route(sw) || tb_switch_is_icm(sw))
2722 return;
2723
2724 up = tb_upstream_port(sw);
2725 if (tb_switch_is_usb4(up->sw))
2726 usb4_port_unconfigure(up);
2727 else
2728 tb_lc_unconfigure_port(up);
2729
2730 down = up->remote;
2731 if (tb_switch_is_usb4(down->sw))
2732 usb4_port_unconfigure(down);
2733 else
2734 tb_lc_unconfigure_port(down);
2735}
2736
2737static void tb_switch_credits_init(struct tb_switch *sw)
2738{
2739 if (tb_switch_is_icm(sw))
2740 return;
2741 if (!tb_switch_is_usb4(sw))
2742 return;
2743 if (usb4_switch_credits_init(sw))
2744 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2745}
2746
2747/**
2748 * tb_switch_add() - Add a switch to the domain
2749 * @sw: Switch to add
2750 *
2751 * This is the last step in adding a switch to the domain. It reads
2752 * identification information from the DROM and initializes the ports
2753 * so that they can be used to connect other switches. The switch is
2754 * exposed to userspace when this function successfully returns. To
2755 * remove and release the switch, call tb_switch_remove().
2756 *
2757 * Return: %0 in case of success and negative errno in case of failure
2758 */
2759int tb_switch_add(struct tb_switch *sw)
2760{
2761 int i, ret;
2762
2763 /*
2764	 * Initialize the DMA control port now, before we read the DROM.
2765	 * Recent host controllers have a more complete DROM in NVM that
2766	 * includes the vendor and model identification strings, which we
2767	 * then expose to userspace. The NVM can be accessed through the
2768	 * DMA configuration based mailbox.
2769 */
2770 ret = tb_switch_add_dma_port(sw);
2771 if (ret) {
2772 dev_err(&sw->dev, "failed to add DMA port\n");
2773 return ret;
2774 }
2775
2776 if (!sw->safe_mode) {
2777 tb_switch_credits_init(sw);
2778
2779 /* read drom */
2780 ret = tb_drom_read(sw);
2781 if (ret) {
2782 dev_err(&sw->dev, "reading DROM failed\n");
2783 return ret;
2784 }
2785 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2786
2787 tb_check_quirks(sw);
2788
2789 ret = tb_switch_set_uuid(sw);
2790 if (ret) {
2791 dev_err(&sw->dev, "failed to set UUID\n");
2792 return ret;
2793 }
2794
2795 for (i = 0; i <= sw->config.max_port_number; i++) {
2796 if (sw->ports[i].disabled) {
2797 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2798 continue;
2799 }
2800 ret = tb_init_port(&sw->ports[i]);
2801 if (ret) {
2802 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2803 return ret;
2804 }
2805 }
2806
2807 tb_switch_default_link_ports(sw);
2808
2809 ret = tb_switch_update_link_attributes(sw);
2810 if (ret)
2811 return ret;
2812
2813 ret = tb_switch_tmu_init(sw);
2814 if (ret)
2815 return ret;
2816 }
2817
2818 ret = device_add(&sw->dev);
2819 if (ret) {
2820 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2821 return ret;
2822 }
2823
2824 if (tb_route(sw)) {
2825 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2826 sw->vendor, sw->device);
2827 if (sw->vendor_name && sw->device_name)
2828 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2829 sw->device_name);
2830 }
2831
2832 ret = usb4_switch_add_ports(sw);
2833 if (ret) {
2834 dev_err(&sw->dev, "failed to add USB4 ports\n");
2835 goto err_del;
2836 }
2837
2838 ret = tb_switch_nvm_add(sw);
2839 if (ret) {
2840 dev_err(&sw->dev, "failed to add NVM devices\n");
2841 goto err_ports;
2842 }
2843
2844 /*
2845	 * Thunderbolt routers do not generate wakeups themselves, but
2846	 * they forward wakeups from the tunneled protocols, so enable
2847	 * wakeup here.
2848 */
2849 device_init_wakeup(&sw->dev, true);
2850
2851 pm_runtime_set_active(&sw->dev);
2852 if (sw->rpm) {
2853 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2854 pm_runtime_use_autosuspend(&sw->dev);
2855 pm_runtime_mark_last_busy(&sw->dev);
2856 pm_runtime_enable(&sw->dev);
2857 pm_request_autosuspend(&sw->dev);
2858 }
2859
2860 tb_switch_debugfs_init(sw);
2861 return 0;
2862
2863err_ports:
2864 usb4_switch_remove_ports(sw);
2865err_del:
2866 device_del(&sw->dev);
2867
2868 return ret;
2869}
2870
2871/**
2872 * tb_switch_remove() - Remove and release a switch
2873 * @sw: Switch to remove
2874 *
2875 * This will remove the switch from the domain and release it once its
2876 * reference count drops to zero. If there are switches connected below
2877 * this switch, they will be removed as well.
2878 */
2879void tb_switch_remove(struct tb_switch *sw)
2880{
2881 struct tb_port *port;
2882
2883 tb_switch_debugfs_remove(sw);
2884
2885 if (sw->rpm) {
2886 pm_runtime_get_sync(&sw->dev);
2887 pm_runtime_disable(&sw->dev);
2888 }
2889
2890 /* port 0 is the switch itself and never has a remote */
2891 tb_switch_for_each_port(sw, port) {
2892 if (tb_port_has_remote(port)) {
2893 tb_switch_remove(port->remote->sw);
2894 port->remote = NULL;
2895 } else if (port->xdomain) {
2896 tb_xdomain_remove(port->xdomain);
2897 port->xdomain = NULL;
2898 }
2899
2900 /* Remove any downstream retimers */
2901 tb_retimer_remove_all(port);
2902 }
2903
2904 if (!sw->is_unplugged)
2905 tb_plug_events_active(sw, false);
2906
2907 tb_switch_nvm_remove(sw);
2908 usb4_switch_remove_ports(sw);
2909
2910 if (tb_route(sw))
2911 dev_info(&sw->dev, "device disconnected\n");
2912 device_unregister(&sw->dev);
2913}
2914
2915/**
2916 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
2917 * @sw: Router to mark unplugged
2918 */
2919void tb_sw_set_unplugged(struct tb_switch *sw)
2920{
2921 struct tb_port *port;
2922
2923 if (sw == sw->tb->root_switch) {
2924 tb_sw_WARN(sw, "cannot unplug root switch\n");
2925 return;
2926 }
2927 if (sw->is_unplugged) {
2928 tb_sw_WARN(sw, "is_unplugged already set\n");
2929 return;
2930 }
2931 sw->is_unplugged = true;
2932 tb_switch_for_each_port(sw, port) {
2933 if (tb_port_has_remote(port))
2934 tb_sw_set_unplugged(port->remote->sw);
2935 else if (port->xdomain)
2936 port->xdomain->is_unplugged = true;
2937 }
2938}
2939
2940static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
2941{
2942 if (flags)
2943 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
2944 else
2945 tb_sw_dbg(sw, "disabling wakeup\n");
2946
2947 if (tb_switch_is_usb4(sw))
2948 return usb4_switch_set_wake(sw, flags);
2949 return tb_lc_set_wake(sw, flags);
2950}
2951
2952int tb_switch_resume(struct tb_switch *sw)
2953{
2954 struct tb_port *port;
2955 int err;
2956
2957 tb_sw_dbg(sw, "resuming switch\n");
2958
2959 /*
2960	 * Check the UID of the connected switches, except for the root
2961	 * switch, which we assume cannot be removed.
2962 */
2963 if (tb_route(sw)) {
2964 u64 uid;
2965
2966 /*
2967 * Check first that we can still read the switch config
2968 * space. It may be that there is now another domain
2969 * connected.
2970 */
2971 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
2972 if (err < 0) {
2973 tb_sw_info(sw, "switch not present anymore\n");
2974 return err;
2975 }
2976
2977 if (tb_switch_is_usb4(sw))
2978 err = usb4_switch_read_uid(sw, &uid);
2979 else
2980 err = tb_drom_read_uid_only(sw, &uid);
2981 if (err) {
2982 tb_sw_warn(sw, "uid read failed\n");
2983 return err;
2984 }
2985 if (sw->uid != uid) {
2986 tb_sw_info(sw,
2987 "changed while suspended (uid %#llx -> %#llx)\n",
2988 sw->uid, uid);
2989 return -ENODEV;
2990 }
2991 }
2992
2993 err = tb_switch_configure(sw);
2994 if (err)
2995 return err;
2996
2997 /* Disable wakes */
2998 tb_switch_set_wake(sw, 0);
2999
3000 err = tb_switch_tmu_init(sw);
3001 if (err)
3002 return err;
3003
3004 /* check for surviving downstream switches */
3005 tb_switch_for_each_port(sw, port) {
3006 if (!tb_port_is_null(port))
3007 continue;
3008
3009 if (!tb_port_resume(port))
3010 continue;
3011
3012 if (tb_wait_for_port(port, true) <= 0) {
3013 tb_port_warn(port,
3014 "lost during suspend, disconnecting\n");
3015 if (tb_port_has_remote(port))
3016 tb_sw_set_unplugged(port->remote->sw);
3017 else if (port->xdomain)
3018 port->xdomain->is_unplugged = true;
3019 } else {
3020 /*
3021 * Always unlock the port so the downstream
3022 * switch/domain is accessible.
3023 */
3024 if (tb_port_unlock(port))
3025 tb_port_warn(port, "failed to unlock port\n");
3026 if (port->remote && tb_switch_resume(port->remote->sw)) {
3027 tb_port_warn(port,
3028 "lost during suspend, disconnecting\n");
3029 tb_sw_set_unplugged(port->remote->sw);
3030 }
3031 }
3032 }
3033 return 0;
3034}
3035
3036/**
3037 * tb_switch_suspend() - Put a switch to sleep
3038 * @sw: Switch to suspend
3039 * @runtime: Is this runtime suspend or system sleep
3040 *
3041 * Suspends the router and all its children. Enables wakes according to
3042 * the value of @runtime and then sets the sleep bit for the router. If
3043 * @sw is the host router, the domain is ready to go to sleep once this
3044 * function returns.
3045 */
3046void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3047{
3048 unsigned int flags = 0;
3049 struct tb_port *port;
3050 int err;
3051
3052 tb_sw_dbg(sw, "suspending switch\n");
3053
3054 /*
3055	 * Actually only needed for Titan Ridge, but for simplicity it can
3056	 * be done for USB4 devices too, as CLx is re-enabled at resume.
3057 */
3058 if (tb_switch_disable_clx(sw, TB_CL0S))
3059 tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
3060
3061 err = tb_plug_events_active(sw, false);
3062 if (err)
3063 return;
3064
3065 tb_switch_for_each_port(sw, port) {
3066 if (tb_port_has_remote(port))
3067 tb_switch_suspend(port->remote->sw, runtime);
3068 }
3069
3070 if (runtime) {
3071 /* Trigger wake when something is plugged in/out */
3072 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3073 flags |= TB_WAKE_ON_USB4;
3074 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3075 } else if (device_may_wakeup(&sw->dev)) {
3076 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3077 }
3078
3079 tb_switch_set_wake(sw, flags);
3080
3081 if (tb_switch_is_usb4(sw))
3082 usb4_switch_set_sleep(sw);
3083 else
3084 tb_lc_set_sleep(sw);
3085}
3086
3087/**
3088 * tb_switch_query_dp_resource() - Query availability of DP resource
3089 * @sw: Switch whose DP resource is queried
3090 * @in: DP IN port
3091 *
3092 * Queries the availability of a DP resource for DP tunneling using
3093 * switch-specific means. Returns %true if the resource is available.
3094 */
3095bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3096{
3097 if (tb_switch_is_usb4(sw))
3098 return usb4_switch_query_dp_resource(sw, in);
3099 return tb_lc_dp_sink_query(sw, in);
3100}
3101
3102/**
3103 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3104 * @sw: Switch whose DP resource is allocated
3105 * @in: DP IN port
3106 *
3107 * Allocates DP resource for DP tunneling. The resource must be
3108 * available for this to succeed (see tb_switch_query_dp_resource()).
3109 * Returns %0 on success and negative errno otherwise.
3110 */
3111int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3112{
3113 int ret;
3114
3115 if (tb_switch_is_usb4(sw))
3116 ret = usb4_switch_alloc_dp_resource(sw, in);
3117 else
3118 ret = tb_lc_dp_sink_alloc(sw, in);
3119
3120 if (ret)
3121 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3122 in->port);
3123 else
3124 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3125
3126 return ret;
3127}
3128
3129/**
3130 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3131 * @sw: Switch whose DP resource is de-allocated
3132 * @in: DP IN port
3133 *
3134 * De-allocates DP resource that was previously allocated for DP
3135 * tunneling.
3136 */
3137void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3138{
3139 int ret;
3140
3141 if (tb_switch_is_usb4(sw))
3142 ret = usb4_switch_dealloc_dp_resource(sw, in);
3143 else
3144 ret = tb_lc_dp_sink_dealloc(sw, in);
3145
3146 if (ret)
3147 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3148 in->port);
3149 else
3150 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3151}
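
/*
 * A minimal sketch tying the three DP resource helpers above together:
 * query availability first, allocate for the lifetime of the tunnel
 * and de-allocate on teardown. example_reserve_dp_sink() is a made-up
 * name for illustration.
 */
static int __maybe_unused example_reserve_dp_sink(struct tb_switch *sw,
						  struct tb_port *in)
{
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;	/* Sink is already in use */

	return tb_switch_alloc_dp_resource(sw, in);
	/* ...and tb_switch_dealloc_dp_resource(sw, in) when torn down */
}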
3152
3153struct tb_sw_lookup {
3154 struct tb *tb;
3155 u8 link;
3156 u8 depth;
3157 const uuid_t *uuid;
3158 u64 route;
3159};
3160
3161static int tb_switch_match(struct device *dev, const void *data)
3162{
3163 struct tb_switch *sw = tb_to_switch(dev);
3164 const struct tb_sw_lookup *lookup = data;
3165
3166 if (!sw)
3167 return 0;
3168 if (sw->tb != lookup->tb)
3169 return 0;
3170
3171 if (lookup->uuid)
3172 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3173
3174 if (lookup->route) {
3175 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3176 sw->config.route_hi == upper_32_bits(lookup->route);
3177 }
3178
3179 /* Root switch is matched only by depth */
3180 if (!lookup->depth)
3181 return !sw->depth;
3182
3183 return sw->link == lookup->link && sw->depth == lookup->depth;
3184}
3185
3186/**
3187 * tb_switch_find_by_link_depth() - Find switch by link and depth
3188 * @tb: Domain the switch belongs to
3189 * @link: Link number the switch is connected to
3190 * @depth: Depth of the switch in the link
3191 *
3192 * Returned switch has reference count increased so the caller needs to
3193 * call tb_switch_put() when done with the switch.
3194 */
3195struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3196{
3197 struct tb_sw_lookup lookup;
3198 struct device *dev;
3199
3200 memset(&lookup, 0, sizeof(lookup));
3201 lookup.tb = tb;
3202 lookup.link = link;
3203 lookup.depth = depth;
3204
3205 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3206 if (dev)
3207 return tb_to_switch(dev);
3208
3209 return NULL;
3210}
3211
3212/**
3213 * tb_switch_find_by_uuid() - Find switch by UUID
3214 * @tb: Domain the switch belongs to
3215 * @uuid: UUID to look for
3216 *
3217 * Returned switch has reference count increased so the caller needs to
3218 * call tb_switch_put() when done with the switch.
3219 */
3220struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3221{
3222 struct tb_sw_lookup lookup;
3223 struct device *dev;
3224
3225 memset(&lookup, 0, sizeof(lookup));
3226 lookup.tb = tb;
3227 lookup.uuid = uuid;
3228
3229 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3230 if (dev)
3231 return tb_to_switch(dev);
3232
3233 return NULL;
3234}
3235
3236/**
3237 * tb_switch_find_by_route() - Find switch by route string
3238 * @tb: Domain the switch belongs to
3239 * @route: Route string to look for
3240 *
3241 * Returned switch has reference count increased so the caller needs to
3242 * call tb_switch_put() when done with the switch.
3243 */
3244struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3245{
3246 struct tb_sw_lookup lookup;
3247 struct device *dev;
3248
3249 if (!route)
3250 return tb_switch_get(tb->root_switch);
3251
3252 memset(&lookup, 0, sizeof(lookup));
3253 lookup.tb = tb;
3254 lookup.route = route;
3255
3256 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3257 if (dev)
3258 return tb_to_switch(dev);
3259
3260 return NULL;
3261}
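
/*
 * All three lookups above share the same reference counting contract,
 * sketched here with a made-up caller: the found switch carries an
 * extra reference that must be dropped with tb_switch_put().
 */
static void __maybe_unused example_lookup_switch(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (!sw)
		return;

	tb_sw_dbg(sw, "found switch at route %llx\n", route);
	tb_switch_put(sw);
}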
3262
3263/**
3264 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3265 * @sw: Switch to find the port from
3266 * @type: Port type to look for
3267 */
3268struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3269 enum tb_port_type type)
3270{
3271 struct tb_port *port;
3272
3273 tb_switch_for_each_port(sw, port) {
3274 if (port->config.type == type)
3275 return port;
3276 }
3277
3278 return NULL;
3279}
3280
3281static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
3282{
3283 u32 phy;
3284 int ret;
3285
3286 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3287 port->cap_phy + LANE_ADP_CS_1, 1);
3288 if (ret)
3289 return ret;
3290
3291 if (secondary)
3292 phy |= LANE_ADP_CS_1_PMS;
3293 else
3294 phy &= ~LANE_ADP_CS_1_PMS;
3295
3296 return tb_port_write(port, &phy, TB_CFG_PORT,
3297 port->cap_phy + LANE_ADP_CS_1, 1);
3298}
3299
3300static int tb_port_pm_secondary_enable(struct tb_port *port)
3301{
3302 return __tb_port_pm_secondary_set(port, true);
3303}
3304
3305static int tb_port_pm_secondary_disable(struct tb_port *port)
3306{
3307 return __tb_port_pm_secondary_set(port, false);
3308}
3309
3310static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3311{
3312 struct tb_switch *parent = tb_switch_parent(sw);
3313 struct tb_port *up, *down;
3314 int ret;
3315
3316 if (!tb_route(sw))
3317 return 0;
3318
3319 up = tb_upstream_port(sw);
3320 down = tb_port_at(tb_route(sw), parent);
3321 ret = tb_port_pm_secondary_enable(up);
3322 if (ret)
3323 return ret;
3324
3325 return tb_port_pm_secondary_disable(down);
3326}
3327
3328/* Called for USB4 or Titan Ridge routers only */
3329static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
3330{
3331 u32 mask, val;
3332 bool ret;
3333
3334 /* Don't enable CLx in case of two single-lane links */
3335 if (!port->bonded && port->dual_link_port)
3336 return false;
3337
3338 /* Don't enable CLx in case of inter-domain link */
3339 if (port->xdomain)
3340 return false;
3341
3342 if (tb_switch_is_usb4(port->sw)) {
3343 if (!usb4_port_clx_supported(port))
3344 return false;
3345 } else if (!tb_lc_is_clx_supported(port)) {
3346 return false;
3347 }
3348
3349 switch (clx) {
3350 case TB_CL0S:
3351		/* CL0s support also requires CL1 support */
3352 mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
3353 break;
3354
3355	/* For now we support only CL0s, not CL1 or CL2 */
3356 case TB_CL1:
3357 case TB_CL2:
3358 default:
3359 return false;
3360 }
3361
3362 ret = tb_port_read(port, &val, TB_CFG_PORT,
3363 port->cap_phy + LANE_ADP_CS_0, 1);
3364 if (ret)
3365 return false;
3366
3367 return !!(val & mask);
3368}
3369
3370static inline bool tb_port_cl0s_supported(struct tb_port *port)
3371{
3372 return tb_port_clx_supported(port, TB_CL0S);
3373}
3374
3375static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
3376{
3377 u32 phy, mask;
3378 int ret;
3379
3380	/* Enabling CL0s also requires enabling CL1 */
3381 mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
3382 ret = tb_port_read(port, &phy, TB_CFG_PORT,
3383 port->cap_phy + LANE_ADP_CS_1, 1);
3384 if (ret)
3385 return ret;
3386
3387 if (enable)
3388 phy |= mask;
3389 else
3390 phy &= ~mask;
3391
3392 return tb_port_write(port, &phy, TB_CFG_PORT,
3393 port->cap_phy + LANE_ADP_CS_1, 1);
3394}
3395
3396static int tb_port_cl0s_disable(struct tb_port *port)
3397{
3398 return __tb_port_cl0s_set(port, false);
3399}
3400
3401static int tb_port_cl0s_enable(struct tb_port *port)
3402{
3403 return __tb_port_cl0s_set(port, true);
3404}
3405
3406static int tb_switch_enable_cl0s(struct tb_switch *sw)
3407{
3408 struct tb_switch *parent = tb_switch_parent(sw);
3409 bool up_cl0s_support, down_cl0s_support;
3410 struct tb_port *up, *down;
3411 int ret;
3412
3413 if (!tb_switch_is_clx_supported(sw))
3414 return 0;
3415
3416 /*
3417	 * The host router's downstream port is handled as part of the
3418	 * downstream router enabling procedure, so skip the host router.
3419 */
3420 if (!tb_route(sw))
3421 return 0;
3422
3423 /* Enable CLx only for first hop router (depth = 1) */
3424 if (tb_route(parent))
3425 return 0;
3426
3427 ret = tb_switch_pm_secondary_resolve(sw);
3428 if (ret)
3429 return ret;
3430
3431 up = tb_upstream_port(sw);
3432 down = tb_port_at(tb_route(sw), parent);
3433
3434 up_cl0s_support = tb_port_cl0s_supported(up);
3435 down_cl0s_support = tb_port_cl0s_supported(down);
3436
3437 tb_port_dbg(up, "CL0s %ssupported\n",
3438 up_cl0s_support ? "" : "not ");
3439 tb_port_dbg(down, "CL0s %ssupported\n",
3440 down_cl0s_support ? "" : "not ");
3441
3442 if (!up_cl0s_support || !down_cl0s_support)
3443 return -EOPNOTSUPP;
3444
3445 ret = tb_port_cl0s_enable(up);
3446 if (ret)
3447 return ret;
3448
3449 ret = tb_port_cl0s_enable(down);
3450 if (ret) {
3451 tb_port_cl0s_disable(up);
3452 return ret;
3453 }
3454
3455 ret = tb_switch_mask_clx_objections(sw);
3456 if (ret) {
3457 tb_port_cl0s_disable(up);
3458 tb_port_cl0s_disable(down);
3459 return ret;
3460 }
3461
3462 sw->clx = TB_CL0S;
3463
3464 tb_port_dbg(up, "CL0s enabled\n");
3465 return 0;
3466}
3467
3468/**
3469 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3470 * @sw: Router to enable CLx for
3471 * @clx: The CLx state to enable
3472 *
3473 * Enables the CLx state only for the first hop router. That is the
3474 * most common use-case; it is intended for better thermal management
3475 * and thus helps to improve performance. CLx is enabled only if both
3476 * sides of the link support CLx, the link is not configured as two
3477 * single-lane links, and the link is not an inter-domain link. The
3478 * complete set of conditions is described in CM Guide 1.0 section 8.1.
3479 *
3480 * Return: Returns 0 on success or an error code on failure.
3481 */
3482int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3483{
3484 struct tb_switch *root_sw = sw->tb->root_switch;
3485
3486 if (!clx_enabled)
3487 return 0;
3488
3489 /*
3490 * CLx is not enabled and validated on Intel USB4 platforms before
3491 * Alder Lake.
3492 */
3493 if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3494 return 0;
3495
3496 switch (clx) {
3497 case TB_CL0S:
3498 return tb_switch_enable_cl0s(sw);
3499
3500 default:
3501 return -EOPNOTSUPP;
3502 }
3503}
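
/*
 * A minimal sketch (made-up caller) of how a connection manager might
 * use tb_switch_enable_clx(): -EOPNOTSUPP just means the conditions
 * in the kernel-doc above were not met and need not be fatal.
 */
static void __maybe_unused example_enable_low_power(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_enable_clx(sw, TB_CL0S);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable CLx: %d\n", ret);
}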
3504
3505static int tb_switch_disable_cl0s(struct tb_switch *sw)
3506{
3507 struct tb_switch *parent = tb_switch_parent(sw);
3508 struct tb_port *up, *down;
3509 int ret;
3510
3511 if (!tb_switch_is_clx_supported(sw))
3512 return 0;
3513
3514 /*
3515	 * The host router's downstream port is handled as part of the
3516	 * downstream router disabling procedure, so skip the host router.
3517 */
3518 if (!tb_route(sw))
3519 return 0;
3520
3521 /* Disable CLx only for first hop router (depth = 1) */
3522 if (tb_route(parent))
3523 return 0;
3524
3525 up = tb_upstream_port(sw);
3526 down = tb_port_at(tb_route(sw), parent);
3527 ret = tb_port_cl0s_disable(up);
3528 if (ret)
3529 return ret;
3530
3531 ret = tb_port_cl0s_disable(down);
3532 if (ret)
3533 return ret;
3534
3535 sw->clx = TB_CLX_DISABLE;
3536
3537 tb_port_dbg(up, "CL0s disabled\n");
3538 return 0;
3539}
3540
3541/**
3542 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3543 * @sw: Router to disable CLx for
3544 * @clx: The CLx state to disable
3545 *
3546 * Return: Returns 0 on success or an error code on failure.
3547 */
3548int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3549{
3550 if (!clx_enabled)
3551 return 0;
3552
3553 switch (clx) {
3554 case TB_CL0S:
3555 return tb_switch_disable_cl0s(sw);
3556
3557 default:
3558 return -EOPNOTSUPP;
3559 }
3560}
3561
3562/**
3563 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
3564 * @sw: Router to mask objections for
3565 *
3566 * Mask the objections coming from the second depth routers in order to
3567 * stop these objections from interfering with the CLx states of the first
3568 * depth link.
3569 */
3570int tb_switch_mask_clx_objections(struct tb_switch *sw)
3571{
3572 int up_port = sw->config.upstream_port_number;
3573 u32 offset, val[2], mask_obj, unmask_obj;
3574 int ret, i;
3575
3576	/* Only Titan Ridge of the pre-USB4 devices supports CLx states */
3577 if (!tb_switch_is_titan_ridge(sw))
3578 return 0;
3579
3580 if (!tb_route(sw))
3581 return 0;
3582
3583 /*
3584 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
3585 * Port A consists of lane adapters 1,2 and
3586 * Port B consists of lane adapters 3,4
3587	 * If the upstream port is A (lanes 1,2), we mask objections from
3588	 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
3589 */
3590 if (up_port == 1) {
3591 mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3592 unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3593 offset = TB_LOW_PWR_C1_CL1;
3594 } else {
3595 mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3596 unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3597 offset = TB_LOW_PWR_C3_CL1;
3598 }
3599
3600 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
3601 sw->cap_lp + offset, ARRAY_SIZE(val));
3602 if (ret)
3603 return ret;
3604
3605 for (i = 0; i < ARRAY_SIZE(val); i++) {
3606 val[i] |= mask_obj;
3607 val[i] &= ~unmask_obj;
3608 }
3609
3610 return tb_sw_write(sw, &val, TB_CFG_SWITCH,
3611 sw->cap_lp + offset, ARRAY_SIZE(val));
3612}
3613
3614/*
3615 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
3616 * device. For now it is used only for Titan Ridge.
3617 */
3618static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3619 unsigned int pcie_offset, u32 value)
3620{
3621 u32 offset, command, val;
3622 int ret;
3623
3624 if (sw->generation != 3)
3625 return -EOPNOTSUPP;
3626
3627 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3628 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3629 if (ret)
3630 return ret;
3631
3632 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3633 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3634 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3635 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3636 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3637 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3638
3639 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3640
3641 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3642 if (ret)
3643 return ret;
3644
3645 ret = tb_switch_wait_for_bit(sw, offset,
3646 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3647 if (ret)
3648 return ret;
3649
3650 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3651 if (ret)
3652 return ret;
3653
3654 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3655 return -ETIMEDOUT;
3656
3657 return 0;
3658}
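
/*
 * To summarize the handshake above: the data dword is staged in
 * TB_PLUG_EVENTS_PCIE_WR_DATA, a command dword selecting the bridge,
 * the PCIe config offset and the write opcode is posted with the
 * REQ_ACK bit set, then the hardware acknowledges by clearing REQ_ACK
 * (polled for up to 100 ms) and reports failure via the TIMEOUT bit.
 */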
3659
3660/**
3661 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3662 * @sw: Router to enable PCIe L1
3663 *
3664 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
3665 * enable entry to the PCIe L1 state. This must be called after the
3666 * upstream PCIe tunnel has been configured. Due to an Intel platform
3667 * limitation, it must be called only for the first hop switch.
3668 */
3669int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3670{
3671 struct tb_switch *parent = tb_switch_parent(sw);
3672 int ret;
3673
3674 if (!tb_route(sw))
3675 return 0;
3676
3677 if (!tb_switch_is_titan_ridge(sw))
3678 return 0;
3679
3680 /* Enable PCIe L1 enable only for first hop router (depth = 1) */
3681 if (tb_route(parent))
3682 return 0;
3683
3684 /* Write to downstream PCIe bridge #5 aka Dn4 */
3685 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3686 if (ret)
3687 return ret;
3688
3689 /* Write to Upstream PCIe bridge #0 aka Up0 */
3690 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3691}