Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

thunderbolt: Add support for Intel Ice Lake

The Thunderbolt controller is integrated into the Ice Lake CPU itself
and requires special flows to power it on and off using force power bit
in NHI VSEC registers. Runtime PM (RTD3) and Sx flows also differ from
the discrete solutions. Now the firmware notifies the driver whether
RTD3 entry or exit is possible. The driver is responsible for sending
Go2Sx command through link controller mailbox when system enters Sx
states (suspend-to-mem/disk). The rest of the ICM firmware flows follow
Titan Ridge.

Signed-off-by: Raanan Avargil <raanan.avargil@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <YehezkelShB@gmail.com>
Tested-by: Mario Limonciello <mario.limonciello@dell.com>

+526 -22
+1 -1
drivers/thunderbolt/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o 3 - thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o 3 + thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o 4 4 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
+166 -10
drivers/thunderbolt/icm.c
··· 56 56 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) 57 57 * @rpm: Does the controller support runtime PM (RTD3) 58 58 * @can_upgrade_nvm: Can the NVM firmware be upgrade on this controller 59 + * @veto: Is RTD3 veto in effect 59 60 * @is_supported: Checks if we can support ICM on this controller 60 61 * @cio_reset: Trigger CIO reset 61 62 * @get_mode: Read and return the ICM firmware mode (optional) 62 63 * @get_route: Find a route string for given switch 63 64 * @save_devices: Ask ICM to save devices to ACL when suspending (optional) 64 65 * @driver_ready: Send driver ready message to ICM 66 + * @set_uuid: Set UUID for the root switch (optional) 65 67 * @device_connected: Handle device connected ICM message 66 68 * @device_disconnected: Handle device disconnected ICM message 67 69 * @xdomain_connected - Handle XDomain connected ICM message 68 70 * @xdomain_disconnected - Handle XDomain disconnected ICM message 71 + * @rtd3_veto: Handle RTD3 veto notification ICM message 69 72 */ 70 73 struct icm { 71 74 struct mutex request_lock; ··· 79 76 bool safe_mode; 80 77 bool rpm; 81 78 bool can_upgrade_nvm; 79 + bool veto; 82 80 bool (*is_supported)(struct tb *tb); 83 81 int (*cio_reset)(struct tb *tb); 84 82 int (*get_mode)(struct tb *tb); ··· 88 84 int (*driver_ready)(struct tb *tb, 89 85 enum tb_security_level *security_level, 90 86 size_t *nboot_acl, bool *rpm); 87 + void (*set_uuid)(struct tb *tb); 91 88 void (*device_connected)(struct tb *tb, 92 89 const struct icm_pkg_header *hdr); 93 90 void (*device_disconnected)(struct tb *tb, ··· 97 92 const struct icm_pkg_header *hdr); 98 93 void (*xdomain_disconnected)(struct tb *tb, 99 94 const struct icm_pkg_header *hdr); 95 + void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); 100 96 }; 101 97 102 98 struct icm_notification { ··· 300 294 } while (retries--); 301 295 302 296 return -ETIMEDOUT; 297 + } 298 + 299 + /* 300 + * If rescan is queued to run (we are resuming), 
postpone it to give the 301 + * firmware some more time to send device connected notifications for next 302 + * devices in the chain. 303 + */ 304 + static void icm_postpone_rescan(struct tb *tb) 305 + { 306 + struct icm *icm = tb_priv(tb); 307 + 308 + if (delayed_work_pending(&icm->rescan_work)) 309 + mod_delayed_work(tb->wq, &icm->rescan_work, 310 + msecs_to_jiffies(500)); 311 + } 312 + 313 + static void icm_veto_begin(struct tb *tb) 314 + { 315 + struct icm *icm = tb_priv(tb); 316 + 317 + if (!icm->veto) { 318 + icm->veto = true; 319 + /* Keep the domain powered while veto is in effect */ 320 + pm_runtime_get(&tb->dev); 321 + } 322 + } 323 + 324 + static void icm_veto_end(struct tb *tb) 325 + { 326 + struct icm *icm = tb_priv(tb); 327 + 328 + if (icm->veto) { 329 + icm->veto = false; 330 + /* Allow the domain suspend now */ 331 + pm_runtime_mark_last_busy(&tb->dev); 332 + pm_runtime_put_autosuspend(&tb->dev); 333 + } 303 334 } 304 335 305 336 static bool icm_fr_is_supported(struct tb *tb) ··· 562 519 return 0; 563 520 } 564 521 565 - static void add_switch(struct tb_switch *parent_sw, u64 route, 566 - const uuid_t *uuid, const u8 *ep_name, 567 - size_t ep_name_size, u8 connection_id, u8 connection_key, 568 - u8 link, u8 depth, enum tb_security_level security_level, 569 - bool authorized, bool boot) 522 + static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route, 523 + const uuid_t *uuid, const u8 *ep_name, 524 + size_t ep_name_size, u8 connection_id, 525 + u8 connection_key, u8 link, u8 depth, 526 + enum tb_security_level security_level, 527 + bool authorized, bool boot) 570 528 { 571 529 const struct intel_vss *vss; 572 530 struct tb_switch *sw; 531 + int ret; 573 532 574 533 pm_runtime_get_sync(&parent_sw->dev); 575 534 ··· 602 557 tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); 603 558 tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); 604 559 605 - if (tb_switch_add(sw)) { 560 + ret = tb_switch_add(sw); 561 + if (ret) 
{ 606 562 tb_port_at(tb_route(sw), parent_sw)->remote = NULL; 607 563 tb_switch_put(sw); 564 + sw = ERR_PTR(ret); 608 565 } 609 566 610 567 out: 611 568 pm_runtime_mark_last_busy(&parent_sw->dev); 612 569 pm_runtime_put_autosuspend(&parent_sw->dev); 570 + 571 + return sw; 613 572 } 614 573 615 574 static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, ··· 704 655 bool boot; 705 656 u64 route; 706 657 int ret; 658 + 659 + icm_postpone_rescan(tb); 707 660 708 661 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 709 662 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> ··· 1137 1086 } 1138 1087 1139 1088 static void 1140 - icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1089 + __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, 1090 + bool force_rtd3) 1141 1091 { 1142 1092 const struct icm_tr_event_device_connected *pkg = 1143 1093 (const struct icm_tr_event_device_connected *)hdr; ··· 1147 1095 struct tb_xdomain *xd; 1148 1096 bool authorized, boot; 1149 1097 u64 route; 1098 + 1099 + icm_postpone_rescan(tb); 1150 1100 1151 1101 /* 1152 1102 * Currently we don't use the QoS information coming with the ··· 1205 1151 return; 1206 1152 } 1207 1153 1208 - add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, 1209 - sizeof(pkg->ep_name), pkg->connection_id, 1210 - 0, 0, 0, security_level, authorized, boot); 1154 + sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, 1155 + sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0, 1156 + security_level, authorized, boot); 1157 + if (!IS_ERR(sw) && force_rtd3) 1158 + sw->rpm = true; 1211 1159 1212 1160 tb_switch_put(parent_sw); 1161 + } 1162 + 1163 + static void 1164 + icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1165 + { 1166 + __icm_tr_device_connected(tb, hdr, false); 1213 1167 } 1214 1168 1215 1169 static void ··· 1530 1468 return 0; 1531 1469 } 1532 1470 1471 + static int 1472 + 
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, 1473 + size_t *nboot_acl, bool *rpm) 1474 + { 1475 + struct icm_tr_pkg_driver_ready_response reply; 1476 + struct icm_pkg_driver_ready request = { 1477 + .hdr.code = ICM_DRIVER_READY, 1478 + }; 1479 + int ret; 1480 + 1481 + memset(&reply, 0, sizeof(reply)); 1482 + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), 1483 + 1, 20000); 1484 + if (ret) 1485 + return ret; 1486 + 1487 + /* Ice Lake always supports RTD3 */ 1488 + if (rpm) 1489 + *rpm = true; 1490 + 1491 + return 0; 1492 + } 1493 + 1494 + static void icm_icl_set_uuid(struct tb *tb) 1495 + { 1496 + struct tb_nhi *nhi = tb->nhi; 1497 + u32 uuid[4]; 1498 + 1499 + pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); 1500 + pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); 1501 + uuid[2] = 0xffffffff; 1502 + uuid[3] = 0xffffffff; 1503 + 1504 + tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 1505 + } 1506 + 1507 + static void 1508 + icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) 1509 + { 1510 + __icm_tr_device_connected(tb, hdr, true); 1511 + } 1512 + 1513 + static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) 1514 + { 1515 + const struct icm_icl_event_rtd3_veto *pkg = 1516 + (const struct icm_icl_event_rtd3_veto *)hdr; 1517 + 1518 + tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); 1519 + 1520 + if (pkg->veto_reason) 1521 + icm_veto_begin(tb); 1522 + else 1523 + icm_veto_end(tb); 1524 + } 1525 + 1533 1526 static void icm_handle_notification(struct work_struct *work) 1534 1527 { 1535 1528 struct icm_notification *n = container_of(work, typeof(*n), work); ··· 1611 1494 break; 1612 1495 case ICM_EVENT_XDOMAIN_DISCONNECTED: 1613 1496 icm->xdomain_disconnected(tb, n->pkg); 1497 + break; 1498 + case ICM_EVENT_RTD3_VETO: 1499 + icm->rtd3_veto(tb, n->pkg); 1614 1500 break; 1615 1501 } 1616 1502 } ··· 1973 1853 if (tb->nhi->going_away) 1974 
1854 return; 1975 1855 1856 + /* 1857 + * If RTD3 was vetoed before we entered system suspend allow it 1858 + * again now before driver ready is sent. Firmware sends a new RTD3 1859 + * veto if it is still the case after we have sent it driver ready 1860 + * command. 1861 + */ 1862 + icm_veto_end(tb); 1976 1863 icm_unplug_children(tb->root_switch); 1977 1864 1978 1865 /* ··· 2044 1917 2045 1918 tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; 2046 1919 tb->root_switch->rpm = icm->rpm; 1920 + 1921 + if (icm->set_uuid) 1922 + icm->set_uuid(tb); 2047 1923 2048 1924 ret = tb_switch_add(tb->root_switch); 2049 1925 if (ret) { ··· 2132 2002 .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 2133 2003 }; 2134 2004 2005 + /* Ice Lake */ 2006 + static const struct tb_cm_ops icm_icl_ops = { 2007 + .driver_ready = icm_driver_ready, 2008 + .start = icm_start, 2009 + .stop = icm_stop, 2010 + .complete = icm_complete, 2011 + .runtime_suspend = icm_runtime_suspend, 2012 + .runtime_resume = icm_runtime_resume, 2013 + .handle_event = icm_handle_event, 2014 + .approve_xdomain_paths = icm_tr_approve_xdomain_paths, 2015 + .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, 2016 + }; 2017 + 2135 2018 struct tb *icm_probe(struct tb_nhi *nhi) 2136 2019 { 2137 2020 struct icm *icm; ··· 2212 2069 icm->xdomain_connected = icm_tr_xdomain_connected; 2213 2070 icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2214 2071 tb->cm_ops = &icm_tr_ops; 2072 + break; 2073 + 2074 + case PCI_DEVICE_ID_INTEL_ICL_NHI0: 2075 + case PCI_DEVICE_ID_INTEL_ICL_NHI1: 2076 + icm->is_supported = icm_ar_is_supported; 2077 + icm->driver_ready = icm_icl_driver_ready; 2078 + icm->set_uuid = icm_icl_set_uuid; 2079 + icm->device_connected = icm_icl_device_connected; 2080 + icm->device_disconnected = icm_tr_device_disconnected; 2081 + icm->xdomain_connected = icm_tr_xdomain_connected; 2082 + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; 2083 + icm->rtd3_veto = icm_icl_rtd3_veto; 
2084 + tb->cm_ops = &icm_icl_ops; 2215 2085 break; 2216 2086 } 2217 2087
+105 -7
drivers/thunderbolt/nhi.c
··· 16 16 #include <linux/interrupt.h> 17 17 #include <linux/module.h> 18 18 #include <linux/delay.h> 19 + #include <linux/property.h> 19 20 20 21 #include "nhi.h" 21 22 #include "nhi_regs.h" ··· 860 859 return IRQ_HANDLED; 861 860 } 862 861 863 - static int nhi_suspend_noirq(struct device *dev) 862 + static int __nhi_suspend_noirq(struct device *dev, bool wakeup) 864 863 { 865 864 struct pci_dev *pdev = to_pci_dev(dev); 866 865 struct tb *tb = pci_get_drvdata(pdev); 866 + struct tb_nhi *nhi = tb->nhi; 867 + int ret; 867 868 868 - return tb_domain_suspend_noirq(tb); 869 + ret = tb_domain_suspend_noirq(tb); 870 + if (ret) 871 + return ret; 872 + 873 + if (nhi->ops && nhi->ops->suspend_noirq) { 874 + ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); 875 + if (ret) 876 + return ret; 877 + } 878 + 879 + return 0; 880 + } 881 + 882 + static int nhi_suspend_noirq(struct device *dev) 883 + { 884 + return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); 885 + } 886 + 887 + static bool nhi_wake_supported(struct pci_dev *pdev) 888 + { 889 + u8 val; 890 + 891 + /* 892 + * If power rails are sustainable for wakeup from S4 this 893 + * property is set by the BIOS. 894 + */ 895 + if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) 896 + return !!val; 897 + 898 + return true; 899 + } 900 + 901 + static int nhi_poweroff_noirq(struct device *dev) 902 + { 903 + struct pci_dev *pdev = to_pci_dev(dev); 904 + bool wakeup; 905 + 906 + wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev); 907 + return __nhi_suspend_noirq(dev, wakeup); 869 908 } 870 909 871 910 static void nhi_enable_int_throttling(struct tb_nhi *nhi) ··· 928 887 { 929 888 struct pci_dev *pdev = to_pci_dev(dev); 930 889 struct tb *tb = pci_get_drvdata(pdev); 890 + struct tb_nhi *nhi = tb->nhi; 891 + int ret; 931 892 932 893 /* 933 894 * Check that the device is still there. It may be that the user 934 895 * unplugged last device which causes the host controller to go 935 896 * away on PCs. 
936 897 */ 937 - if (!pci_device_is_present(pdev)) 938 - tb->nhi->going_away = true; 939 - else 898 + if (!pci_device_is_present(pdev)) { 899 + nhi->going_away = true; 900 + } else { 901 + if (nhi->ops && nhi->ops->resume_noirq) { 902 + ret = nhi->ops->resume_noirq(nhi); 903 + if (ret) 904 + return ret; 905 + } 940 906 nhi_enable_int_throttling(tb->nhi); 907 + } 941 908 942 909 return tb_domain_resume_noirq(tb); 943 910 } ··· 978 929 { 979 930 struct pci_dev *pdev = to_pci_dev(dev); 980 931 struct tb *tb = pci_get_drvdata(pdev); 932 + struct tb_nhi *nhi = tb->nhi; 933 + int ret; 981 934 982 - return tb_domain_runtime_suspend(tb); 935 + ret = tb_domain_runtime_suspend(tb); 936 + if (ret) 937 + return ret; 938 + 939 + if (nhi->ops && nhi->ops->runtime_suspend) { 940 + ret = nhi->ops->runtime_suspend(tb->nhi); 941 + if (ret) 942 + return ret; 943 + } 944 + return 0; 983 945 } 984 946 985 947 static int nhi_runtime_resume(struct device *dev) 986 948 { 987 949 struct pci_dev *pdev = to_pci_dev(dev); 988 950 struct tb *tb = pci_get_drvdata(pdev); 951 + struct tb_nhi *nhi = tb->nhi; 952 + int ret; 989 953 990 - nhi_enable_int_throttling(tb->nhi); 954 + if (nhi->ops && nhi->ops->runtime_resume) { 955 + ret = nhi->ops->runtime_resume(nhi); 956 + if (ret) 957 + return ret; 958 + } 959 + 960 + nhi_enable_int_throttling(nhi); 991 961 return tb_domain_runtime_resume(tb); 992 962 } 993 963 ··· 1034 966 flush_work(&nhi->interrupt_work); 1035 967 } 1036 968 ida_destroy(&nhi->msix_ida); 969 + 970 + if (nhi->ops && nhi->ops->shutdown) 971 + nhi->ops->shutdown(nhi); 1037 972 } 1038 973 1039 974 static int nhi_init_msi(struct tb_nhi *nhi) ··· 1081 1010 return 0; 1082 1011 } 1083 1012 1013 + static bool nhi_imr_valid(struct pci_dev *pdev) 1014 + { 1015 + u8 val; 1016 + 1017 + if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val)) 1018 + return !!val; 1019 + 1020 + return true; 1021 + } 1022 + 1084 1023 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 
1085 1024 { 1086 1025 struct tb_nhi *nhi; 1087 1026 struct tb *tb; 1088 1027 int res; 1028 + 1029 + if (!nhi_imr_valid(pdev)) { 1030 + dev_warn(&pdev->dev, "firmware image not valid, aborting\n"); 1031 + return -ENODEV; 1032 + } 1089 1033 1090 1034 res = pcim_enable_device(pdev); 1091 1035 if (res) { ··· 1119 1033 return -ENOMEM; 1120 1034 1121 1035 nhi->pdev = pdev; 1036 + nhi->ops = (const struct tb_nhi_ops *)id->driver_data; 1122 1037 /* cannot fail - table is allocated bin pcim_iomap_regions */ 1123 1038 nhi->iobase = pcim_iomap_table(pdev)[0]; 1124 1039 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; ··· 1151 1064 } 1152 1065 1153 1066 pci_set_master(pdev); 1067 + 1068 + if (nhi->ops && nhi->ops->init) { 1069 + res = nhi->ops->init(nhi); 1070 + if (res) 1071 + return res; 1072 + } 1154 1073 1155 1074 tb = icm_probe(nhi); 1156 1075 if (!tb) ··· 1218 1125 .restore_noirq = nhi_resume_noirq, 1219 1126 .suspend = nhi_suspend, 1220 1127 .freeze = nhi_suspend, 1128 + .poweroff_noirq = nhi_poweroff_noirq, 1221 1129 .poweroff = nhi_suspend, 1222 1130 .complete = nhi_complete, 1223 1131 .runtime_suspend = nhi_runtime_suspend, ··· 1266 1172 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, 1267 1173 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, 1268 1174 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, 1175 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0), 1176 + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 1177 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), 1178 + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 1269 1179 1270 1180 { 0,} 1271 1181 };
+22
drivers/thunderbolt/nhi.h
··· 30 30 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); 31 31 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); 32 32 33 + /** 34 + * struct tb_nhi_ops - NHI specific optional operations 35 + * @init: NHI specific initialization 36 + * @suspend_noirq: NHI specific suspend_noirq hook 37 + * @resume_noirq: NHI specific resume_noirq hook 38 + * @runtime_suspend: NHI specific runtime_suspend hook 39 + * @runtime_resume: NHI specific runtime_resume hook 40 + * @shutdown: NHI specific shutdown 41 + */ 42 + struct tb_nhi_ops { 43 + int (*init)(struct tb_nhi *nhi); 44 + int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup); 45 + int (*resume_noirq)(struct tb_nhi *nhi); 46 + int (*runtime_suspend)(struct tb_nhi *nhi); 47 + int (*runtime_resume)(struct tb_nhi *nhi); 48 + void (*shutdown)(struct tb_nhi *nhi); 49 + }; 50 + 51 + extern const struct tb_nhi_ops icl_nhi_ops; 52 + 33 53 /* 34 54 * PCI IDs used in this driver from Win Ridge forward. There is no 35 55 * need for the PCI quirk anymore as we will use ICM also on Apple ··· 71 51 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea 72 52 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb 73 53 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef 54 + #define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d 55 + #define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17 74 56 75 57 #endif
+179
drivers/thunderbolt/nhi_ops.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * NHI specific operations 4 + * 5 + * Copyright (C) 2019, Intel Corporation 6 + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 + */ 8 + 9 + #include <linux/delay.h> 10 + #include <linux/suspend.h> 11 + 12 + #include "nhi.h" 13 + #include "nhi_regs.h" 14 + #include "tb.h" 15 + 16 + /* Ice Lake specific NHI operations */ 17 + 18 + #define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */ 19 + 20 + static int check_for_device(struct device *dev, void *data) 21 + { 22 + return tb_is_switch(dev); 23 + } 24 + 25 + static bool icl_nhi_is_device_connected(struct tb_nhi *nhi) 26 + { 27 + struct tb *tb = pci_get_drvdata(nhi->pdev); 28 + int ret; 29 + 30 + ret = device_for_each_child(&tb->root_switch->dev, NULL, 31 + check_for_device); 32 + return ret > 0; 33 + } 34 + 35 + static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) 36 + { 37 + u32 vs_cap; 38 + 39 + /* 40 + * The Thunderbolt host controller is present always in Ice Lake 41 + * but the firmware may not be loaded and running (depending 42 + * whether there is device connected and so on). Each time the 43 + * controller is used we need to "Force Power" it first and wait 44 + * for the firmware to indicate it is up and running. This "Force 45 + * Power" is really not about actually powering on/off the 46 + * controller so it is accessible even if "Force Power" is off. 47 + * 48 + * The actual power management happens inside shared ACPI power 49 + * resources using standard ACPI methods. 
50 + */ 51 + pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap); 52 + if (power) { 53 + vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK; 54 + vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT; 55 + vs_cap |= VS_CAP_22_FORCE_POWER; 56 + } else { 57 + vs_cap &= ~VS_CAP_22_FORCE_POWER; 58 + } 59 + pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap); 60 + 61 + if (power) { 62 + unsigned int retries = 10; 63 + u32 val; 64 + 65 + /* Wait until the firmware tells it is up and running */ 66 + do { 67 + pci_read_config_dword(nhi->pdev, VS_CAP_9, &val); 68 + if (val & VS_CAP_9_FW_READY) 69 + return 0; 70 + msleep(250); 71 + } while (--retries); 72 + 73 + return -ETIMEDOUT; 74 + } 75 + 76 + return 0; 77 + } 78 + 79 + static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd) 80 + { 81 + u32 data; 82 + 83 + pci_read_config_dword(nhi->pdev, VS_CAP_19, &data); 84 + data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK; 85 + pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID); 86 + } 87 + 88 + static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout) 89 + { 90 + unsigned long end; 91 + u32 data; 92 + 93 + if (!timeout) 94 + goto clear; 95 + 96 + end = jiffies + msecs_to_jiffies(timeout); 97 + do { 98 + pci_read_config_dword(nhi->pdev, VS_CAP_18, &data); 99 + if (data & VS_CAP_18_DONE) 100 + goto clear; 101 + msleep(100); 102 + } while (time_before(jiffies, end)); 103 + 104 + return -ETIMEDOUT; 105 + 106 + clear: 107 + /* Clear the valid bit */ 108 + pci_write_config_dword(nhi->pdev, VS_CAP_19, 0); 109 + return 0; 110 + } 111 + 112 + static void icl_nhi_set_ltr(struct tb_nhi *nhi) 113 + { 114 + u32 max_ltr, ltr; 115 + 116 + pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr); 117 + max_ltr &= 0xffff; 118 + /* Program the same value for both snoop and no-snoop */ 119 + ltr = max_ltr << 16 | max_ltr; 120 + pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr); 121 + } 122 + 123 + static int icl_nhi_suspend(struct tb_nhi *nhi) 124 + { 125 
+ int ret; 126 + 127 + if (icl_nhi_is_device_connected(nhi)) 128 + return 0; 129 + 130 + /* 131 + * If there is no device connected we need to perform both: a 132 + * handshake through LC mailbox and force power down before 133 + * entering D3. 134 + */ 135 + icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); 136 + ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); 137 + if (ret) 138 + return ret; 139 + 140 + return icl_nhi_force_power(nhi, false); 141 + } 142 + 143 + static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup) 144 + { 145 + enum icl_lc_mailbox_cmd cmd; 146 + 147 + if (!pm_suspend_via_firmware()) 148 + return icl_nhi_suspend(nhi); 149 + 150 + cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE; 151 + icl_nhi_lc_mailbox_cmd(nhi, cmd); 152 + return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); 153 + } 154 + 155 + static int icl_nhi_resume(struct tb_nhi *nhi) 156 + { 157 + int ret; 158 + 159 + ret = icl_nhi_force_power(nhi, true); 160 + if (ret) 161 + return ret; 162 + 163 + icl_nhi_set_ltr(nhi); 164 + return 0; 165 + } 166 + 167 + static void icl_nhi_shutdown(struct tb_nhi *nhi) 168 + { 169 + icl_nhi_force_power(nhi, false); 170 + } 171 + 172 + const struct tb_nhi_ops icl_nhi_ops = { 173 + .init = icl_nhi_resume, 174 + .suspend_noirq = icl_nhi_suspend_noirq, 175 + .resume_noirq = icl_nhi_resume, 176 + .runtime_suspend = icl_nhi_suspend, 177 + .runtime_resume = icl_nhi_resume, 178 + .shutdown = icl_nhi_shutdown, 179 + };
+37
drivers/thunderbolt/nhi_regs.h
··· 124 124 #define REG_FW_STS_ICM_EN_INVERT BIT(1) 125 125 #define REG_FW_STS_ICM_EN BIT(0) 126 126 127 + /* ICL NHI VSEC registers */ 128 + 129 + /* FW ready */ 130 + #define VS_CAP_9 0xc8 131 + #define VS_CAP_9_FW_READY BIT(31) 132 + /* UUID */ 133 + #define VS_CAP_10 0xcc 134 + #define VS_CAP_11 0xd0 135 + /* LTR */ 136 + #define VS_CAP_15 0xe0 137 + #define VS_CAP_16 0xe4 138 + /* TBT2PCIe */ 139 + #define VS_CAP_18 0xec 140 + #define VS_CAP_18_DONE BIT(0) 141 + /* PCIe2TBT */ 142 + #define VS_CAP_19 0xf0 143 + #define VS_CAP_19_VALID BIT(0) 144 + #define VS_CAP_19_CMD_SHIFT 1 145 + #define VS_CAP_19_CMD_MASK GENMASK(7, 1) 146 + /* Force power */ 147 + #define VS_CAP_22 0xfc 148 + #define VS_CAP_22_FORCE_POWER BIT(1) 149 + #define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24) 150 + #define VS_CAP_22_DMA_DELAY_SHIFT 24 151 + 152 + /** 153 + * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands 154 + * @ICL_LC_GO2SX: Ask LC to enter Sx with wake 155 + * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake 156 + * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset 157 + */ 158 + enum icl_lc_mailbox_cmd { 159 + ICL_LC_GO2SX = 0x02, 160 + ICL_LC_GO2SX_NO_WAKE = 0x03, 161 + ICL_LC_PREPARE_FOR_RESET = 0x21, 162 + }; 163 + 127 164 #endif
+2
drivers/thunderbolt/switch.c
··· 1470 1470 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: 1471 1471 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: 1472 1472 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: 1473 + case PCI_DEVICE_ID_INTEL_ICL_NHI0: 1474 + case PCI_DEVICE_ID_INTEL_ICL_NHI1: 1473 1475 return 3; 1474 1476 1475 1477 default:
+12 -4
drivers/thunderbolt/tb_msgs.h
··· 104 104 }; 105 105 106 106 enum icm_event_code { 107 - ICM_EVENT_DEVICE_CONNECTED = 3, 108 - ICM_EVENT_DEVICE_DISCONNECTED = 4, 109 - ICM_EVENT_XDOMAIN_CONNECTED = 6, 110 - ICM_EVENT_XDOMAIN_DISCONNECTED = 7, 107 + ICM_EVENT_DEVICE_CONNECTED = 0x3, 108 + ICM_EVENT_DEVICE_DISCONNECTED = 0x4, 109 + ICM_EVENT_XDOMAIN_CONNECTED = 0x6, 110 + ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7, 111 + ICM_EVENT_RTD3_VETO = 0xa, 111 112 }; 112 113 113 114 struct icm_pkg_header { ··· 462 461 u32 route_hi; 463 462 u32 route_lo; 464 463 uuid_t remote_uuid; 464 + }; 465 + 466 + /* Ice Lake messages */ 467 + 468 + struct icm_icl_event_rtd3_veto { 469 + struct icm_pkg_header hdr; 470 + u32 veto_reason; 465 471 }; 466 472 467 473 /* XDomain messages */
+2
include/linux/thunderbolt.h
··· 429 429 * @lock: Must be held during ring creation/destruction. Is acquired by 430 430 * interrupt_work when dispatching interrupts to individual rings. 431 431 * @pdev: Pointer to the PCI device 432 + * @ops: NHI specific optional ops 432 433 * @iobase: MMIO space of the NHI 433 434 * @tx_rings: All Tx rings available on this host controller 434 435 * @rx_rings: All Rx rings available on this host controller ··· 443 442 struct tb_nhi { 444 443 spinlock_t lock; 445 444 struct pci_dev *pdev; 445 + const struct tb_nhi_ops *ops; 446 446 void __iomem *iobase; 447 447 struct tb_ring **tx_rings; 448 448 struct tb_ring **rx_rings;