Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-6.18/cxl-delay-dport' into cxl-for-next

Add changes to delay the allocation and setup of dports until the
endpoint device is being probed. At that point, the CXL link is
established from endpoint to host bridge. This addresses issues seen on
some platforms when dports are probed earlier.

Link: https://lore.kernel.org/linux-cxl/20250829180928.842707-1-dave.jiang@intel.com/

+642 -265
+3 -4
drivers/cxl/acpi.c
··· 401 401 static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, 402 402 struct cxl_cfmws_context *ctx) 403 403 { 404 - int target_map[CXL_DECODER_MAX_INTERLEAVE]; 405 404 struct cxl_port *root_port = ctx->root_port; 406 405 struct cxl_cxims_context cxims_ctx; 407 406 struct device *dev = ctx->dev; ··· 418 419 rc = eig_to_granularity(cfmws->granularity, &ig); 419 420 if (rc) 420 421 return rc; 421 - for (i = 0; i < ways; i++) 422 - target_map[i] = cfmws->interleave_targets[i]; 423 422 424 423 struct resource *res __free(del_cxl_resource) = alloc_cxl_resource( 425 424 cfmws->base_hpa, cfmws->window_size, ctx->id++); ··· 443 446 .end = cfmws->base_hpa + cfmws->window_size - 1, 444 447 }; 445 448 cxld->interleave_ways = ways; 449 + for (i = 0; i < ways; i++) 450 + cxld->target_map[i] = cfmws->interleave_targets[i]; 446 451 /* 447 452 * Minimize the x1 granularity to advertise support for any 448 453 * valid region granularity ··· 483 484 cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps; 484 485 } 485 486 486 - rc = cxl_decoder_add(cxld, target_map); 487 + rc = cxl_decoder_add(cxld); 487 488 if (rc) 488 489 return rc; 489 490
+11 -14
drivers/cxl/core/cdat.c
··· 338 338 339 339 guard(rwsem_read)(&cxl_rwsem.region); 340 340 for (int i = 0; i < cxlsd->nr_targets; i++) { 341 - if (host_bridge == cxlsd->target[i]->dport_dev) 341 + if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev) 342 342 return 1; 343 343 } 344 344 ··· 440 440 } *tbl = (struct acpi_cdat_sslbis_table *)header; 441 441 int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header); 442 442 struct acpi_cdat_sslbis *sslbis; 443 - struct cxl_port *port = arg; 444 - struct device *dev = &port->dev; 443 + struct cxl_dport *dport = arg; 444 + struct device *dev = &dport->port->dev; 445 445 int remain, entries, i; 446 446 u16 len; 447 447 ··· 467 467 u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id); 468 468 __le64 le_base; 469 469 __le16 le_val; 470 - struct cxl_dport *dport; 471 - unsigned long index; 472 470 u16 dsp_id; 473 471 u64 val; 474 472 ··· 497 499 val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base), 498 500 sslbis->data_type); 499 501 500 - xa_for_each(&port->dports, index, dport) { 501 - if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT || 502 - dsp_id == dport->port_id) { 503 - cxl_access_coordinate_set(dport->coord, 504 - sslbis->data_type, 505 - val); 506 - } 502 + if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT || 503 + dsp_id == dport->port_id) { 504 + cxl_access_coordinate_set(dport->coord, 505 + sslbis->data_type, val); 506 + return 0; 507 507 } 508 508 } 509 509 510 510 return 0; 511 511 } 512 512 513 - void cxl_switch_parse_cdat(struct cxl_port *port) 513 + void cxl_switch_parse_cdat(struct cxl_dport *dport) 514 514 { 515 + struct cxl_port *port = dport->port; 515 516 int rc; 516 517 517 518 if (!port->cdat.table) 518 519 return; 519 520 520 521 rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler, 521 - port, port->cdat.table, port->cdat.length); 522 + dport, port->cdat.table, port->cdat.length); 522 523 rc = cdat_table_parse_output(rc); 523 524 if (rc) 524 525 dev_dbg(&port->dev, "Failed to parse SSLBIS: 
%d\n", rc);
+5
drivers/cxl/core/core.h
··· 148 148 void cxl_ras_exit(void); 149 149 int cxl_gpf_port_setup(struct cxl_dport *dport); 150 150 151 + struct cxl_hdm; 152 + int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, 153 + struct cxl_endpoint_dvsec_info *info); 154 + int cxl_port_get_possible_dports(struct cxl_port *port); 155 + 151 156 #ifdef CONFIG_CXL_FEATURES 152 157 struct cxl_feat_entry * 153 158 cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
+80 -25
drivers/cxl/core/hdm.c
··· 21 21 .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa), 22 22 }; 23 23 24 - static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, 25 - int *target_map) 24 + static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld) 26 25 { 27 26 int rc; 28 27 29 - rc = cxl_decoder_add_locked(cxld, target_map); 28 + rc = cxl_decoder_add_locked(cxld); 30 29 if (rc) { 31 30 put_device(&cxld->dev); 32 31 dev_err(&port->dev, "Failed to add decoder\n"); ··· 49 50 * are claimed and passed to the single dport. Disable the range until the first 50 51 * CXL region is enumerated / activated. 51 52 */ 52 - int devm_cxl_add_passthrough_decoder(struct cxl_port *port) 53 + static int devm_cxl_add_passthrough_decoder(struct cxl_port *port) 53 54 { 54 55 struct cxl_switch_decoder *cxlsd; 55 - struct cxl_dport *dport = NULL; 56 - int single_port_map[1]; 57 - unsigned long index; 58 56 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 59 57 60 58 /* ··· 67 71 68 72 device_lock_assert(&port->dev); 69 73 70 - xa_for_each(&port->dports, index, dport) 71 - break; 72 - single_port_map[0] = dport->port_id; 73 - 74 - return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); 74 + return add_hdm_decoder(port, &cxlsd->cxld); 75 75 } 76 - EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL"); 77 76 78 77 static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) 79 78 { ··· 138 147 * @port: cxl_port to map 139 148 * @info: cached DVSEC range register info 140 149 */ 141 - struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, 142 - struct cxl_endpoint_dvsec_info *info) 150 + static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, 151 + struct cxl_endpoint_dvsec_info *info) 143 152 { 144 153 struct cxl_register_map *reg_map = &port->reg_map; 145 154 struct device *dev = &port->dev; ··· 194 203 195 204 return cxlhdm; 196 205 } 197 - EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL"); 198 206 199 207 static void __cxl_dpa_debug(struct seq_file *file, 
struct resource *r, int depth) 200 208 { ··· 974 984 } 975 985 976 986 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, 977 - int *target_map, void __iomem *hdm, int which, 987 + void __iomem *hdm, int which, 978 988 u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) 979 989 { 980 990 struct cxl_endpoint_decoder *cxled = NULL; ··· 1093 1103 hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which)); 1094 1104 target_list.value = (hi << 32) + lo; 1095 1105 for (i = 0; i < cxld->interleave_ways; i++) 1096 - target_map[i] = target_list.target_id[i]; 1106 + cxld->target_map[i] = target_list.target_id[i]; 1097 1107 1098 1108 return 0; 1099 1109 } ··· 1158 1168 * @cxlhdm: Structure to populate with HDM capabilities 1159 1169 * @info: cached DVSEC range register info 1160 1170 */ 1161 - int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 1162 - struct cxl_endpoint_dvsec_info *info) 1171 + static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 1172 + struct cxl_endpoint_dvsec_info *info) 1163 1173 { 1164 1174 void __iomem *hdm = cxlhdm->regs.hdm_decoder; 1165 1175 struct cxl_port *port = cxlhdm->port; ··· 1169 1179 cxl_settle_decoders(cxlhdm); 1170 1180 1171 1181 for (i = 0; i < cxlhdm->decoder_count; i++) { 1172 - int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; 1173 1182 int rc, target_count = cxlhdm->target_count; 1174 1183 struct cxl_decoder *cxld; 1175 1184 ··· 1196 1207 cxld = &cxlsd->cxld; 1197 1208 } 1198 1209 1199 - rc = init_hdm_decoder(port, cxld, target_map, hdm, i, 1200 - &dpa_base, info); 1210 + rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info); 1201 1211 if (rc) { 1202 1212 dev_warn(&port->dev, 1203 1213 "Failed to initialize decoder%d.%d\n", ··· 1204 1216 put_device(&cxld->dev); 1205 1217 return rc; 1206 1218 } 1207 - rc = add_hdm_decoder(port, cxld, target_map); 1219 + rc = add_hdm_decoder(port, cxld); 1208 1220 if (rc) { 1209 1221 dev_warn(&port->dev, 1210 1222 "Failed to add decoder%d.%d\n", port->id, i); ··· 
1214 1226 1215 1227 return 0; 1216 1228 } 1217 - EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL"); 1229 + 1230 + /** 1231 + * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders 1232 + * @port: CXL port context 1233 + * 1234 + * Return 0 or -errno on error 1235 + */ 1236 + int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port) 1237 + { 1238 + struct cxl_hdm *cxlhdm; 1239 + 1240 + if (is_cxl_root(port) || is_cxl_endpoint(port)) 1241 + return -EOPNOTSUPP; 1242 + 1243 + cxlhdm = devm_cxl_setup_hdm(port, NULL); 1244 + if (!IS_ERR(cxlhdm)) 1245 + return devm_cxl_enumerate_decoders(cxlhdm, NULL); 1246 + 1247 + if (PTR_ERR(cxlhdm) != -ENODEV) { 1248 + dev_err(&port->dev, "Failed to map HDM decoder capability\n"); 1249 + return PTR_ERR(cxlhdm); 1250 + } 1251 + 1252 + if (cxl_port_get_possible_dports(port) == 1) { 1253 + dev_dbg(&port->dev, "Fallback to passthrough decoder\n"); 1254 + return devm_cxl_add_passthrough_decoder(port); 1255 + } 1256 + 1257 + dev_err(&port->dev, "HDM decoder capability not found\n"); 1258 + return -ENXIO; 1259 + } 1260 + EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL"); 1261 + 1262 + /** 1263 + * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders 1264 + * @port: CXL port context 1265 + * 1266 + * Return 0 or -errno on error 1267 + */ 1268 + int devm_cxl_endpoint_decoders_setup(struct cxl_port *port) 1269 + { 1270 + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); 1271 + struct cxl_endpoint_dvsec_info info = { .port = port }; 1272 + struct cxl_dev_state *cxlds = cxlmd->cxlds; 1273 + struct cxl_hdm *cxlhdm; 1274 + int rc; 1275 + 1276 + if (!is_cxl_endpoint(port)) 1277 + return -EOPNOTSUPP; 1278 + 1279 + rc = cxl_dvsec_rr_decode(cxlds, &info); 1280 + if (rc < 0) 1281 + return rc; 1282 + 1283 + cxlhdm = devm_cxl_setup_hdm(port, &info); 1284 + if (IS_ERR(cxlhdm)) { 1285 + if (PTR_ERR(cxlhdm) == -ENODEV) 1286 + dev_err(&port->dev, "HDM decoder registers not 
found\n"); 1287 + return PTR_ERR(cxlhdm); 1288 + } 1289 + 1290 + rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info); 1291 + if (rc) 1292 + return rc; 1293 + 1294 + return devm_cxl_enumerate_decoders(cxlhdm, &info); 1295 + } 1296 + EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
+89
drivers/cxl/core/pci.c
··· 24 24 module_param(media_ready_timeout, ushort, 0644); 25 25 MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready"); 26 26 27 + static int pci_get_port_num(struct pci_dev *pdev) 28 + { 29 + u32 lnkcap; 30 + int type; 31 + 32 + type = pci_pcie_type(pdev); 33 + if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT) 34 + return -EINVAL; 35 + 36 + if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP, 37 + &lnkcap)) 38 + return -ENXIO; 39 + 40 + return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); 41 + } 42 + 43 + /** 44 + * __devm_cxl_add_dport_by_dev - allocate a dport by dport device 45 + * @port: cxl_port that hosts the dport 46 + * @dport_dev: 'struct device' of the dport 47 + * 48 + * Returns the allocated dport on success or ERR_PTR() of -errno on error 49 + */ 50 + struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port, 51 + struct device *dport_dev) 52 + { 53 + struct cxl_register_map map; 54 + struct pci_dev *pdev; 55 + int port_num, rc; 56 + 57 + if (!dev_is_pci(dport_dev)) 58 + return ERR_PTR(-EINVAL); 59 + 60 + pdev = to_pci_dev(dport_dev); 61 + port_num = pci_get_port_num(pdev); 62 + if (port_num < 0) 63 + return ERR_PTR(port_num); 64 + 65 + rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); 66 + if (rc) 67 + return ERR_PTR(rc); 68 + 69 + device_lock_assert(&port->dev); 70 + return devm_cxl_add_dport(port, dport_dev, port_num, map.resource); 71 + } 72 + EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL"); 73 + 27 74 struct cxl_walk_context { 28 75 struct pci_bus *bus; 29 76 struct cxl_port *port; ··· 1215 1168 } 1216 1169 1217 1170 return 0; 1171 + } 1172 + 1173 + static int count_dports(struct pci_dev *pdev, void *data) 1174 + { 1175 + struct cxl_walk_context *ctx = data; 1176 + int type = pci_pcie_type(pdev); 1177 + 1178 + if (pdev->bus != ctx->bus) 1179 + return 0; 1180 + if (!pci_is_pcie(pdev)) 1181 + return 0; 1182 + if (type != ctx->type) 1183 + return 0; 1184 + 1185 + 
ctx->count++; 1186 + return 0; 1187 + } 1188 + 1189 + int cxl_port_get_possible_dports(struct cxl_port *port) 1190 + { 1191 + struct pci_bus *bus = cxl_port_to_pci_bus(port); 1192 + struct cxl_walk_context ctx; 1193 + int type; 1194 + 1195 + if (!bus) { 1196 + dev_err(&port->dev, "No PCI bus found for port %s\n", 1197 + dev_name(&port->dev)); 1198 + return -ENXIO; 1199 + } 1200 + 1201 + if (pci_is_root_bus(bus)) 1202 + type = PCI_EXP_TYPE_ROOT_PORT; 1203 + else 1204 + type = PCI_EXP_TYPE_DOWNSTREAM; 1205 + 1206 + ctx = (struct cxl_walk_context) { 1207 + .bus = bus, 1208 + .type = type, 1209 + }; 1210 + pci_walk_bus(bus, count_dports, &ctx); 1211 + 1212 + return ctx.count; 1218 1213 }
+241 -77
drivers/cxl/core/port.c
··· 33 33 static DEFINE_IDA(cxl_port_ida); 34 34 static DEFINE_XARRAY(cxl_root_buses); 35 35 36 + /* 37 + * The terminal device in PCI is NULL and @platform_bus 38 + * for platform devices (for cxl_test) 39 + */ 40 + static bool is_cxl_host_bridge(struct device *dev) 41 + { 42 + return (!dev || dev == &platform_bus); 43 + } 44 + 36 45 int cxl_num_decoders_committed(struct cxl_port *port) 37 46 { 38 47 lockdep_assert_held(&cxl_rwsem.region); ··· 750 741 xa_init(&port->dports); 751 742 xa_init(&port->endpoints); 752 743 xa_init(&port->regions); 744 + port->component_reg_phys = CXL_RESOURCE_NONE; 753 745 754 746 device_initialize(dev); 755 747 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); ··· 869 859 if (rc) 870 860 return rc; 871 861 872 - rc = cxl_port_setup_regs(port, component_reg_phys); 873 - if (rc) 874 - return rc; 862 + port->component_reg_phys = component_reg_phys; 875 863 } else { 876 864 rc = dev_set_name(dev, "root%d", port->id); 877 865 if (rc) ··· 1200 1192 1201 1193 cxl_debugfs_create_dport_dir(dport); 1202 1194 1195 + /* 1196 + * Setup port register if this is the first dport showed up. Having 1197 + * a dport also means that there is at least 1 active link. 
1198 + */ 1199 + if (port->nr_dports == 1 && 1200 + port->component_reg_phys != CXL_RESOURCE_NONE) { 1201 + rc = cxl_port_setup_regs(port, port->component_reg_phys); 1202 + if (rc) 1203 + return ERR_PTR(rc); 1204 + port->component_reg_phys = CXL_RESOURCE_NONE; 1205 + } 1206 + 1203 1207 return dport; 1204 1208 } 1205 1209 ··· 1369 1349 return port; 1370 1350 } 1371 1351 1372 - static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port, 1373 - struct device *dport_dev, 1374 - struct cxl_dport **dport) 1375 - { 1376 - struct cxl_find_port_ctx ctx = { 1377 - .dport_dev = dport_dev, 1378 - .parent_port = parent_port, 1379 - .dport = dport, 1380 - }; 1381 - struct cxl_port *port; 1382 - 1383 - port = __find_cxl_port(&ctx); 1384 - return port; 1385 - } 1386 - 1387 1352 /* 1388 1353 * All users of grandparent() are using it to walk PCIe-like switch port 1389 1354 * hierarchy. A PCIe switch is comprised of a bridge device representing the ··· 1429 1424 * through ->remove(). This "bottom-up" removal selectively removes individual 1430 1425 * child ports manually. This depends on devm_cxl_add_port() to not change is 1431 1426 * devm action registration order, and for dports to have already been 1432 - * destroyed by reap_dports(). 1427 + * destroyed by del_dports(). 
1433 1428 */ 1434 1429 static void delete_switch_port(struct cxl_port *port) 1435 1430 { ··· 1438 1433 devm_release_action(port->dev.parent, unregister_port, port); 1439 1434 } 1440 1435 1441 - static void reap_dports(struct cxl_port *port) 1436 + static void del_dport(struct cxl_dport *dport) 1437 + { 1438 + struct cxl_port *port = dport->port; 1439 + 1440 + devm_release_action(&port->dev, cxl_dport_unlink, dport); 1441 + devm_release_action(&port->dev, cxl_dport_remove, dport); 1442 + devm_kfree(&port->dev, dport); 1443 + } 1444 + 1445 + static void del_dports(struct cxl_port *port) 1442 1446 { 1443 1447 struct cxl_dport *dport; 1444 1448 unsigned long index; 1445 1449 1446 1450 device_lock_assert(&port->dev); 1447 1451 1448 - xa_for_each(&port->dports, index, dport) { 1449 - devm_release_action(&port->dev, cxl_dport_unlink, dport); 1450 - devm_release_action(&port->dev, cxl_dport_remove, dport); 1451 - devm_kfree(&port->dev, dport); 1452 - } 1452 + xa_for_each(&port->dports, index, dport) 1453 + del_dport(dport); 1453 1454 } 1454 1455 1455 1456 struct detach_ctx { ··· 1513 1502 */ 1514 1503 died = true; 1515 1504 port->dead = true; 1516 - reap_dports(port); 1505 + del_dports(port); 1517 1506 } 1518 1507 device_unlock(&port->dev); 1519 1508 ··· 1544 1533 return map.resource; 1545 1534 } 1546 1535 1536 + static int match_port_by_uport(struct device *dev, const void *data) 1537 + { 1538 + const struct device *uport_dev = data; 1539 + struct cxl_port *port; 1540 + 1541 + if (!is_cxl_port(dev)) 1542 + return 0; 1543 + 1544 + port = to_cxl_port(dev); 1545 + return uport_dev == port->uport_dev; 1546 + } 1547 + 1548 + /* 1549 + * Function takes a device reference on the port device. Caller should do a 1550 + * put_device() when done. 
1551 + */ 1552 + static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev) 1553 + { 1554 + struct device *dev; 1555 + 1556 + dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport); 1557 + if (dev) 1558 + return to_cxl_port(dev); 1559 + return NULL; 1560 + } 1561 + 1562 + static int update_decoder_targets(struct device *dev, void *data) 1563 + { 1564 + struct cxl_dport *dport = data; 1565 + struct cxl_switch_decoder *cxlsd; 1566 + struct cxl_decoder *cxld; 1567 + int i; 1568 + 1569 + if (!is_switch_decoder(dev)) 1570 + return 0; 1571 + 1572 + cxlsd = to_cxl_switch_decoder(dev); 1573 + cxld = &cxlsd->cxld; 1574 + guard(rwsem_write)(&cxl_rwsem.region); 1575 + 1576 + for (i = 0; i < cxld->interleave_ways; i++) { 1577 + if (cxld->target_map[i] == dport->port_id) { 1578 + cxlsd->target[i] = dport; 1579 + dev_dbg(dev, "dport%d found in target list, index %d\n", 1580 + dport->port_id, i); 1581 + return 1; 1582 + } 1583 + } 1584 + 1585 + return 0; 1586 + } 1587 + 1588 + DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T)) 1589 + static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port, 1590 + struct device *dport_dev) 1591 + { 1592 + struct cxl_dport *dport; 1593 + int rc; 1594 + 1595 + device_lock_assert(&port->dev); 1596 + if (!port->dev.driver) 1597 + return ERR_PTR(-ENXIO); 1598 + 1599 + dport = cxl_find_dport_by_dev(port, dport_dev); 1600 + if (dport) { 1601 + dev_dbg(&port->dev, "dport%d:%s already exists\n", 1602 + dport->port_id, dev_name(dport_dev)); 1603 + return ERR_PTR(-EBUSY); 1604 + } 1605 + 1606 + struct cxl_dport *new_dport __free(del_cxl_dport) = 1607 + devm_cxl_add_dport_by_dev(port, dport_dev); 1608 + if (IS_ERR(new_dport)) 1609 + return new_dport; 1610 + 1611 + cxl_switch_parse_cdat(new_dport); 1612 + 1613 + if (ida_is_empty(&port->decoder_ida)) { 1614 + rc = devm_cxl_switch_port_decoders_setup(port); 1615 + if (rc) 1616 + return ERR_PTR(rc); 1617 + dev_dbg(&port->dev, "first 
dport%d:%s added with decoders\n", 1618 + new_dport->port_id, dev_name(dport_dev)); 1619 + return no_free_ptr(new_dport); 1620 + } 1621 + 1622 + /* New dport added, update the decoder targets */ 1623 + device_for_each_child(&port->dev, new_dport, update_decoder_targets); 1624 + 1625 + dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id, 1626 + dev_name(dport_dev)); 1627 + 1628 + return no_free_ptr(new_dport); 1629 + } 1630 + 1631 + static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev, 1632 + struct cxl_port *parent_port, 1633 + struct cxl_dport *parent_dport, 1634 + struct device *uport_dev, 1635 + struct device *dport_dev) 1636 + { 1637 + resource_size_t component_reg_phys; 1638 + 1639 + device_lock_assert(&parent_port->dev); 1640 + if (!parent_port->dev.driver) { 1641 + dev_warn(ep_dev, 1642 + "port %s:%s:%s disabled, failed to enumerate CXL.mem\n", 1643 + dev_name(&parent_port->dev), dev_name(uport_dev), 1644 + dev_name(dport_dev)); 1645 + } 1646 + 1647 + struct cxl_port *port __free(put_cxl_port) = 1648 + find_cxl_port_by_uport(uport_dev); 1649 + if (!port) { 1650 + component_reg_phys = find_component_registers(uport_dev); 1651 + port = devm_cxl_add_port(&parent_port->dev, uport_dev, 1652 + component_reg_phys, parent_dport); 1653 + if (IS_ERR(port)) 1654 + return ERR_CAST(port); 1655 + 1656 + /* 1657 + * retry to make sure a port is found. a port device 1658 + * reference is taken. 1659 + */ 1660 + port = find_cxl_port_by_uport(uport_dev); 1661 + if (!port) 1662 + return ERR_PTR(-ENODEV); 1663 + 1664 + dev_dbg(ep_dev, "created port %s:%s\n", 1665 + dev_name(&port->dev), dev_name(port->uport_dev)); 1666 + } else { 1667 + /* 1668 + * Port was created before right before this function is 1669 + * called. Signal the caller to deal with it. 
1670 + */ 1671 + return ERR_PTR(-EAGAIN); 1672 + } 1673 + 1674 + guard(device)(&port->dev); 1675 + return cxl_port_add_dport(port, dport_dev); 1676 + } 1677 + 1547 1678 static int add_port_attach_ep(struct cxl_memdev *cxlmd, 1548 1679 struct device *uport_dev, 1549 1680 struct device *dport_dev) 1550 1681 { 1551 1682 struct device *dparent = grandparent(dport_dev); 1552 1683 struct cxl_dport *dport, *parent_dport; 1553 - resource_size_t component_reg_phys; 1554 1684 int rc; 1555 1685 1556 - if (!dparent) { 1686 + if (is_cxl_host_bridge(dparent)) { 1557 1687 /* 1558 1688 * The iteration reached the topology root without finding the 1559 1689 * CXL-root 'cxl_port' on a previous iteration, fail for now to ··· 1706 1554 } 1707 1555 1708 1556 struct cxl_port *parent_port __free(put_cxl_port) = 1709 - find_cxl_port(dparent, &parent_dport); 1557 + find_cxl_port_by_uport(dparent->parent); 1710 1558 if (!parent_port) { 1711 1559 /* iterate to create this parent_port */ 1712 1560 return -EAGAIN; 1713 1561 } 1714 1562 1715 - /* 1716 - * Definition with __free() here to keep the sequence of 1717 - * dereferencing the device of the port before the parent_port releasing. 
1718 - */ 1719 - struct cxl_port *port __free(put_cxl_port) = NULL; 1720 1563 scoped_guard(device, &parent_port->dev) { 1721 - if (!parent_port->dev.driver) { 1722 - dev_warn(&cxlmd->dev, 1723 - "port %s:%s disabled, failed to enumerate CXL.mem\n", 1724 - dev_name(&parent_port->dev), dev_name(uport_dev)); 1725 - return -ENXIO; 1564 + parent_dport = cxl_find_dport_by_dev(parent_port, dparent); 1565 + if (!parent_dport) { 1566 + parent_dport = cxl_port_add_dport(parent_port, dparent); 1567 + if (IS_ERR(parent_dport)) 1568 + return PTR_ERR(parent_dport); 1726 1569 } 1727 1570 1728 - port = find_cxl_port_at(parent_port, dport_dev, &dport); 1729 - if (!port) { 1730 - component_reg_phys = find_component_registers(uport_dev); 1731 - port = devm_cxl_add_port(&parent_port->dev, uport_dev, 1732 - component_reg_phys, parent_dport); 1733 - if (IS_ERR(port)) 1734 - return PTR_ERR(port); 1735 - 1736 - /* retry find to pick up the new dport information */ 1737 - port = find_cxl_port_at(parent_port, dport_dev, &dport); 1738 - if (!port) 1739 - return -ENXIO; 1571 + dport = devm_cxl_create_port(&cxlmd->dev, parent_port, 1572 + parent_dport, uport_dev, 1573 + dport_dev); 1574 + if (IS_ERR(dport)) { 1575 + /* Port already exists, restart iteration */ 1576 + if (PTR_ERR(dport) == -EAGAIN) 1577 + return 0; 1578 + return PTR_ERR(dport); 1740 1579 } 1741 1580 } 1742 1581 1743 - dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", 1744 - dev_name(&port->dev), dev_name(port->uport_dev)); 1745 1582 rc = cxl_add_ep(dport, &cxlmd->dev); 1746 1583 if (rc == -EBUSY) { 1747 1584 /* ··· 1741 1600 } 1742 1601 1743 1602 return rc; 1603 + } 1604 + 1605 + static struct cxl_dport *find_or_add_dport(struct cxl_port *port, 1606 + struct device *dport_dev) 1607 + { 1608 + struct cxl_dport *dport; 1609 + 1610 + device_lock_assert(&port->dev); 1611 + dport = cxl_find_dport_by_dev(port, dport_dev); 1612 + if (!dport) { 1613 + dport = cxl_port_add_dport(port, dport_dev); 1614 + if (IS_ERR(dport)) 1615 + return 
dport; 1616 + 1617 + /* New dport added, restart iteration */ 1618 + return ERR_PTR(-EAGAIN); 1619 + } 1620 + 1621 + return dport; 1744 1622 } 1745 1623 1746 1624 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) ··· 1790 1630 struct device *uport_dev; 1791 1631 struct cxl_dport *dport; 1792 1632 1793 - /* 1794 - * The terminal "grandparent" in PCI is NULL and @platform_bus 1795 - * for platform devices 1796 - */ 1797 - if (!dport_dev || dport_dev == &platform_bus) 1633 + if (is_cxl_host_bridge(dport_dev)) 1798 1634 return 0; 1799 1635 1800 1636 uport_dev = dport_dev->parent; ··· 1804 1648 dev_name(iter), dev_name(dport_dev), 1805 1649 dev_name(uport_dev)); 1806 1650 struct cxl_port *port __free(put_cxl_port) = 1807 - find_cxl_port(dport_dev, &dport); 1651 + find_cxl_port_by_uport(uport_dev); 1808 1652 if (port) { 1809 1653 dev_dbg(&cxlmd->dev, 1810 1654 "found already registered port %s:%s\n", 1811 1655 dev_name(&port->dev), 1812 1656 dev_name(port->uport_dev)); 1657 + 1658 + /* 1659 + * RP port enumerated by cxl_acpi without dport will 1660 + * have the dport added here. 
1661 + */ 1662 + scoped_guard(device, &port->dev) { 1663 + dport = find_or_add_dport(port, dport_dev); 1664 + if (IS_ERR(dport)) { 1665 + if (PTR_ERR(dport) == -EAGAIN) 1666 + goto retry; 1667 + return PTR_ERR(dport); 1668 + } 1669 + } 1670 + 1813 1671 rc = cxl_add_ep(dport, &cxlmd->dev); 1814 1672 1815 1673 /* ··· 1875 1705 EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL"); 1876 1706 1877 1707 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, 1878 - struct cxl_port *port, int *target_map) 1708 + struct cxl_port *port) 1879 1709 { 1710 + struct cxl_decoder *cxld = &cxlsd->cxld; 1880 1711 int i; 1881 - 1882 - if (!target_map) 1883 - return 0; 1884 1712 1885 1713 device_lock_assert(&port->dev); 1886 1714 1887 1715 if (xa_empty(&port->dports)) 1888 - return -EINVAL; 1716 + return 0; 1889 1717 1890 1718 guard(rwsem_write)(&cxl_rwsem.region); 1891 1719 for (i = 0; i < cxlsd->cxld.interleave_ways; i++) { 1892 - struct cxl_dport *dport = find_dport(port, target_map[i]); 1720 + struct cxl_dport *dport = find_dport(port, cxld->target_map[i]); 1893 1721 1894 - if (!dport) 1895 - return -ENXIO; 1722 + if (!dport) { 1723 + /* dport may be activated later */ 1724 + continue; 1725 + } 1896 1726 cxlsd->target[i] = dport; 1897 1727 } 1898 1728 ··· 2081 1911 /** 2082 1912 * cxl_decoder_add_locked - Add a decoder with targets 2083 1913 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 2084 - * @target_map: A list of downstream ports that this decoder can direct memory 2085 - * traffic to. These numbers should correspond with the port number 2086 - * in the PCIe Link Capabilities structure. 2087 1914 * 2088 1915 * Certain types of decoders may not have any targets. The main example of this 2089 1916 * is an endpoint device. A more awkward example is a hostbridge whose root ··· 2094 1927 * Return: Negative error code if the decoder wasn't properly configured; else 2095 1928 * returns 0. 
2096 1929 */ 2097 - int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) 1930 + int cxl_decoder_add_locked(struct cxl_decoder *cxld) 2098 1931 { 2099 1932 struct cxl_port *port; 2100 1933 struct device *dev; ··· 2115 1948 if (!is_endpoint_decoder(dev)) { 2116 1949 struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); 2117 1950 2118 - rc = decoder_populate_targets(cxlsd, port, target_map); 1951 + rc = decoder_populate_targets(cxlsd, port); 2119 1952 if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { 2120 1953 dev_err(&port->dev, 2121 1954 "Failed to populate active decoder targets\n"); ··· 2134 1967 /** 2135 1968 * cxl_decoder_add - Add a decoder with targets 2136 1969 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() 2137 - * @target_map: A list of downstream ports that this decoder can direct memory 2138 - * traffic to. These numbers should correspond with the port number 2139 - * in the PCIe Link Capabilities structure. 2140 1970 * 2141 1971 * This is the unlocked variant of cxl_decoder_add_locked(). 2142 1972 * See cxl_decoder_add_locked(). ··· 2141 1977 * Context: Process context. Takes and releases the device lock of the port that 2142 1978 * owns the @cxld. 2143 1979 */ 2144 - int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) 1980 + int cxl_decoder_add(struct cxl_decoder *cxld) 2145 1981 { 2146 1982 struct cxl_port *port; 2147 1983 ··· 2154 1990 port = to_cxl_port(cxld->dev.parent); 2155 1991 2156 1992 guard(device)(&port->dev); 2157 - return cxl_decoder_add_locked(cxld, target_map); 1993 + return cxl_decoder_add_locked(cxld); 2158 1994 } 2159 1995 EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL"); 2160 1996
+3 -1
drivers/cxl/core/region.c
··· 1516 1516 cxl_rr->nr_targets_set); 1517 1517 return -ENXIO; 1518 1518 } 1519 - } else 1519 + } else { 1520 1520 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; 1521 + cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id; 1522 + } 1521 1523 inc = 1; 1522 1524 out_target_set: 1523 1525 cxl_rr->nr_targets_set += inc;
+34 -9
drivers/cxl/cxl.h
··· 357 357 * @target_type: accelerator vs expander (type2 vs type3) selector 358 358 * @region: currently assigned region for this decoder 359 359 * @flags: memory type capabilities and locking 360 + * @target_map: cached copy of hardware port-id list, available at init 361 + * before all @dport objects have been instantiated. While 362 + * dport id is 8bit, CFMWS interleave targets are 32bits. 360 363 * @commit: device/decoder-type specific callback to commit settings to hw 361 364 * @reset: device/decoder-type specific callback to reset hw settings 362 365 */ ··· 372 369 enum cxl_decoder_type target_type; 373 370 struct cxl_region *region; 374 371 unsigned long flags; 372 + u32 target_map[CXL_DECODER_MAX_INTERLEAVE]; 375 373 int (*commit)(struct cxl_decoder *cxld); 376 374 void (*reset)(struct cxl_decoder *cxld); 377 375 }; ··· 607 603 * @cdat: Cached CDAT data 608 604 * @cdat_available: Should a CDAT attribute be available in sysfs 609 605 * @pci_latency: Upstream latency in picoseconds 606 + * @component_reg_phys: Physical address of component register 610 607 */ 611 608 struct cxl_port { 612 609 struct device dev; ··· 631 626 } cdat; 632 627 bool cdat_available; 633 628 long pci_latency; 629 + resource_size_t component_reg_phys; 634 630 }; 635 631 636 632 /** ··· 795 789 unsigned int nr_targets); 796 790 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, 797 791 unsigned int nr_targets); 798 - int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); 792 + int cxl_decoder_add(struct cxl_decoder *cxld); 799 793 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); 800 - int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); 794 + int cxl_decoder_add_locked(struct cxl_decoder *cxld); 801 795 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); 802 796 static inline int cxl_root_decoder_autoremove(struct device *host, 803 797 struct cxl_root_decoder *cxlrd) ··· 820 814 
struct range dvsec_range[2]; 821 815 }; 822 816 823 - struct cxl_hdm; 824 - struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, 825 - struct cxl_endpoint_dvsec_info *info); 826 - int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 827 - struct cxl_endpoint_dvsec_info *info); 828 - int devm_cxl_add_passthrough_decoder(struct cxl_port *port); 817 + int devm_cxl_switch_port_decoders_setup(struct cxl_port *port); 818 + int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port); 819 + int devm_cxl_endpoint_decoders_setup(struct cxl_port *port); 820 + 829 821 struct cxl_dev_state; 830 822 int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds, 831 823 struct cxl_endpoint_dvsec_info *info); ··· 902 898 #endif 903 899 904 900 void cxl_endpoint_parse_cdat(struct cxl_port *port); 905 - void cxl_switch_parse_cdat(struct cxl_port *port); 901 + void cxl_switch_parse_cdat(struct cxl_dport *dport); 906 902 907 903 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, 908 904 struct access_coordinate *coord); ··· 917 913 struct access_coordinate *c2); 918 914 919 915 bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port); 916 + struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port, 917 + struct device *dport_dev); 918 + struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port, 919 + struct device *dport_dev); 920 920 921 921 /* 922 922 * Unit test builds overrides this to __weak, find the 'strong' version ··· 931 923 #endif 932 924 933 925 u16 cxl_gpf_get_dvsec(struct device *dev); 926 + 927 + /* 928 + * Declaration for functions that are mocked by cxl_test that are called by 929 + * cxl_core. The respective functions are defined as __foo() and called by 930 + * cxl_core as foo(). The macros below ensures that those functions would 931 + * exist as foo(). See tools/testing/cxl/cxl_core_exports.c and 932 + * tools/testing/cxl/exports.h for setting up the mock functions. 
The dance 933 + * is done to avoid a circular dependency where cxl_core calls a function that 934 + * ends up being a mock function and goes to * cxl_test where it calls a 935 + * cxl_core function. 936 + */ 937 + #ifndef CXL_TEST_ENABLE 938 + #define DECLARE_TESTABLE(x) __##x 939 + #define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev) 940 + #define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup) 941 + #endif 942 + 934 943 #endif /* __CXL_H__ */
-2
drivers/cxl/cxlpci.h
··· 129 129 130 130 int devm_cxl_port_enumerate_dports(struct cxl_port *port); 131 131 struct cxl_dev_state; 132 - int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, 133 - struct cxl_endpoint_dvsec_info *info); 134 132 void read_cdat_data(struct cxl_port *port); 135 133 void cxl_cor_error_detected(struct pci_dev *pdev); 136 134 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
+4 -43
drivers/cxl/port.c
··· 59 59 60 60 static int cxl_switch_port_probe(struct cxl_port *port) 61 61 { 62 - struct cxl_hdm *cxlhdm; 63 - int rc; 62 + /* Reset nr_dports for rebind of driver */ 63 + port->nr_dports = 0; 64 64 65 65 /* Cache the data early to ensure is_visible() works */ 66 66 read_cdat_data(port); 67 67 68 - rc = devm_cxl_port_enumerate_dports(port); 69 - if (rc < 0) 70 - return rc; 71 - 72 - cxl_switch_parse_cdat(port); 73 - 74 - cxlhdm = devm_cxl_setup_hdm(port, NULL); 75 - if (!IS_ERR(cxlhdm)) 76 - return devm_cxl_enumerate_decoders(cxlhdm, NULL); 77 - 78 - if (PTR_ERR(cxlhdm) != -ENODEV) { 79 - dev_err(&port->dev, "Failed to map HDM decoder capability\n"); 80 - return PTR_ERR(cxlhdm); 81 - } 82 - 83 - if (rc == 1) { 84 - dev_dbg(&port->dev, "Fallback to passthrough decoder\n"); 85 - return devm_cxl_add_passthrough_decoder(port); 86 - } 87 - 88 - dev_err(&port->dev, "HDM decoder capability not found\n"); 89 - return -ENXIO; 68 + return 0; 90 69 } 91 70 92 71 static int cxl_endpoint_port_probe(struct cxl_port *port) 93 72 { 94 - struct cxl_endpoint_dvsec_info info = { .port = port }; 95 73 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); 96 - struct cxl_dev_state *cxlds = cxlmd->cxlds; 97 - struct cxl_hdm *cxlhdm; 98 74 int rc; 99 - 100 - rc = cxl_dvsec_rr_decode(cxlds, &info); 101 - if (rc < 0) 102 - return rc; 103 - 104 - cxlhdm = devm_cxl_setup_hdm(port, &info); 105 - if (IS_ERR(cxlhdm)) { 106 - if (PTR_ERR(cxlhdm) == -ENODEV) 107 - dev_err(&port->dev, "HDM decoder registers not found\n"); 108 - return PTR_ERR(cxlhdm); 109 - } 110 75 111 76 /* Cache the data early to ensure is_visible() works */ 112 77 read_cdat_data(port); ··· 82 117 if (rc) 83 118 return rc; 84 119 85 - rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info); 86 - if (rc) 87 - return rc; 88 - 89 - rc = devm_cxl_enumerate_decoders(cxlhdm, &info); 120 + rc = devm_cxl_endpoint_decoders_setup(port); 90 121 if (rc) 91 122 return rc; 92 123
+2 -5
tools/testing/cxl/Kbuild
··· 5 5 ldflags-y += --wrap=acpi_pci_find_root 6 6 ldflags-y += --wrap=nvdimm_bus_register 7 7 ldflags-y += --wrap=devm_cxl_port_enumerate_dports 8 - ldflags-y += --wrap=devm_cxl_setup_hdm 9 - ldflags-y += --wrap=devm_cxl_add_passthrough_decoder 10 - ldflags-y += --wrap=devm_cxl_enumerate_decoders 11 8 ldflags-y += --wrap=cxl_await_media_ready 12 - ldflags-y += --wrap=cxl_hdm_decode_init 13 - ldflags-y += --wrap=cxl_dvsec_rr_decode 14 9 ldflags-y += --wrap=devm_cxl_add_rch_dport 15 10 ldflags-y += --wrap=cxl_rcd_component_reg_phys 16 11 ldflags-y += --wrap=cxl_endpoint_parse_cdat 17 12 ldflags-y += --wrap=cxl_dport_init_ras_reporting 13 + ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup 18 14 19 15 DRIVERS := ../../../drivers 20 16 CXL_SRC := $(DRIVERS)/cxl 21 17 CXL_CORE_SRC := $(DRIVERS)/cxl/core 22 18 ccflags-y := -I$(srctree)/drivers/cxl/ 23 19 ccflags-y += -D__mock=__weak 20 + ccflags-y += -DCXL_TEST_ENABLE=1 24 21 ccflags-y += -DTRACE_INCLUDE_PATH=$(CXL_CORE_SRC) -I$(srctree)/drivers/cxl/core/ 25 22 26 23 obj-m += cxl_acpi.o
+22
tools/testing/cxl/cxl_core_exports.c
··· 2 2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ 3 3 4 4 #include "cxl.h" 5 + #include "exports.h" 5 6 6 7 /* Exporting of cxl_core symbols that are only used by cxl_test */ 7 8 EXPORT_SYMBOL_NS_GPL(cxl_num_decoders_committed, "CXL"); 9 + 10 + cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev = 11 + __devm_cxl_add_dport_by_dev; 12 + EXPORT_SYMBOL_NS_GPL(_devm_cxl_add_dport_by_dev, "CXL"); 13 + 14 + struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port, 15 + struct device *dport_dev) 16 + { 17 + return _devm_cxl_add_dport_by_dev(port, dport_dev); 18 + } 19 + EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL"); 20 + 21 + cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup = 22 + __devm_cxl_switch_port_decoders_setup; 23 + EXPORT_SYMBOL_NS_GPL(_devm_cxl_switch_port_decoders_setup, "CXL"); 24 + 25 + int devm_cxl_switch_port_decoders_setup(struct cxl_port *port) 26 + { 27 + return _devm_cxl_switch_port_decoders_setup(port); 28 + } 29 + EXPORT_SYMBOL_NS_GPL(devm_cxl_switch_port_decoders_setup, "CXL");
+13
tools/testing/cxl/exports.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright(c) 2025 Intel Corporation */ 3 + #ifndef __MOCK_CXL_EXPORTS_H_ 4 + #define __MOCK_CXL_EXPORTS_H_ 5 + 6 + typedef struct cxl_dport *(*cxl_add_dport_by_dev_fn)(struct cxl_port *port, 7 + struct device *dport_dev); 8 + extern cxl_add_dport_by_dev_fn _devm_cxl_add_dport_by_dev; 9 + 10 + typedef int(*cxl_switch_decoders_setup_fn)(struct cxl_port *port); 11 + extern cxl_switch_decoders_setup_fn _devm_cxl_switch_port_decoders_setup; 12 + 13 + #endif
+96 -19
tools/testing/cxl/test/cxl.c
··· 643 643 return cxlhdm; 644 644 } 645 645 646 - static int mock_cxl_add_passthrough_decoder(struct cxl_port *port) 647 - { 648 - dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n"); 649 - return -EOPNOTSUPP; 650 - } 651 - 652 - 653 646 struct target_map_ctx { 654 - int *target_map; 647 + u32 *target_map; 655 648 int index; 656 649 int target_count; 657 650 }; ··· 811 818 */ 812 819 if (WARN_ON(!dev)) 813 820 continue; 821 + 814 822 cxlsd = to_cxl_switch_decoder(dev); 815 823 if (i == 0) { 816 824 /* put cxl_mem.4 second in the decode order */ 817 - if (pdev->id == 4) 825 + if (pdev->id == 4) { 818 826 cxlsd->target[1] = dport; 819 - else 827 + cxld->target_map[1] = dport->port_id; 828 + } else { 820 829 cxlsd->target[0] = dport; 821 - } else 830 + cxld->target_map[0] = dport->port_id; 831 + } 832 + } else { 822 833 cxlsd->target[0] = dport; 834 + cxld->target_map[0] = dport->port_id; 835 + } 823 836 cxld = &cxlsd->cxld; 824 837 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 825 838 cxld->flags = CXL_DECODER_F_ENABLE; ··· 862 863 target_count = NR_CXL_SWITCH_PORTS; 863 864 864 865 for (i = 0; i < NR_CXL_PORT_DECODERS; i++) { 865 - int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; 866 866 struct target_map_ctx ctx = { 867 - .target_map = target_map, 868 867 .target_count = target_count, 869 868 }; 870 869 struct cxl_decoder *cxld; ··· 891 894 cxld = &cxled->cxld; 892 895 } 893 896 897 + ctx.target_map = cxld->target_map; 898 + 894 899 mock_init_hdm_decoder(cxld); 895 900 896 901 if (target_count) { ··· 904 905 } 905 906 } 906 907 907 - rc = cxl_decoder_add_locked(cxld, target_map); 908 + rc = cxl_decoder_add_locked(cxld); 908 909 if (rc) { 909 910 put_device(&cxld->dev); 910 911 dev_err(&port->dev, "Failed to add decoder\n"); ··· 920 921 return 0; 921 922 } 922 923 923 - static int mock_cxl_port_enumerate_dports(struct cxl_port *port) 924 + static int __mock_cxl_decoders_setup(struct cxl_port *port) 925 + { 926 + struct cxl_hdm *cxlhdm; 927 + 928 
+ cxlhdm = mock_cxl_setup_hdm(port, NULL); 929 + if (IS_ERR(cxlhdm)) { 930 + if (PTR_ERR(cxlhdm) != -ENODEV) 931 + dev_err(&port->dev, "Failed to map HDM decoder capability\n"); 932 + return PTR_ERR(cxlhdm); 933 + } 934 + 935 + return mock_cxl_enumerate_decoders(cxlhdm, NULL); 936 + } 937 + 938 + static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port) 939 + { 940 + if (is_cxl_root(port) || is_cxl_endpoint(port)) 941 + return -EOPNOTSUPP; 942 + 943 + return __mock_cxl_decoders_setup(port); 944 + } 945 + 946 + static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port) 947 + { 948 + if (!is_cxl_endpoint(port)) 949 + return -EOPNOTSUPP; 950 + 951 + return __mock_cxl_decoders_setup(port); 952 + } 953 + 954 + static int get_port_array(struct cxl_port *port, 955 + struct platform_device ***port_array, 956 + int *port_array_size) 924 957 { 925 958 struct platform_device **array; 926 - int i, array_size; 959 + int array_size; 927 960 928 961 if (port->depth == 1) { 929 962 if (is_multi_bridge(port->uport_dev)) { ··· 989 958 return -ENXIO; 990 959 } 991 960 961 + *port_array = array; 962 + *port_array_size = array_size; 963 + 964 + return 0; 965 + } 966 + 967 + static int mock_cxl_port_enumerate_dports(struct cxl_port *port) 968 + { 969 + struct platform_device **array; 970 + int i, array_size; 971 + int rc; 972 + 973 + rc = get_port_array(port, &array, &array_size); 974 + if (rc) 975 + return rc; 976 + 992 977 for (i = 0; i < array_size; i++) { 993 978 struct platform_device *pdev = array[i]; 994 979 struct cxl_dport *dport; ··· 1024 977 } 1025 978 1026 979 return 0; 980 + } 981 + 982 + static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port, 983 + struct device *dport_dev) 984 + { 985 + struct platform_device **array; 986 + int rc, i, array_size; 987 + 988 + rc = get_port_array(port, &array, &array_size); 989 + if (rc) 990 + return ERR_PTR(rc); 991 + 992 + for (i = 0; i < array_size; i++) { 993 + struct platform_device *pdev = 
array[i]; 994 + 995 + if (pdev->dev.parent != port->uport_dev) { 996 + dev_dbg(&port->dev, "%s: mismatch parent %s\n", 997 + dev_name(port->uport_dev), 998 + dev_name(pdev->dev.parent)); 999 + continue; 1000 + } 1001 + 1002 + if (&pdev->dev != dport_dev) 1003 + continue; 1004 + 1005 + return devm_cxl_add_dport(port, &pdev->dev, pdev->id, 1006 + CXL_RESOURCE_NONE); 1007 + } 1008 + 1009 + return ERR_PTR(-ENODEV); 1027 1010 } 1028 1011 1029 1012 /* ··· 1112 1035 .acpi_table_parse_cedt = mock_acpi_table_parse_cedt, 1113 1036 .acpi_evaluate_integer = mock_acpi_evaluate_integer, 1114 1037 .acpi_pci_find_root = mock_acpi_pci_find_root, 1038 + .devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup, 1039 + .devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup, 1115 1040 .devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports, 1116 - .devm_cxl_setup_hdm = mock_cxl_setup_hdm, 1117 - .devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder, 1118 - .devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders, 1119 1041 .cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat, 1042 + .devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev, 1120 1043 .list = LIST_HEAD_INIT(cxl_mock_ops.list), 1121 1044 }; 1122 1045
+35 -61
tools/testing/cxl/test/mock.c
··· 10 10 #include <cxlmem.h> 11 11 #include <cxlpci.h> 12 12 #include "mock.h" 13 + #include "../exports.h" 13 14 14 15 static LIST_HEAD(mock); 16 + 17 + static struct cxl_dport * 18 + redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port, 19 + struct device *dport_dev); 20 + static int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port); 15 21 16 22 void register_cxl_mock_ops(struct cxl_mock_ops *ops) 17 23 { 18 24 list_add_rcu(&ops->list, &mock); 25 + _devm_cxl_add_dport_by_dev = redirect_devm_cxl_add_dport_by_dev; 26 + _devm_cxl_switch_port_decoders_setup = 27 + redirect_devm_cxl_switch_port_decoders_setup; 19 28 } 20 29 EXPORT_SYMBOL_GPL(register_cxl_mock_ops); 21 30 ··· 32 23 33 24 void unregister_cxl_mock_ops(struct cxl_mock_ops *ops) 34 25 { 26 + _devm_cxl_switch_port_decoders_setup = 27 + __devm_cxl_switch_port_decoders_setup; 28 + _devm_cxl_add_dport_by_dev = __devm_cxl_add_dport_by_dev; 35 29 list_del_rcu(&ops->list); 36 30 synchronize_srcu(&cxl_mock_srcu); 37 31 } ··· 143 131 } 144 132 EXPORT_SYMBOL_GPL(__wrap_nvdimm_bus_register); 145 133 146 - struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port, 147 - struct cxl_endpoint_dvsec_info *info) 148 - 149 - { 150 - int index; 151 - struct cxl_hdm *cxlhdm; 152 - struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 153 - 154 - if (ops && ops->is_mock_port(port->uport_dev)) 155 - cxlhdm = ops->devm_cxl_setup_hdm(port, info); 156 - else 157 - cxlhdm = devm_cxl_setup_hdm(port, info); 158 - put_cxl_mock_ops(index); 159 - 160 - return cxlhdm; 161 - } 162 - EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, "CXL"); 163 - 164 - int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port) 134 + int redirect_devm_cxl_switch_port_decoders_setup(struct cxl_port *port) 165 135 { 166 136 int rc, index; 167 137 struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 168 138 169 139 if (ops && ops->is_mock_port(port->uport_dev)) 170 - rc = ops->devm_cxl_add_passthrough_decoder(port); 140 + rc = 
ops->devm_cxl_switch_port_decoders_setup(port); 171 141 else 172 - rc = devm_cxl_add_passthrough_decoder(port); 142 + rc = __devm_cxl_switch_port_decoders_setup(port); 173 143 put_cxl_mock_ops(index); 174 144 175 145 return rc; 176 146 } 177 - EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_passthrough_decoder, "CXL"); 178 147 179 - int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 180 - struct cxl_endpoint_dvsec_info *info) 148 + int __wrap_devm_cxl_endpoint_decoders_setup(struct cxl_port *port) 181 149 { 182 150 int rc, index; 183 - struct cxl_port *port = cxlhdm->port; 184 151 struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 185 152 186 153 if (ops && ops->is_mock_port(port->uport_dev)) 187 - rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info); 154 + rc = ops->devm_cxl_endpoint_decoders_setup(port); 188 155 else 189 - rc = devm_cxl_enumerate_decoders(cxlhdm, info); 156 + rc = devm_cxl_endpoint_decoders_setup(port); 190 157 put_cxl_mock_ops(index); 191 158 192 159 return rc; 193 160 } 194 - EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enumerate_decoders, "CXL"); 161 + EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_endpoint_decoders_setup, "CXL"); 195 162 196 163 int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port) 197 164 { ··· 201 210 return rc; 202 211 } 203 212 EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, "CXL"); 204 - 205 - int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds, 206 - struct cxl_hdm *cxlhdm, 207 - struct cxl_endpoint_dvsec_info *info) 208 - { 209 - int rc = 0, index; 210 - struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 211 - 212 - if (ops && ops->is_mock_dev(cxlds->dev)) 213 - rc = 0; 214 - else 215 - rc = cxl_hdm_decode_init(cxlds, cxlhdm, info); 216 - put_cxl_mock_ops(index); 217 - 218 - return rc; 219 - } 220 - EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, "CXL"); 221 - 222 - int __wrap_cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds, 223 - struct cxl_endpoint_dvsec_info *info) 224 - { 225 - int rc = 0, index; 226 - 
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 227 - 228 - if (ops && ops->is_mock_dev(cxlds->dev)) 229 - rc = 0; 230 - else 231 - rc = cxl_dvsec_rr_decode(cxlds, info); 232 - put_cxl_mock_ops(index); 233 - 234 - return rc; 235 - } 236 - EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, "CXL"); 237 213 238 214 struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port, 239 215 struct device *dport_dev, ··· 268 310 put_cxl_mock_ops(index); 269 311 } 270 312 EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dport_init_ras_reporting, "CXL"); 313 + 314 + struct cxl_dport *redirect_devm_cxl_add_dport_by_dev(struct cxl_port *port, 315 + struct device *dport_dev) 316 + { 317 + int index; 318 + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); 319 + struct cxl_dport *dport; 320 + 321 + if (ops && ops->is_mock_port(port->uport_dev)) 322 + dport = ops->devm_cxl_add_dport_by_dev(port, dport_dev); 323 + else 324 + dport = __devm_cxl_add_dport_by_dev(port, dport_dev); 325 + put_cxl_mock_ops(index); 326 + 327 + return dport; 328 + } 271 329 272 330 MODULE_LICENSE("GPL v2"); 273 331 MODULE_DESCRIPTION("cxl_test: emulation module");
+4 -5
tools/testing/cxl/test/mock.h
··· 20 20 bool (*is_mock_port)(struct device *dev); 21 21 bool (*is_mock_dev)(struct device *dev); 22 22 int (*devm_cxl_port_enumerate_dports)(struct cxl_port *port); 23 - struct cxl_hdm *(*devm_cxl_setup_hdm)( 24 - struct cxl_port *port, struct cxl_endpoint_dvsec_info *info); 25 - int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port); 26 - int (*devm_cxl_enumerate_decoders)( 27 - struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info); 23 + int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port); 24 + int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port); 28 25 void (*cxl_endpoint_parse_cdat)(struct cxl_port *port); 26 + struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port, 27 + struct device *dport_dev); 29 28 }; 30 29 31 30 void register_cxl_mock_ops(struct cxl_mock_ops *ops);