Merge tag 'cxl-fixes-for-6.1-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Dan Williams:
"Several fixes for CXL region creation crashes, leaks and failures.

This is mainly fallout from the original implementation of dynamic CXL
region creation (instantiate new physical memory pools) that arrived
in v6.0-rc1.

Given the theme of "failures in the presence of pass-through decoders",
this also includes new regression test infrastructure for that case.

Summary:

- Fix region creation crash with pass-through decoders

- Fix region creation crash when decoder allocation fails

- Fix region creation crash when scanning regions to enforce the
increasing physical address order constraint that CXL mandates

- Fix a memory leak for cxl_pmem_region objects, track 1:N instead of
1:1 memory-device-to-region associations

- Fix a memory leak for cxl_region objects when regions with active
targets are deleted

- Fix assignment of NUMA nodes to CXL regions by CFMWS (CXL Window)
emulated proximity domains

- Fix region creation failure for switch attached devices downstream
of a single-port host-bridge

- Fix false positive memory leak of cxl_region objects by recycling
recently used region ids rather than freeing them

- Add regression test infrastructure for a pass-through decoder
configuration

- Fix some mailbox payload handling corner cases"
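
A note on the pass-through decoder crashes called out above: a port with a
single downstream port can surface a decoder that leaves optional operations
unimplemented, and the region code assumed those callbacks were always
populated. A minimal sketch of the defensive pattern, using hypothetical
structure and function names rather than the real cxl_decoder layout:

    /* Sketch only: illustrative types, not the in-tree cxl_decoder */
    struct example_decoder {
            /* may be NULL for a pass-through decoder */
            int (*commit)(struct example_decoder *cxld);
    };

    static int example_commit_chain(struct example_decoder **chain, int n)
    {
            int i, rc = 0;

            for (i = 0; i < n; i++) {
                    /* guard optional ops before invoking them */
                    if (chain[i]->commit)
                            rc = chain[i]->commit(chain[i]);
                    if (rc)
                            break;
            }
            return rc;
    }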

* tag 'cxl-fixes-for-6.1-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
cxl/region: Recycle region ids
cxl/region: Fix 'distance' calculation with passthrough ports
tools/testing/cxl: Add a single-port host-bridge regression config
tools/testing/cxl: Fix some error exits
cxl/pmem: Fix cxl_pmem_region and cxl_memdev leak
cxl/region: Fix cxl_region leak, cleanup targets at region delete
cxl/region: Fix region HPA ordering validation
cxl/pmem: Use size_add() against integer overflow
cxl/region: Fix decoder allocation crash
ACPI: NUMA: Add CXL CFMWS 'nodes' to the possible nodes set
cxl/pmem: Fix failure to account for 8 byte header for writes to the device LSA.
cxl/region: Fix null pointer dereference due to pass through decoder commit
cxl/mbox: Add a check on input payload size

Changed files: +449 -92

drivers/acpi/numa/srat.c (+1)

                 pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
                         node, start, end);
         }
+        node_set(node, numa_nodes_parsed);
 
         /* Set the next available fake_pxm value */
         (*fake_pxm)++;
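
For context on the CFMWS/NUMA fix: firmware may describe a CXL window with no
matching SRAT entry, so the ACPI code fabricates a proximity domain for it;
marking the resulting node in numa_nodes_parsed is what puts it in the
possible-node set, so a node id can later be resolved for memory placed in
that window. A rough, hedged sketch of the consumer side (the exact call
sites in the CXL/DAX paths differ):

    #include <linux/types.h>
    #include <linux/numa.h>
    #include <linux/memory_hotplug.h>

    /*
     * Sketch: resolve a NUMA node for a hot-added range. If the CFMWS node
     * never became a possible node, lookups fall back to a heuristic node
     * instead of the emulated proximity domain.
     */
    static int example_target_node(u64 start)
    {
            int nid = phys_to_target_node(start);

            if (nid == NUMA_NO_NODE)
                    nid = memory_add_physaddr_to_nid(start);
            return nid;
    }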

drivers/cxl/core/mbox.c (+1 -1)

         };
         int rc;
 
-        if (out_size > cxlds->payload_size)
+        if (in_size > cxlds->payload_size || out_size > cxlds->payload_size)
                 return -E2BIG;
 
         rc = cxlds->mbox_send(cxlds, &mbox_cmd);
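
The mailbox change above closes a corner case where only the output size was
bounded against the device's reported payload capacity. Both directions share
that payload area, so an oversized input must be rejected as well. A
standalone sketch of the check, written as a hypothetical helper rather than
the actual cxl_mbox_send_cmd() flow:

    #include <stddef.h>
    #include <errno.h>

    /* Both the input and output payloads must fit the mailbox payload area */
    static int validate_payload(size_t in_size, size_t out_size,
                                size_t payload_size)
    {
            if (in_size > payload_size || out_size > payload_size)
                    return -E2BIG;
            return 0;
    }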

drivers/cxl/core/pmem.c (+2)

 {
         struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
 
+        xa_destroy(&cxl_nvd->pmem_regions);
         kfree(cxl_nvd);
 }
 
...
 
         dev = &cxl_nvd->dev;
         cxl_nvd->cxlmd = cxlmd;
+        xa_init(&cxl_nvd->pmem_regions);
         device_initialize(dev);
         lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
         device_set_pm_not_required(dev);
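
The xa_init()/xa_destroy() pairing above gives each cxl_nvdimm an xarray that
the pmem region driver uses to track every region referencing it (the 1:N
association noted in the summary). A generic sketch of that pattern, keyed by
pointer value and using hypothetical names:

    #include <linux/xarray.h>

    /* Sketch: one owner tracking many associated objects, as in pmem_regions */
    struct example_owner {
            struct xarray assoc;
    };

    static void example_owner_init(struct example_owner *o)
    {
            xa_init(&o->assoc);
    }

    static int example_add(struct example_owner *o, void *obj)
    {
            /* the object's address doubles as a unique index */
            return xa_insert(&o->assoc, (unsigned long)obj, obj, GFP_KERNEL);
    }

    static void example_del(struct example_owner *o, void *obj)
    {
            /* returns the erased entry, or NULL if it was never added */
            xa_erase(&o->assoc, (unsigned long)obj);
    }

    static void example_owner_teardown(struct example_owner *o)
    {
            /* entries should already be erased; this frees internal nodes */
            xa_destroy(&o->assoc);
    }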

drivers/cxl/core/port.c (+9 -2)

 static int add_dport(struct cxl_port *port, struct cxl_dport *new)
 {
         struct cxl_dport *dup;
+        int rc;
 
         device_lock_assert(&port->dev);
         dup = find_dport(port, new->port_id);
...
                         dev_name(dup->dport));
                 return -EBUSY;
         }
-        return xa_insert(&port->dports, (unsigned long)new->dport, new,
-                         GFP_KERNEL);
+
+        rc = xa_insert(&port->dports, (unsigned long)new->dport, new,
+                       GFP_KERNEL);
+        if (rc)
+                return rc;
+
+        port->nr_dports++;
+        return 0;
 }
 
 /*

drivers/cxl/core/region.c (+85 -28)

              iter = to_cxl_port(iter->dev.parent)) {
                 cxl_rr = cxl_rr_load(iter, cxlr);
                 cxld = cxl_rr->decoder;
-                rc = cxld->commit(cxld);
+                if (cxld->commit)
+                        rc = cxld->commit(cxld);
                 if (rc)
                         break;
         }
...
         xa_for_each(&port->regions, index, iter) {
                 struct cxl_region_params *ip = &iter->region->params;
 
+                if (!ip->res)
+                        continue;
+
                 if (ip->res->start > p->res->start) {
                         dev_dbg(&cxlr->dev,
                                 "%s: HPA order violation %s:%pr vs %pr\n",
...
         return cxl_rr;
 }
 
-static void free_region_ref(struct cxl_region_ref *cxl_rr)
+static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
 {
-        struct cxl_port *port = cxl_rr->port;
         struct cxl_region *cxlr = cxl_rr->region;
         struct cxl_decoder *cxld = cxl_rr->decoder;
+
+        if (!cxld)
+                return;
 
         dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
         if (cxld->region == cxlr) {
                 cxld->region = NULL;
                 put_device(&cxlr->dev);
         }
+}
 
+static void free_region_ref(struct cxl_region_ref *cxl_rr)
+{
+        struct cxl_port *port = cxl_rr->port;
+        struct cxl_region *cxlr = cxl_rr->region;
+
+        cxl_rr_free_decoder(cxl_rr);
         xa_erase(&port->regions, (unsigned long)cxlr);
         xa_destroy(&cxl_rr->endpoints);
         kfree(cxl_rr);
...
                 get_device(&cxlr->dev);
         }
 
+        return 0;
+}
+
+static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+                                struct cxl_endpoint_decoder *cxled,
+                                struct cxl_region_ref *cxl_rr)
+{
+        struct cxl_decoder *cxld;
+
+        if (port == cxled_to_port(cxled))
+                cxld = &cxled->cxld;
+        else
+                cxld = cxl_region_find_decoder(port, cxlr);
+        if (!cxld) {
+                dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+                        dev_name(&port->dev));
+                return -EBUSY;
+        }
+
+        if (cxld->region) {
+                dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
+                        dev_name(&port->dev), dev_name(&cxld->dev),
+                        dev_name(&cxld->region->dev));
+                return -EBUSY;
+        }
+
+        cxl_rr->decoder = cxld;
         return 0;
 }
...
                         cxl_rr->nr_targets++;
                         nr_targets_inc = true;
                 }
-
-                /*
-                 * The decoder for @cxlr was allocated when the region was first
-                 * attached to @port.
-                 */
-                cxld = cxl_rr->decoder;
         } else {
                 cxl_rr = alloc_region_ref(port, cxlr);
                 if (IS_ERR(cxl_rr)) {
...
                 }
                 nr_targets_inc = true;
 
-                if (port == cxled_to_port(cxled))
-                        cxld = &cxled->cxld;
-                else
-                        cxld = cxl_region_find_decoder(port, cxlr);
-                if (!cxld) {
-                        dev_dbg(&cxlr->dev, "%s: no decoder available\n",
-                                dev_name(&port->dev));
+                rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+                if (rc)
                         goto out_erase;
-                }
-
-                if (cxld->region) {
-                        dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
-                                dev_name(&port->dev), dev_name(&cxld->dev),
-                                dev_name(&cxld->region->dev));
-                        rc = -EBUSY;
-                        goto out_erase;
-                }
-
-                cxl_rr->decoder = cxld;
         }
+        cxld = cxl_rr->decoder;
 
         rc = cxl_rr_ep_add(cxl_rr, cxled);
         if (rc) {
...
         if (cxl_rr->nr_targets_set) {
                 int i, distance;
 
-                distance = p->nr_targets / cxl_rr->nr_targets;
+                /*
+                 * Passthrough ports impose no distance requirements between
+                 * peers
+                 */
+                if (port->nr_dports == 1)
+                        distance = 0;
+                else
+                        distance = p->nr_targets / cxl_rr->nr_targets;
                 for (i = 0; i < cxl_rr->nr_targets_set; i++)
                         if (ep->dport == cxlsd->target[i]) {
                                 rc = check_last_peer(cxled, ep, cxl_rr,
...
 
 static void cxl_region_release(struct device *dev)
 {
+        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
         struct cxl_region *cxlr = to_cxl_region(dev);
+        int id = atomic_read(&cxlrd->region_id);
+
+        /*
+         * Try to reuse the recently idled id rather than the cached
+         * next id to prevent the region id space from increasing
+         * unnecessarily.
+         */
+        if (cxlr->id < id)
+                if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
+                        memregion_free(id);
+                        goto out;
+                }
 
         memregion_free(cxlr->id);
+out:
+        put_device(dev->parent);
         kfree(cxlr);
 }
...
 static void unregister_region(void *dev)
 {
         struct cxl_region *cxlr = to_cxl_region(dev);
+        struct cxl_region_params *p = &cxlr->params;
+        int i;
 
         device_del(dev);
+
+        /*
+         * Now that region sysfs is shutdown, the parameter block is now
+         * read-only, so no need to hold the region rwsem to access the
+         * region parameters.
+         */
+        for (i = 0; i < p->interleave_ways; i++)
+                detach_target(cxlr, i);
+
         cxl_region_iomem_release(cxlr);
         put_device(dev);
 }
...
         device_initialize(dev);
         lockdep_set_class(&dev->mutex, &cxl_region_key);
         dev->parent = &cxlrd->cxlsd.cxld.dev;
+        /*
+         * Keep root decoder pinned through cxl_region_release to fixup
+         * region id allocations
+         */
+        get_device(dev->parent);
         device_set_pm_not_required(dev);
         dev->bus = &cxl_bus_type;
         dev->type = &cxl_region_type;

drivers/cxl/cxl.h (+3 -1)

         struct device dev;
         struct cxl_memdev *cxlmd;
         struct cxl_nvdimm_bridge *bridge;
-        struct cxl_pmem_region *region;
+        struct xarray pmem_regions;
 };
 
 struct cxl_pmem_region_mapping {
...
  * @regions: cxl_region_ref instances, regions mapped by this port
  * @parent_dport: dport that points to this port in the parent
  * @decoder_ida: allocator for decoder ids
+ * @nr_dports: number of entries in @dports
  * @hdm_end: track last allocated HDM decoder instance for allocation ordering
  * @commit_end: cursor to track highest committed decoder for commit ordering
  * @component_reg_phys: component register capability base address (optional)
...
         struct xarray regions;
         struct cxl_dport *parent_dport;
         struct ida decoder_ida;
+        int nr_dports;
         int hdm_end;
         int commit_end;
         resource_size_t component_reg_phys;

drivers/cxl/pmem.c (+68 -39)

         struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
         struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
         struct cxl_pmem_region *cxlr_pmem;
+        unsigned long index;
 
         device_lock(&cxl_nvb->dev);
-        cxlr_pmem = cxl_nvd->region;
         dev_set_drvdata(&cxl_nvd->dev, NULL);
-        cxl_nvd->region = NULL;
-        device_unlock(&cxl_nvb->dev);
+        xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
+                get_device(&cxlr_pmem->dev);
+                device_unlock(&cxl_nvb->dev);
 
-        if (cxlr_pmem) {
                 device_release_driver(&cxlr_pmem->dev);
                 put_device(&cxlr_pmem->dev);
+
+                device_lock(&cxl_nvb->dev);
         }
+        device_unlock(&cxl_nvb->dev);
 
         nvdimm_delete(nvdimm);
         cxl_nvd->bridge = NULL;
...
 
         *cmd = (struct nd_cmd_get_config_size) {
                 .config_size = cxlds->lsa_size,
-                .max_xfer = cxlds->payload_size,
+                .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
         };
 
         return 0;
...
                 return -EINVAL;
 
         /* 4-byte status follows the input data in the payload */
-        if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
+        if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
                 return -EINVAL;
 
         set_lsa =
...
 
 static void unregister_nvdimm_region(void *nd_region)
 {
-        struct cxl_nvdimm_bridge *cxl_nvb;
-        struct cxl_pmem_region *cxlr_pmem;
-        int i;
+        nvdimm_region_delete(nd_region);
+}
 
-        cxlr_pmem = nd_region_provider_data(nd_region);
-        cxl_nvb = cxlr_pmem->bridge;
+static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
+                                 struct cxl_pmem_region *cxlr_pmem)
+{
+        int rc;
+
+        rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
+                       cxlr_pmem, GFP_KERNEL);
+        if (rc)
+                return rc;
+
+        get_device(&cxlr_pmem->dev);
+        return 0;
+}
+
+static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
+                                  struct cxl_pmem_region *cxlr_pmem)
+{
+        /*
+         * It is possible this is called without a corresponding
+         * cxl_nvdimm_add_region for @cxlr_pmem
+         */
+        cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
+        if (cxlr_pmem)
+                put_device(&cxlr_pmem->dev);
+}
+
+static void release_mappings(void *data)
+{
+        int i;
+        struct cxl_pmem_region *cxlr_pmem = data;
+        struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;
+
         device_lock(&cxl_nvb->dev);
         for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
                 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
                 struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;
 
-                if (cxl_nvd->region) {
-                        put_device(&cxlr_pmem->dev);
-                        cxl_nvd->region = NULL;
-                }
+                cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
         }
         device_unlock(&cxl_nvb->dev);
-
-        nvdimm_region_delete(nd_region);
 }
...
         if (!cxl_nvb->nvdimm_bus) {
                 dev_dbg(dev, "nvdimm bus not found\n");
                 rc = -ENXIO;
-                goto err;
+                goto out_nvb;
         }
 
         memset(&mappings, 0, sizeof(mappings));
...
         res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
         if (!res) {
                 rc = -ENOMEM;
-                goto err;
+                goto out_nvb;
         }
 
         res->name = "Persistent Memory";
...
 
         rc = insert_resource(&iomem_resource, res);
         if (rc)
-                goto err;
+                goto out_nvb;
 
         rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
         if (rc)
-                goto err;
+                goto out_nvb;
 
         ndr_desc.res = res;
         ndr_desc.provider_data = cxlr_pmem;
...
         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
         if (!nd_set) {
                 rc = -ENOMEM;
-                goto err;
+                goto out_nvb;
         }
 
         ndr_desc.memregion = cxlr->id;
...
         info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
         if (!info) {
                 rc = -ENOMEM;
-                goto err;
+                goto out_nvb;
         }
+
+        rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
+        if (rc)
+                goto out_nvd;
 
         for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
                 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
...
                         dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
                                 dev_name(&cxlmd->dev));
                         rc = -ENODEV;
-                        goto err;
+                        goto out_nvd;
                 }
 
                 /* safe to drop ref now with bridge lock held */
...
                         dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
                                 dev_name(&cxlmd->dev));
                         rc = -ENODEV;
-                        goto err;
+                        goto out_nvd;
                 }
-                cxl_nvd->region = cxlr_pmem;
-                get_device(&cxlr_pmem->dev);
+
+                /*
+                 * Pin the region per nvdimm device as those may be released
+                 * out-of-order with respect to the region, and a single nvdimm
+                 * maybe associated with multiple regions
+                 */
+                rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
+                if (rc)
+                        goto out_nvd;
                 m->cxl_nvd = cxl_nvd;
                 mappings[i] = (struct nd_mapping_desc) {
                         .nvdimm = nvdimm,
...
                 nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
         if (!cxlr_pmem->nd_region) {
                 rc = -ENOMEM;
-                goto err;
+                goto out_nvd;
         }
 
         rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
                                       cxlr_pmem->nd_region);
-out:
+out_nvd:
         kfree(info);
+out_nvb:
         device_unlock(&cxl_nvb->dev);
         put_device(&cxl_nvb->dev);
 
         return rc;
-
-err:
-        dev_dbg(dev, "failed to create nvdimm region\n");
-        for (i--; i >= 0; i--) {
-                nvdimm = mappings[i].nvdimm;
-                cxl_nvd = nvdimm_provider_data(nvdimm);
-                put_device(&cxl_nvd->region->dev);
-                cxl_nvd->region = NULL;
-        }
-        goto out;
 }
 
 static struct cxl_driver cxl_pmem_region_driver = {
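
One detail worth calling out in the LSA path above: struct_size() saturates
to SIZE_MAX on overflow, but adding a raw "+ 4" to a saturated value wraps
back around to a small number and would slip past the "> buf_len" bound,
which is why the fix switches to size_add(). A small sketch of the idea,
using a hypothetical helper:

    #include <linux/types.h>
    #include <linux/overflow.h>

    /* Sketch: keep the whole expression saturating so the bound check holds */
    static bool example_payload_fits(size_t member_bytes, size_t buf_len)
    {
            /* 4-byte status word follows the input data */
            size_t need = size_add(member_bytes, 4);

            return need <= buf_len;
    }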

tools/testing/cxl/test/cxl.c (+280 -21)

 #include "mock.h"
 
 #define NR_CXL_HOST_BRIDGES 2
+#define NR_CXL_SINGLE_HOST 1
 #define NR_CXL_ROOT_PORTS 2
 #define NR_CXL_SWITCH_PORTS 2
 #define NR_CXL_PORT_DECODERS 8
 
 static struct platform_device *cxl_acpi;
 static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
-static struct platform_device
-        *cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
-        *cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
-        *cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
-                          NR_CXL_SWITCH_PORTS];
-struct platform_device
-        *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];
+#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
+static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
+static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
+#define NR_MEM_MULTI \
+        (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
+
+static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
+#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
+
+struct platform_device *cxl_mem[NR_MEM_MULTI];
+struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
+
+
+static inline bool is_multi_bridge(struct device *dev)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
+                if (&cxl_host_bridge[i]->dev == dev)
+                        return true;
+        return false;
+}
+
+static inline bool is_single_bridge(struct device *dev)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+                if (&cxl_hb_single[i]->dev == dev)
+                        return true;
+        return false;
+}
 
 static struct acpi_device acpi0017_mock;
-static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
+static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
         [0] = {
                 .handle = &host_bridge[0],
         },
         [1] = {
                 .handle = &host_bridge[1],
         },
+        [2] = {
+                .handle = &host_bridge[2],
+        },
+
 };
...
 
         for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
                 if (dev == &cxl_mem[i]->dev)
+                        return true;
+        for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
+                if (dev == &cxl_mem_single[i]->dev)
                         return true;
         if (dev == &cxl_acpi->dev)
                 return true;
...
 
 static struct {
         struct acpi_table_cedt cedt;
-        struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
+        struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
         struct {
                 struct acpi_cedt_cfmws cfmws;
                 u32 target[1];
...
                 struct acpi_cedt_cfmws cfmws;
                 u32 target[2];
         } cfmws3;
+        struct {
+                struct acpi_cedt_cfmws cfmws;
+                u32 target[1];
+        } cfmws4;
 } __packed mock_cedt = {
         .cedt = {
                 .header = {
...
                         .length = sizeof(mock_cedt.chbs[0]),
                 },
                 .uid = 1,
+                .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+        },
+        .chbs[2] = {
+                .header = {
+                        .type = ACPI_CEDT_TYPE_CHBS,
+                        .length = sizeof(mock_cedt.chbs[0]),
+                },
+                .uid = 2,
                 .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
         },
         .cfmws0 = {
...
                 },
                 .target = { 0, 1, },
         },
+        .cfmws4 = {
+                .cfmws = {
+                        .header = {
+                                .type = ACPI_CEDT_TYPE_CFMWS,
+                                .length = sizeof(mock_cedt.cfmws4),
+                        },
+                        .interleave_ways = 0,
+                        .granularity = 4,
+                        .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+                                        ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+                        .qtg_id = 4,
+                        .window_size = SZ_256M * 4UL,
+                },
+                .target = { 2 },
+        },
 };
 
-struct acpi_cedt_cfmws *mock_cfmws[4] = {
+struct acpi_cedt_cfmws *mock_cfmws[] = {
         [0] = &mock_cedt.cfmws0.cfmws,
         [1] = &mock_cedt.cfmws1.cfmws,
         [2] = &mock_cedt.cfmws2.cfmws,
         [3] = &mock_cedt.cfmws3.cfmws,
+        [4] = &mock_cedt.cfmws4.cfmws,
 };
 
 struct cxl_mock_res {
...
         for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
                 if (dev == &cxl_host_bridge[i]->dev)
                         return true;
+        for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+                if (dev == &cxl_hb_single[i]->dev)
+                        return true;
         return false;
 }
 
...
 
         for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
                 if (dev == &cxl_switch_dport[i]->dev)
+                        return true;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
+                if (dev == &cxl_root_single[i]->dev)
+                        return true;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
+                if (dev == &cxl_swu_single[i]->dev)
+                        return true;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
+                if (dev == &cxl_swd_single[i]->dev)
                         return true;
 
         if (is_cxl_memdev(dev))
...
         int i, array_size;
 
         if (port->depth == 1) {
-                array_size = ARRAY_SIZE(cxl_root_port);
-                array = cxl_root_port;
+                if (is_multi_bridge(port->uport)) {
+                        array_size = ARRAY_SIZE(cxl_root_port);
+                        array = cxl_root_port;
+                } else if (is_single_bridge(port->uport)) {
+                        array_size = ARRAY_SIZE(cxl_root_single);
+                        array = cxl_root_single;
+                } else {
+                        dev_dbg(&port->dev, "%s: unknown bridge type\n",
+                                dev_name(port->uport));
+                        return -ENXIO;
+                }
         } else if (port->depth == 2) {
-                array_size = ARRAY_SIZE(cxl_switch_dport);
-                array = cxl_switch_dport;
+                struct cxl_port *parent = to_cxl_port(port->dev.parent);
+
+                if (is_multi_bridge(parent->uport)) {
+                        array_size = ARRAY_SIZE(cxl_switch_dport);
+                        array = cxl_switch_dport;
+                } else if (is_single_bridge(parent->uport)) {
+                        array_size = ARRAY_SIZE(cxl_swd_single);
+                        array = cxl_swd_single;
+                } else {
+                        dev_dbg(&port->dev, "%s: unknown bridge type\n",
+                                dev_name(port->uport));
+                        return -ENXIO;
+                }
         } else {
                 dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
                               port->depth);
...
                 struct platform_device *pdev = array[i];
                 struct cxl_dport *dport;
 
-                if (pdev->dev.parent != port->uport)
+                if (pdev->dev.parent != port->uport) {
+                        dev_dbg(&port->dev, "%s: mismatch parent %s\n",
+                                dev_name(port->uport),
+                                dev_name(pdev->dev.parent));
                         continue;
+                }
 
                 dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
                                            CXL_RESOURCE_NONE);
...
 #ifndef SZ_512G
 #define SZ_512G (SZ_64G * 8)
 #endif
+
+static __init int cxl_single_init(void)
+{
+        int i, rc;
+
+        for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
+                struct acpi_device *adev =
+                        &host_bridge[NR_CXL_HOST_BRIDGES + i];
+                struct platform_device *pdev;
+
+                pdev = platform_device_alloc("cxl_host_bridge",
+                                             NR_CXL_HOST_BRIDGES + i);
+                if (!pdev)
+                        goto err_bridge;
+
+                mock_companion(adev, &pdev->dev);
+                rc = platform_device_add(pdev);
+                if (rc) {
+                        platform_device_put(pdev);
+                        goto err_bridge;
+                }
+
+                cxl_hb_single[i] = pdev;
+                rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+                                       "physical_node");
+                if (rc)
+                        goto err_bridge;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
+                struct platform_device *bridge =
+                        cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
+                struct platform_device *pdev;
+
+                pdev = platform_device_alloc("cxl_root_port",
+                                             NR_MULTI_ROOT + i);
+                if (!pdev)
+                        goto err_port;
+                pdev->dev.parent = &bridge->dev;
+
+                rc = platform_device_add(pdev);
+                if (rc) {
+                        platform_device_put(pdev);
+                        goto err_port;
+                }
+                cxl_root_single[i] = pdev;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
+                struct platform_device *root_port = cxl_root_single[i];
+                struct platform_device *pdev;
+
+                pdev = platform_device_alloc("cxl_switch_uport",
+                                             NR_MULTI_ROOT + i);
+                if (!pdev)
+                        goto err_uport;
+                pdev->dev.parent = &root_port->dev;
+
+                rc = platform_device_add(pdev);
+                if (rc) {
+                        platform_device_put(pdev);
+                        goto err_uport;
+                }
+                cxl_swu_single[i] = pdev;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
+                struct platform_device *uport =
+                        cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
+                struct platform_device *pdev;
+
+                pdev = platform_device_alloc("cxl_switch_dport",
+                                             i + NR_MEM_MULTI);
+                if (!pdev)
+                        goto err_dport;
+                pdev->dev.parent = &uport->dev;
+
+                rc = platform_device_add(pdev);
+                if (rc) {
+                        platform_device_put(pdev);
+                        goto err_dport;
+                }
+                cxl_swd_single[i] = pdev;
+        }
+
+        for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
+                struct platform_device *dport = cxl_swd_single[i];
+                struct platform_device *pdev;
+
+                pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
+                if (!pdev)
+                        goto err_mem;
+                pdev->dev.parent = &dport->dev;
+                set_dev_node(&pdev->dev, i % 2);
+
+                rc = platform_device_add(pdev);
+                if (rc) {
+                        platform_device_put(pdev);
+                        goto err_mem;
+                }
+                cxl_mem_single[i] = pdev;
+        }
+
+        return 0;
+
+err_mem:
+        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_mem_single[i]);
+err_dport:
+        for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_swd_single[i]);
+err_uport:
+        for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_swu_single[i]);
+err_port:
+        for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_root_single[i]);
+err_bridge:
+        for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+                struct platform_device *pdev = cxl_hb_single[i];
+
+                if (!pdev)
+                        continue;
+                sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+                platform_device_unregister(cxl_hb_single[i]);
+        }
+
+        return rc;
+}
+
+static void cxl_single_exit(void)
+{
+        int i;
+
+        for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_mem_single[i]);
+        for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_swd_single[i]);
+        for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_swu_single[i]);
+        for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+                platform_device_unregister(cxl_root_single[i]);
+        for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+                struct platform_device *pdev = cxl_hb_single[i];
+
+                if (!pdev)
+                        continue;
+                sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+                platform_device_unregister(cxl_hb_single[i]);
+        }
+}
 
 static __init int cxl_test_init(void)
 {
...
 
                 pdev = platform_device_alloc("cxl_switch_uport", i);
                 if (!pdev)
-                        goto err_port;
+                        goto err_uport;
                 pdev->dev.parent = &root_port->dev;
 
                 rc = platform_device_add(pdev);
...
 
                 pdev = platform_device_alloc("cxl_switch_dport", i);
                 if (!pdev)
-                        goto err_port;
+                        goto err_dport;
                 pdev->dev.parent = &uport->dev;
 
                 rc = platform_device_add(pdev);
...
                 cxl_switch_dport[i] = pdev;
         }
 
-        BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
         for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
                 struct platform_device *dport = cxl_switch_dport[i];
                 struct platform_device *pdev;
...
                 cxl_mem[i] = pdev;
         }
 
+        rc = cxl_single_init();
+        if (rc)
+                goto err_mem;
+
         cxl_acpi = platform_device_alloc("cxl_acpi", 0);
         if (!cxl_acpi)
-                goto err_mem;
+                goto err_single;
 
         mock_companion(&acpi0017_mock, &cxl_acpi->dev);
         acpi0017_mock.dev.bus = &platform_bus_type;
...
 
 err_add:
         platform_device_put(cxl_acpi);
+err_single:
+        cxl_single_exit();
 err_mem:
         for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                 platform_device_unregister(cxl_mem[i]);
...
         int i;
 
         platform_device_unregister(cxl_acpi);
+        cxl_single_exit();
         for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
                 platform_device_unregister(cxl_mem[i]);
         for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)