tools/testing/cxl: Add a single-port host-bridge regression config

Jonathan reports that region creation fails when a single-port
host-bridge connects to a multi-port switch. Mock up that configuration
so a fix can be tested and regression tested going forward.
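
For reference, a rough sketch of the topology the mock adds (this sketch is
not part of the patch text; the names follow the arrays introduced below,
with NR_CXL_SINGLE_HOST == 1 and NR_CXL_SWITCH_PORTS == 2):

/*
 * Illustrative sketch only:
 *
 *   cxl_hb_single[0]            single-port host bridge (3rd CHBS, uid 2)
 *     cxl_root_single[0]        the bridge's lone root port
 *       cxl_swu_single[0]       switch upstream port
 *         cxl_swd_single[0]     switch downstream port 0 -> cxl_mem_single[0]
 *         cxl_swd_single[1]     switch downstream port 1 -> cxl_mem_single[1]
 */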

Reported-by: Bobo WL <lmw.bobo@gmail.com>
Reported-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: http://lore.kernel.org/r/20221010172057.00001559@huawei.com
Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
Link: https://lore.kernel.org/r/166752184838.947915.2167957540894293891.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Changed files:

 tools/testing/cxl/test/cxl.c (+278 -19)
···
 #include "mock.h"
 
 #define NR_CXL_HOST_BRIDGES 2
+#define NR_CXL_SINGLE_HOST 1
 #define NR_CXL_ROOT_PORTS 2
 #define NR_CXL_SWITCH_PORTS 2
 #define NR_CXL_PORT_DECODERS 8
 
 static struct platform_device *cxl_acpi;
 static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
-static struct platform_device
-	*cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
-	*cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS];
-static struct platform_device
-	*cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS *
-			  NR_CXL_SWITCH_PORTS];
-struct platform_device
-	*cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS];
+#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
+static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
+static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
+#define NR_MEM_MULTI \
+	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
+
+static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
+static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
+#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
+static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
+
+struct platform_device *cxl_mem[NR_MEM_MULTI];
+struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
+
+
+static inline bool is_multi_bridge(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
+		if (&cxl_host_bridge[i]->dev == dev)
+			return true;
+	return false;
+}
+
+static inline bool is_single_bridge(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+		if (&cxl_hb_single[i]->dev == dev)
+			return true;
+	return false;
+}
 
 static struct acpi_device acpi0017_mock;
-static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = {
+static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = {
 	[0] = {
 		.handle = &host_bridge[0],
 	},
 	[1] = {
 		.handle = &host_bridge[1],
 	},
+	[2] = {
+		.handle = &host_bridge[2],
+	},
+
 };
 
 static bool is_mock_dev(struct device *dev)
···
 
 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
 		if (dev == &cxl_mem[i]->dev)
+			return true;
+	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
+		if (dev == &cxl_mem_single[i]->dev)
 			return true;
 	if (dev == &cxl_acpi->dev)
 		return true;
···
 
 static struct {
 	struct acpi_table_cedt cedt;
-	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES];
+	struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST];
 	struct {
 		struct acpi_cedt_cfmws cfmws;
 		u32 target[1];
···
 		struct acpi_cedt_cfmws cfmws;
 		u32 target[2];
 	} cfmws3;
+	struct {
+		struct acpi_cedt_cfmws cfmws;
+		u32 target[1];
+	} cfmws4;
 } __packed mock_cedt = {
 	.cedt = {
 		.header = {
···
 			.length = sizeof(mock_cedt.chbs[0]),
 		},
 		.uid = 1,
+		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
+	},
+	.chbs[2] = {
+		.header = {
+			.type = ACPI_CEDT_TYPE_CHBS,
+			.length = sizeof(mock_cedt.chbs[0]),
+		},
+		.uid = 2,
 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
 	},
 	.cfmws0 = {
···
 		},
 		.target = { 0, 1, },
 	},
+	.cfmws4 = {
+		.cfmws = {
+			.header = {
+				.type = ACPI_CEDT_TYPE_CFMWS,
+				.length = sizeof(mock_cedt.cfmws4),
+			},
+			.interleave_ways = 0,
+			.granularity = 4,
+			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
+					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
+			.qtg_id = 4,
+			.window_size = SZ_256M * 4UL,
+		},
+		.target = { 2 },
+	},
 };
 
-struct acpi_cedt_cfmws *mock_cfmws[4] = {
+struct acpi_cedt_cfmws *mock_cfmws[] = {
 	[0] = &mock_cedt.cfmws0.cfmws,
 	[1] = &mock_cedt.cfmws1.cfmws,
 	[2] = &mock_cedt.cfmws2.cfmws,
 	[3] = &mock_cedt.cfmws3.cfmws,
+	[4] = &mock_cedt.cfmws4.cfmws,
 };
 
 struct cxl_mock_res {
···
 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
 		if (dev == &cxl_host_bridge[i]->dev)
 			return true;
+	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
+		if (dev == &cxl_hb_single[i]->dev)
+			return true;
 	return false;
 }
 
···
 
 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
 		if (dev == &cxl_switch_dport[i]->dev)
+			return true;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
+		if (dev == &cxl_root_single[i]->dev)
+			return true;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
+		if (dev == &cxl_swu_single[i]->dev)
+			return true;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
+		if (dev == &cxl_swd_single[i]->dev)
 			return true;
 
 	if (is_cxl_memdev(dev))
···
 	int i, array_size;
 
 	if (port->depth == 1) {
-		array_size = ARRAY_SIZE(cxl_root_port);
-		array = cxl_root_port;
+		if (is_multi_bridge(port->uport)) {
+			array_size = ARRAY_SIZE(cxl_root_port);
+			array = cxl_root_port;
+		} else if (is_single_bridge(port->uport)) {
+			array_size = ARRAY_SIZE(cxl_root_single);
+			array = cxl_root_single;
+		} else {
+			dev_dbg(&port->dev, "%s: unknown bridge type\n",
+				dev_name(port->uport));
+			return -ENXIO;
+		}
 	} else if (port->depth == 2) {
-		array_size = ARRAY_SIZE(cxl_switch_dport);
-		array = cxl_switch_dport;
+		struct cxl_port *parent = to_cxl_port(port->dev.parent);
+
+		if (is_multi_bridge(parent->uport)) {
+			array_size = ARRAY_SIZE(cxl_switch_dport);
+			array = cxl_switch_dport;
+		} else if (is_single_bridge(parent->uport)) {
+			array_size = ARRAY_SIZE(cxl_swd_single);
+			array = cxl_swd_single;
+		} else {
+			dev_dbg(&port->dev, "%s: unknown bridge type\n",
+				dev_name(port->uport));
+			return -ENXIO;
+		}
 	} else {
 		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
 			      port->depth);
···
 		struct platform_device *pdev = array[i];
 		struct cxl_dport *dport;
 
-		if (pdev->dev.parent != port->uport)
+		if (pdev->dev.parent != port->uport) {
+			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
+				dev_name(port->uport),
+				dev_name(pdev->dev.parent));
 			continue;
+		}
 
 		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
 					   CXL_RESOURCE_NONE);
···
 #ifndef SZ_512G
 #define SZ_512G (SZ_64G * 8)
 #endif
+
+static __init int cxl_single_init(void)
+{
+	int i, rc;
+
+	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
+		struct acpi_device *adev =
+			&host_bridge[NR_CXL_HOST_BRIDGES + i];
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("cxl_host_bridge",
+					     NR_CXL_HOST_BRIDGES + i);
+		if (!pdev)
+			goto err_bridge;
+
+		mock_companion(adev, &pdev->dev);
+		rc = platform_device_add(pdev);
+		if (rc) {
+			platform_device_put(pdev);
+			goto err_bridge;
+		}
+
+		cxl_hb_single[i] = pdev;
+		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
+				       "physical_node");
+		if (rc)
+			goto err_bridge;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
+		struct platform_device *bridge =
+			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("cxl_root_port",
+					     NR_MULTI_ROOT + i);
+		if (!pdev)
+			goto err_port;
+		pdev->dev.parent = &bridge->dev;
+
+		rc = platform_device_add(pdev);
+		if (rc) {
+			platform_device_put(pdev);
+			goto err_port;
+		}
+		cxl_root_single[i] = pdev;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
+		struct platform_device *root_port = cxl_root_single[i];
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("cxl_switch_uport",
+					     NR_MULTI_ROOT + i);
+		if (!pdev)
+			goto err_uport;
+		pdev->dev.parent = &root_port->dev;
+
+		rc = platform_device_add(pdev);
+		if (rc) {
+			platform_device_put(pdev);
+			goto err_uport;
+		}
+		cxl_swu_single[i] = pdev;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
+		struct platform_device *uport =
+			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("cxl_switch_dport",
+					     i + NR_MEM_MULTI);
+		if (!pdev)
+			goto err_dport;
+		pdev->dev.parent = &uport->dev;
+
+		rc = platform_device_add(pdev);
+		if (rc) {
+			platform_device_put(pdev);
+			goto err_dport;
+		}
+		cxl_swd_single[i] = pdev;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
+		struct platform_device *dport = cxl_swd_single[i];
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
+		if (!pdev)
+			goto err_mem;
+		pdev->dev.parent = &dport->dev;
+		set_dev_node(&pdev->dev, i % 2);
+
+		rc = platform_device_add(pdev);
+		if (rc) {
+			platform_device_put(pdev);
+			goto err_mem;
+		}
+		cxl_mem_single[i] = pdev;
+	}
+
+	return 0;
+
+err_mem:
+	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_mem_single[i]);
+err_dport:
+	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_swd_single[i]);
+err_uport:
+	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_swu_single[i]);
+err_port:
+	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_root_single[i]);
+err_bridge:
+	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+		struct platform_device *pdev = cxl_hb_single[i];
+
+		if (!pdev)
+			continue;
+		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+		platform_device_unregister(cxl_hb_single[i]);
+	}
+
+	return rc;
+}
+
+static void cxl_single_exit(void)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_mem_single[i]);
+	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_swd_single[i]);
+	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_swu_single[i]);
+	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
+		platform_device_unregister(cxl_root_single[i]);
+	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
+		struct platform_device *pdev = cxl_hb_single[i];
+
+		if (!pdev)
+			continue;
+		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
+		platform_device_unregister(cxl_hb_single[i]);
+	}
+}
 
 static __init int cxl_test_init(void)
 {
···
 		cxl_switch_dport[i] = pdev;
 	}
 
-	BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport));
 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
 		struct platform_device *dport = cxl_switch_dport[i];
 		struct platform_device *pdev;
···
 		cxl_mem[i] = pdev;
 	}
 
+	rc = cxl_single_init();
+	if (rc)
+		goto err_mem;
+
 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
 	if (!cxl_acpi)
-		goto err_mem;
+		goto err_single;
 
 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
 	acpi0017_mock.dev.bus = &platform_bus_type;
···
 
 err_add:
 	platform_device_put(cxl_acpi);
+err_single:
+	cxl_single_exit();
 err_mem:
 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
 		platform_device_unregister(cxl_mem[i]);
···
 	int i;
 
 	platform_device_unregister(cxl_acpi);
+	cxl_single_exit();
 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
 		platform_device_unregister(cxl_mem[i]);
 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
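
One detail worth noting in the hunks above: the single-topology devices reuse
the same platform_device names ("cxl_host_bridge", "cxl_root_port",
"cxl_switch_uport", "cxl_switch_dport", "cxl_mem") as the existing two-bridge
topology, so cxl_single_init() offsets their ids past the multi-bridge ranges
(NR_CXL_HOST_BRIDGES, NR_MULTI_ROOT, NR_MEM_MULTI). A minimal, hypothetical
user-space sketch that just prints the resulting "name.id" device names, with
the constants copied from the patch:

/* Illustrative only: not part of the patch. Shows how the single-topology
 * platform device ids land past the multi-bridge ranges. */
#include <stdio.h>

#define NR_CXL_HOST_BRIDGES	2
#define NR_CXL_SINGLE_HOST	1
#define NR_CXL_ROOT_PORTS	2
#define NR_CXL_SWITCH_PORTS	2
#define NR_MULTI_ROOT	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
#define NR_MEM_MULTI	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
#define NR_MEM_SINGLE	(NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)

int main(void)
{
	int i;

	/* One single-port host bridge, one root port, one switch uport. */
	for (i = 0; i < NR_CXL_SINGLE_HOST; i++)
		printf("cxl_host_bridge.%d  cxl_root_port.%d  cxl_switch_uport.%d\n",
		       NR_CXL_HOST_BRIDGES + i, NR_MULTI_ROOT + i, NR_MULTI_ROOT + i);
	/* Two switch dports, each with an endpoint memdev behind it. */
	for (i = 0; i < NR_MEM_SINGLE; i++)
		printf("cxl_switch_dport.%d  cxl_mem.%d\n",
		       NR_MEM_MULTI + i, NR_MEM_MULTI + i);
	return 0;
}

Compiled standalone this prints cxl_host_bridge.2, cxl_root_port.4,
cxl_switch_uport.4, cxl_switch_dport.8/.9 and cxl_mem.8/.9, none of which
collide with the ids already used by the two-bridge mock topology.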