Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'cxl-fixes-6.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Ira Weiny:
"The bulk of these fixes center around an initialization order bug
reported by Gregory Price and some additional fallout from the
debugging effort.

In summary, cxl_acpi and cxl_mem race and previously worked because of
a bus_rescan_devices() while testing without modules built in.

Unfortunately with modules built in the rescan would fail due to the
cxl_port driver being registered late via the build order. Furthermore
it was found that bus_rescan_devices() did not guarantee a probe barrier
which CXL was expecting. Additional fixes to cxl-test and decoder
allocation came along as they were found in this debugging effort.

The other fixes are pretty minor but one affects trace point data seen
by user space.

Summary:

- Fix crashes when running with cxl-test code

- Fix Trace DRAM Event Record field decodes

- Fix module/built-in initialization order errors

- Fix use after free on decoder shutdowns

- Fix out of order decoder allocations

- Improve cxl-test to better reflect real world systems"

* tag 'cxl-fixes-6.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
cxl/test: Improve init-order fidelity relative to real-world systems
cxl/port: Prevent out-of-order decoder allocation
cxl/port: Fix use-after-free, permit out-of-order decoder shutdown
cxl/acpi: Ensure ports ready at cxl_acpi_probe() return
cxl/port: Fix cxl_bus_rescan() vs bus_rescan_devices()
cxl/port: Fix CXL port initialization order when the subsystem is built-in
cxl/events: Fix Trace DRAM Event Record
cxl/core: Return error when cxl_endpoint_gather_bandwidth() handles a non-PCI device

+302 -159
+35
drivers/base/core.c
··· 4038 4038 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 4039 4039 4040 4040 /** 4041 + * device_for_each_child_reverse_from - device child iterator in reversed order. 4042 + * @parent: parent struct device. 4043 + * @from: optional starting point in child list 4044 + * @fn: function to be called for each device. 4045 + * @data: data for the callback. 4046 + * 4047 + * Iterate over @parent's child devices, starting at @from, and call @fn 4048 + * for each, passing it @data. This helper is identical to 4049 + * device_for_each_child_reverse() when @from is NULL. 4050 + * 4051 + * @fn is checked each iteration. If it returns anything other than 0, 4052 + * iteration stop and that value is returned to the caller of 4053 + * device_for_each_child_reverse_from(); 4054 + */ 4055 + int device_for_each_child_reverse_from(struct device *parent, 4056 + struct device *from, const void *data, 4057 + int (*fn)(struct device *, const void *)) 4058 + { 4059 + struct klist_iter i; 4060 + struct device *child; 4061 + int error = 0; 4062 + 4063 + if (!parent->p) 4064 + return 0; 4065 + 4066 + klist_iter_init_node(&parent->p->klist_children, &i, 4067 + (from ? &from->p->knode_parent : NULL)); 4068 + while ((child = prev_device(&i)) && !error) 4069 + error = fn(child, data); 4070 + klist_iter_exit(&i); 4071 + return error; 4072 + } 4073 + EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); 4074 + 4075 + /** 4041 4076 * device_find_child - device iterator for locating a particular device. 4042 4077 * @parent: parent struct device 4043 4078 * @match: Callback function to check device
+1
drivers/cxl/Kconfig
··· 60 60 default CXL_BUS 61 61 select ACPI_TABLE_LIB 62 62 select ACPI_HMAT 63 + select CXL_PORT 63 64 help 64 65 Enable support for host managed device memory (HDM) resources 65 66 published by a platform's ACPI CXL memory layout description. See
+14 -6
drivers/cxl/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 + 3 + # Order is important here for the built-in case: 4 + # - 'core' first for fundamental init 5 + # - 'port' before platform root drivers like 'acpi' so that CXL-root ports 6 + # are immediately enabled 7 + # - 'mem' and 'pmem' before endpoint drivers so that memdevs are 8 + # immediately enabled 9 + # - 'pci' last, also mirrors the hardware enumeration hierarchy 2 10 obj-y += core/ 3 - obj-$(CONFIG_CXL_PCI) += cxl_pci.o 4 - obj-$(CONFIG_CXL_MEM) += cxl_mem.o 11 + obj-$(CONFIG_CXL_PORT) += cxl_port.o 5 12 obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o 6 13 obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o 7 - obj-$(CONFIG_CXL_PORT) += cxl_port.o 14 + obj-$(CONFIG_CXL_MEM) += cxl_mem.o 15 + obj-$(CONFIG_CXL_PCI) += cxl_pci.o 8 16 9 - cxl_mem-y := mem.o 10 - cxl_pci-y := pci.o 17 + cxl_port-y := port.o 11 18 cxl_acpi-y := acpi.o 12 19 cxl_pmem-y := pmem.o security.o 13 - cxl_port-y := port.o 20 + cxl_mem-y := mem.o 21 + cxl_pci-y := pci.o
+7
drivers/cxl/acpi.c
··· 924 924 925 925 /* load before dax_hmem sees 'Soft Reserved' CXL ranges */ 926 926 subsys_initcall(cxl_acpi_init); 927 + 928 + /* 929 + * Arrange for host-bridge ports to be active synchronous with 930 + * cxl_acpi_probe() exit. 931 + */ 932 + MODULE_SOFTDEP("pre: cxl_port"); 933 + 927 934 module_exit(cxl_acpi_exit); 928 935 MODULE_DESCRIPTION("CXL ACPI: Platform Support"); 929 936 MODULE_LICENSE("GPL v2");
+3
drivers/cxl/core/cdat.c
··· 641 641 void *ptr; 642 642 int rc; 643 643 644 + if (!dev_is_pci(cxlds->dev)) 645 + return -ENODEV; 646 + 644 647 if (cxlds->rcd) 645 648 return -ENODEV; 646 649
+42 -8
drivers/cxl/core/hdm.c
··· 712 712 return 0; 713 713 } 714 714 715 - static int cxl_decoder_reset(struct cxl_decoder *cxld) 715 + static int commit_reap(struct device *dev, const void *data) 716 + { 717 + struct cxl_port *port = to_cxl_port(dev->parent); 718 + struct cxl_decoder *cxld; 719 + 720 + if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev)) 721 + return 0; 722 + 723 + cxld = to_cxl_decoder(dev); 724 + if (port->commit_end == cxld->id && 725 + ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { 726 + port->commit_end--; 727 + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", 728 + dev_name(&cxld->dev), port->commit_end); 729 + } 730 + 731 + return 0; 732 + } 733 + 734 + void cxl_port_commit_reap(struct cxl_decoder *cxld) 735 + { 736 + struct cxl_port *port = to_cxl_port(cxld->dev.parent); 737 + 738 + lockdep_assert_held_write(&cxl_region_rwsem); 739 + 740 + /* 741 + * Once the highest committed decoder is disabled, free any other 742 + * decoders that were pinned allocated by out-of-order release. 743 + */ 744 + port->commit_end--; 745 + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev), 746 + port->commit_end); 747 + device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL, 748 + commit_reap); 749 + } 750 + EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL); 751 + 752 + static void cxl_decoder_reset(struct cxl_decoder *cxld) 716 753 { 717 754 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 718 755 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); ··· 758 721 u32 ctrl; 759 722 760 723 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 761 - return 0; 724 + return; 762 725 763 - if (port->commit_end != id) { 726 + if (port->commit_end == id) 727 + cxl_port_commit_reap(cxld); 728 + else 764 729 dev_dbg(&port->dev, 765 730 "%s: out of order reset, expected decoder%d.%d\n", 766 731 dev_name(&cxld->dev), port->id, port->commit_end); 767 - return -EBUSY; 768 - } 769 732 770 733 down_read(&cxl_dpa_rwsem); 771 734 ctrl = readl(hdm + 
CXL_HDM_DECODER0_CTRL_OFFSET(id)); ··· 778 741 writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); 779 742 up_read(&cxl_dpa_rwsem); 780 743 781 - port->commit_end--; 782 744 cxld->flags &= ~CXL_DECODER_F_ENABLE; 783 745 784 746 /* Userspace is now responsible for reconfiguring this decoder */ ··· 787 751 cxled = to_cxl_endpoint_decoder(&cxld->dev); 788 752 cxled->state = CXL_DECODER_STATE_MANUAL; 789 753 } 790 - 791 - return 0; 792 754 } 793 755 794 756 static int cxl_setup_hdm_decoder_from_dvsec(
+10 -3
drivers/cxl/core/port.c
··· 2084 2084 2085 2085 static struct workqueue_struct *cxl_bus_wq; 2086 2086 2087 + static int cxl_rescan_attach(struct device *dev, void *data) 2088 + { 2089 + int rc = device_attach(dev); 2090 + 2091 + dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached"); 2092 + 2093 + return 0; 2094 + } 2095 + 2087 2096 static void cxl_bus_rescan_queue(struct work_struct *w) 2088 2097 { 2089 - int rc = bus_rescan_devices(&cxl_bus_type); 2090 - 2091 - pr_debug("CXL bus rescan result: %d\n", rc); 2098 + bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach); 2092 2099 } 2093 2100 2094 2101 void cxl_bus_rescan(void)
+46 -45
drivers/cxl/core/region.c
··· 232 232 "Bypassing cpu_cache_invalidate_memregion() for testing!\n"); 233 233 return 0; 234 234 } else { 235 - dev_err(&cxlr->dev, 236 - "Failed to synchronize CPU cache state\n"); 235 + dev_WARN(&cxlr->dev, 236 + "Failed to synchronize CPU cache state\n"); 237 237 return -ENXIO; 238 238 } 239 239 } ··· 242 242 return 0; 243 243 } 244 244 245 - static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) 245 + static void cxl_region_decode_reset(struct cxl_region *cxlr, int count) 246 246 { 247 247 struct cxl_region_params *p = &cxlr->params; 248 - int i, rc = 0; 248 + int i; 249 249 250 250 /* 251 - * Before region teardown attempt to flush, and if the flush 252 - * fails cancel the region teardown for data consistency 253 - * concerns 251 + * Before region teardown attempt to flush, evict any data cached for 252 + * this region, or scream loudly about missing arch / platform support 253 + * for CXL teardown. 254 254 */ 255 - rc = cxl_region_invalidate_memregion(cxlr); 256 - if (rc) 257 - return rc; 255 + cxl_region_invalidate_memregion(cxlr); 258 256 259 257 for (i = count - 1; i >= 0; i--) { 260 258 struct cxl_endpoint_decoder *cxled = p->targets[i]; ··· 275 277 cxl_rr = cxl_rr_load(iter, cxlr); 276 278 cxld = cxl_rr->decoder; 277 279 if (cxld->reset) 278 - rc = cxld->reset(cxld); 279 - if (rc) 280 - return rc; 280 + cxld->reset(cxld); 281 281 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 282 282 } 283 283 284 284 endpoint_reset: 285 - rc = cxled->cxld.reset(&cxled->cxld); 286 - if (rc) 287 - return rc; 285 + cxled->cxld.reset(&cxled->cxld); 288 286 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 289 287 } 290 288 291 289 /* all decoders associated with this region have been torn down */ 292 290 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 293 - 294 - return 0; 295 291 } 296 292 297 293 static int commit_decoder(struct cxl_decoder *cxld) ··· 401 409 * still pending. 
402 410 */ 403 411 if (p->state == CXL_CONFIG_RESET_PENDING) { 404 - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 405 - /* 406 - * Revert to committed since there may still be active 407 - * decoders associated with this region, or move forward 408 - * to active to mark the reset successful 409 - */ 410 - if (rc) 411 - p->state = CXL_CONFIG_COMMIT; 412 - else 413 - p->state = CXL_CONFIG_ACTIVE; 412 + cxl_region_decode_reset(cxlr, p->interleave_ways); 413 + p->state = CXL_CONFIG_ACTIVE; 414 414 } 415 415 } 416 416 ··· 778 794 return rc; 779 795 } 780 796 797 + static int check_commit_order(struct device *dev, const void *data) 798 + { 799 + struct cxl_decoder *cxld = to_cxl_decoder(dev); 800 + 801 + /* 802 + * if port->commit_end is not the only free decoder, then out of 803 + * order shutdown has occurred, block further allocations until 804 + * that is resolved 805 + */ 806 + if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) 807 + return -EBUSY; 808 + return 0; 809 + } 810 + 781 811 static int match_free_decoder(struct device *dev, void *data) 782 812 { 813 + struct cxl_port *port = to_cxl_port(dev->parent); 783 814 struct cxl_decoder *cxld; 784 - int *id = data; 815 + int rc; 785 816 786 817 if (!is_switch_decoder(dev)) 787 818 return 0; 788 819 789 820 cxld = to_cxl_decoder(dev); 790 821 791 - /* enforce ordered allocation */ 792 - if (cxld->id != *id) 822 + if (cxld->id != port->commit_end + 1) 793 823 return 0; 794 824 795 - if (!cxld->region) 796 - return 1; 825 + if (cxld->region) { 826 + dev_dbg(dev->parent, 827 + "next decoder to commit (%s) is already reserved (%s)\n", 828 + dev_name(dev), dev_name(&cxld->region->dev)); 829 + return 0; 830 + } 797 831 798 - (*id)++; 799 - 800 - return 0; 832 + rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, 833 + check_commit_order); 834 + if (rc) { 835 + dev_dbg(dev->parent, 836 + "unable to allocate %s due to out of order shutdown\n", 837 + dev_name(dev)); 838 + return 0; 839 + } 840 + return 
1; 801 841 } 802 842 803 843 static int match_auto_decoder(struct device *dev, void *data) ··· 848 840 struct cxl_region *cxlr) 849 841 { 850 842 struct device *dev; 851 - int id = 0; 852 843 853 844 if (port == cxled_to_port(cxled)) 854 845 return &cxled->cxld; ··· 856 849 dev = device_find_child(&port->dev, &cxlr->params, 857 850 match_auto_decoder); 858 851 else 859 - dev = device_find_child(&port->dev, &id, match_free_decoder); 852 + dev = device_find_child(&port->dev, NULL, match_free_decoder); 860 853 if (!dev) 861 854 return NULL; 862 855 /* ··· 2061 2054 get_device(&cxlr->dev); 2062 2055 2063 2056 if (p->state > CXL_CONFIG_ACTIVE) { 2064 - /* 2065 - * TODO: tear down all impacted regions if a device is 2066 - * removed out of order 2067 - */ 2068 - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); 2069 - if (rc) 2070 - goto out; 2057 + cxl_region_decode_reset(cxlr, p->interleave_ways); 2071 2058 p->state = CXL_CONFIG_ACTIVE; 2072 2059 } 2073 2060
+14 -3
drivers/cxl/core/trace.h
··· 279 279 #define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00 280 280 #define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01 281 281 #define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02 282 - #define show_mem_event_type(type) __print_symbolic(type, \ 282 + #define show_gmer_mem_event_type(type) __print_symbolic(type, \ 283 283 { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ 284 284 { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ 285 285 { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \ ··· 373 373 "hpa=%llx region=%s region_uuid=%pUb", 374 374 __entry->dpa, show_dpa_flags(__entry->dpa_flags), 375 375 show_event_desc_flags(__entry->descriptor), 376 - show_mem_event_type(__entry->type), 376 + show_gmer_mem_event_type(__entry->type), 377 377 show_trans_type(__entry->transaction_type), 378 378 __entry->channel, __entry->rank, __entry->device, 379 379 __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), ··· 391 391 * DRAM Event Record defines many fields the same as the General Media Event 392 392 * Record. Reuse those definitions as appropriate. 
393 393 */ 394 + #define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00 395 + #define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01 396 + #define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02 397 + #define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03 398 + #define show_dram_mem_event_type(type) __print_symbolic(type, \ 399 + { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ 400 + { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \ 401 + { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ 402 + { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \ 403 + ) 404 + 394 405 #define CXL_DER_VALID_CHANNEL BIT(0) 395 406 #define CXL_DER_VALID_RANK BIT(1) 396 407 #define CXL_DER_VALID_NIBBLE BIT(2) ··· 488 477 "hpa=%llx region=%s region_uuid=%pUb", 489 478 __entry->dpa, show_dpa_flags(__entry->dpa_flags), 490 479 show_event_desc_flags(__entry->descriptor), 491 - show_mem_event_type(__entry->type), 480 + show_dram_mem_event_type(__entry->type), 492 481 show_trans_type(__entry->transaction_type), 493 482 __entry->channel, __entry->rank, __entry->nibble_mask, 494 483 __entry->bank_group, __entry->bank,
+2 -1
drivers/cxl/cxl.h
··· 359 359 struct cxl_region *region; 360 360 unsigned long flags; 361 361 int (*commit)(struct cxl_decoder *cxld); 362 - int (*reset)(struct cxl_decoder *cxld); 362 + void (*reset)(struct cxl_decoder *cxld); 363 363 }; 364 364 365 365 /* ··· 730 730 int cxl_num_decoders_committed(struct cxl_port *port); 731 731 bool is_cxl_port(const struct device *dev); 732 732 struct cxl_port *to_cxl_port(const struct device *dev); 733 + void cxl_port_commit_reap(struct cxl_decoder *cxld); 733 734 struct pci_bus; 734 735 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, 735 736 struct pci_bus *bus);
+16 -1
drivers/cxl/port.c
··· 208 208 }, 209 209 }; 210 210 211 - module_cxl_driver(cxl_port_driver); 211 + static int __init cxl_port_init(void) 212 + { 213 + return cxl_driver_register(&cxl_port_driver); 214 + } 215 + /* 216 + * Be ready to immediately enable ports emitted by the platform CXL root 217 + * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y. 218 + */ 219 + subsys_initcall(cxl_port_init); 220 + 221 + static void __exit cxl_port_exit(void) 222 + { 223 + cxl_driver_unregister(&cxl_port_driver); 224 + } 225 + module_exit(cxl_port_exit); 226 + 212 227 MODULE_DESCRIPTION("CXL: Port enumeration and services"); 213 228 MODULE_LICENSE("GPL v2"); 214 229 MODULE_IMPORT_NS(CXL);
+3
include/linux/device.h
··· 1078 1078 int (*fn)(struct device *dev, void *data)); 1079 1079 int device_for_each_child_reverse(struct device *dev, void *data, 1080 1080 int (*fn)(struct device *dev, void *data)); 1081 + int device_for_each_child_reverse_from(struct device *parent, 1082 + struct device *from, const void *data, 1083 + int (*fn)(struct device *, const void *)); 1081 1084 struct device *device_find_child(struct device *dev, void *data, 1082 1085 int (*match)(struct device *dev, void *data)); 1083 1086 struct device *device_find_child_by_name(struct device *parent,
+108 -92
tools/testing/cxl/test/cxl.c
··· 693 693 return 0; 694 694 } 695 695 696 - static int mock_decoder_reset(struct cxl_decoder *cxld) 696 + static void mock_decoder_reset(struct cxl_decoder *cxld) 697 697 { 698 698 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 699 699 int id = cxld->id; 700 700 701 701 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 702 - return 0; 702 + return; 703 703 704 704 dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev)); 705 - if (port->commit_end != id) { 705 + if (port->commit_end == id) 706 + cxl_port_commit_reap(cxld); 707 + else 706 708 dev_dbg(&port->dev, 707 709 "%s: out of order reset, expected decoder%d.%d\n", 708 710 dev_name(&cxld->dev), port->id, port->commit_end); 709 - return -EBUSY; 710 - } 711 - 712 - port->commit_end--; 713 711 cxld->flags &= ~CXL_DECODER_F_ENABLE; 714 - 715 - return 0; 716 712 } 717 713 718 714 static void default_mock_decoder(struct cxl_decoder *cxld) ··· 1058 1062 #define SZ_64G (SZ_32G * 2) 1059 1063 #endif 1060 1064 1061 - static __init int cxl_rch_init(void) 1065 + static __init int cxl_rch_topo_init(void) 1062 1066 { 1063 1067 int rc, i; 1064 1068 ··· 1086 1090 goto err_bridge; 1087 1091 } 1088 1092 1089 - for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) { 1090 - int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i; 1091 - struct platform_device *rch = cxl_rch[i]; 1092 - struct platform_device *pdev; 1093 - 1094 - pdev = platform_device_alloc("cxl_rcd", idx); 1095 - if (!pdev) 1096 - goto err_mem; 1097 - pdev->dev.parent = &rch->dev; 1098 - set_dev_node(&pdev->dev, i % 2); 1099 - 1100 - rc = platform_device_add(pdev); 1101 - if (rc) { 1102 - platform_device_put(pdev); 1103 - goto err_mem; 1104 - } 1105 - cxl_rcd[i] = pdev; 1106 - } 1107 - 1108 1093 return 0; 1109 1094 1110 - err_mem: 1111 - for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) 1112 - platform_device_unregister(cxl_rcd[i]); 1113 1095 err_bridge: 1114 1096 for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) { 1115 1097 struct platform_device *pdev = cxl_rch[i]; ··· 1101 1127 return 
rc; 1102 1128 } 1103 1129 1104 - static void cxl_rch_exit(void) 1130 + static void cxl_rch_topo_exit(void) 1105 1131 { 1106 1132 int i; 1107 1133 1108 - for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) 1109 - platform_device_unregister(cxl_rcd[i]); 1110 1134 for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) { 1111 1135 struct platform_device *pdev = cxl_rch[i]; 1112 1136 ··· 1115 1143 } 1116 1144 } 1117 1145 1118 - static __init int cxl_single_init(void) 1146 + static __init int cxl_single_topo_init(void) 1119 1147 { 1120 1148 int i, rc; 1121 1149 ··· 1200 1228 cxl_swd_single[i] = pdev; 1201 1229 } 1202 1230 1203 - for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) { 1204 - struct platform_device *dport = cxl_swd_single[i]; 1205 - struct platform_device *pdev; 1206 - 1207 - pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i); 1208 - if (!pdev) 1209 - goto err_mem; 1210 - pdev->dev.parent = &dport->dev; 1211 - set_dev_node(&pdev->dev, i % 2); 1212 - 1213 - rc = platform_device_add(pdev); 1214 - if (rc) { 1215 - platform_device_put(pdev); 1216 - goto err_mem; 1217 - } 1218 - cxl_mem_single[i] = pdev; 1219 - } 1220 - 1221 1231 return 0; 1222 1232 1223 - err_mem: 1224 - for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 1225 - platform_device_unregister(cxl_mem_single[i]); 1226 1233 err_dport: 1227 1234 for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--) 1228 1235 platform_device_unregister(cxl_swd_single[i]); ··· 1224 1273 return rc; 1225 1274 } 1226 1275 1227 - static void cxl_single_exit(void) 1276 + static void cxl_single_topo_exit(void) 1228 1277 { 1229 1278 int i; 1230 1279 1231 - for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 1232 - platform_device_unregister(cxl_mem_single[i]); 1233 1280 for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--) 1234 1281 platform_device_unregister(cxl_swd_single[i]); 1235 1282 for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--) ··· 1242 1293 sysfs_remove_link(&pdev->dev.kobj, "physical_node"); 1243 1294 
platform_device_unregister(cxl_hb_single[i]); 1244 1295 } 1296 + } 1297 + 1298 + static void cxl_mem_exit(void) 1299 + { 1300 + int i; 1301 + 1302 + for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) 1303 + platform_device_unregister(cxl_rcd[i]); 1304 + for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 1305 + platform_device_unregister(cxl_mem_single[i]); 1306 + for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1307 + platform_device_unregister(cxl_mem[i]); 1308 + } 1309 + 1310 + static int cxl_mem_init(void) 1311 + { 1312 + int i, rc; 1313 + 1314 + for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) { 1315 + struct platform_device *dport = cxl_switch_dport[i]; 1316 + struct platform_device *pdev; 1317 + 1318 + pdev = platform_device_alloc("cxl_mem", i); 1319 + if (!pdev) 1320 + goto err_mem; 1321 + pdev->dev.parent = &dport->dev; 1322 + set_dev_node(&pdev->dev, i % 2); 1323 + 1324 + rc = platform_device_add(pdev); 1325 + if (rc) { 1326 + platform_device_put(pdev); 1327 + goto err_mem; 1328 + } 1329 + cxl_mem[i] = pdev; 1330 + } 1331 + 1332 + for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) { 1333 + struct platform_device *dport = cxl_swd_single[i]; 1334 + struct platform_device *pdev; 1335 + 1336 + pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i); 1337 + if (!pdev) 1338 + goto err_single; 1339 + pdev->dev.parent = &dport->dev; 1340 + set_dev_node(&pdev->dev, i % 2); 1341 + 1342 + rc = platform_device_add(pdev); 1343 + if (rc) { 1344 + platform_device_put(pdev); 1345 + goto err_single; 1346 + } 1347 + cxl_mem_single[i] = pdev; 1348 + } 1349 + 1350 + for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) { 1351 + int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i; 1352 + struct platform_device *rch = cxl_rch[i]; 1353 + struct platform_device *pdev; 1354 + 1355 + pdev = platform_device_alloc("cxl_rcd", idx); 1356 + if (!pdev) 1357 + goto err_rcd; 1358 + pdev->dev.parent = &rch->dev; 1359 + set_dev_node(&pdev->dev, i % 2); 1360 + 1361 + rc = platform_device_add(pdev); 1362 + if (rc) { 1363 + 
platform_device_put(pdev); 1364 + goto err_rcd; 1365 + } 1366 + cxl_rcd[i] = pdev; 1367 + } 1368 + 1369 + return 0; 1370 + 1371 + err_rcd: 1372 + for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) 1373 + platform_device_unregister(cxl_rcd[i]); 1374 + err_single: 1375 + for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 1376 + platform_device_unregister(cxl_mem_single[i]); 1377 + err_mem: 1378 + for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1379 + platform_device_unregister(cxl_mem[i]); 1380 + return rc; 1245 1381 } 1246 1382 1247 1383 static __init int cxl_test_init(void) ··· 1441 1407 cxl_switch_dport[i] = pdev; 1442 1408 } 1443 1409 1444 - for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) { 1445 - struct platform_device *dport = cxl_switch_dport[i]; 1446 - struct platform_device *pdev; 1447 - 1448 - pdev = platform_device_alloc("cxl_mem", i); 1449 - if (!pdev) 1450 - goto err_mem; 1451 - pdev->dev.parent = &dport->dev; 1452 - set_dev_node(&pdev->dev, i % 2); 1453 - 1454 - rc = platform_device_add(pdev); 1455 - if (rc) { 1456 - platform_device_put(pdev); 1457 - goto err_mem; 1458 - } 1459 - cxl_mem[i] = pdev; 1460 - } 1461 - 1462 - rc = cxl_single_init(); 1410 + rc = cxl_single_topo_init(); 1463 1411 if (rc) 1464 - goto err_mem; 1412 + goto err_dport; 1465 1413 1466 - rc = cxl_rch_init(); 1414 + rc = cxl_rch_topo_init(); 1467 1415 if (rc) 1468 1416 goto err_single; 1469 1417 ··· 1458 1442 1459 1443 rc = platform_device_add(cxl_acpi); 1460 1444 if (rc) 1461 - goto err_add; 1445 + goto err_root; 1446 + 1447 + rc = cxl_mem_init(); 1448 + if (rc) 1449 + goto err_root; 1462 1450 1463 1451 return 0; 1464 1452 1465 - err_add: 1453 + err_root: 1466 1454 platform_device_put(cxl_acpi); 1467 1455 err_rch: 1468 - cxl_rch_exit(); 1456 + cxl_rch_topo_exit(); 1469 1457 err_single: 1470 - cxl_single_exit(); 1471 - err_mem: 1472 - for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1473 - platform_device_unregister(cxl_mem[i]); 1458 + cxl_single_topo_exit(); 1474 1459 err_dport: 1475 1460 for 
(i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--) 1476 1461 platform_device_unregister(cxl_switch_dport[i]); ··· 1503 1486 { 1504 1487 int i; 1505 1488 1489 + cxl_mem_exit(); 1506 1490 platform_device_unregister(cxl_acpi); 1507 - cxl_rch_exit(); 1508 - cxl_single_exit(); 1509 - for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1510 - platform_device_unregister(cxl_mem[i]); 1491 + cxl_rch_topo_exit(); 1492 + cxl_single_topo_exit(); 1511 1493 for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--) 1512 1494 platform_device_unregister(cxl_switch_dport[i]); 1513 1495 for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
+1
tools/testing/cxl/test/mem.c
··· 1673 1673 .name = KBUILD_MODNAME, 1674 1674 .dev_groups = cxl_mock_mem_groups, 1675 1675 .groups = cxl_mock_mem_core_groups, 1676 + .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1676 1677 }, 1677 1678 }; 1678 1679