Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dax/hmem: Move hmem device registration to dax_hmem.ko

In preparation for the CXL region driver to take over the responsibility
of registering device-dax instances for CXL regions, move the
registration of "hmem" devices to dax_hmem.ko.

Previously the builtin component of this enabling
(drivers/dax/hmem/device.o) would register platform devices for each
address range and trigger the dax_hmem.ko module to load and attach
device-dax instances to those devices. Now, the ranges are collected
from the HMAT and EFI memory map walking, but the device creation is
deferred. A new "hmem_platform" device is created which triggers
dax_hmem.ko to load and register the platform devices.

Tested-by: Fan Ni <fan.ni@samsung.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/167602002771.1924368.5653558226424530127.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+157 -54
+1 -1
drivers/acpi/numa/hmat.c
··· 718 718 for (res = target->memregions.child; res; res = res->sibling) { 719 719 int target_nid = pxm_to_node(target->memory_pxm); 720 720 721 - hmem_register_device(target_nid, res); 721 + hmem_register_resource(target_nid, res); 722 722 } 723 723 } 724 724
+1 -1
drivers/dax/Kconfig
··· 46 46 Say M if unsure. 47 47 48 48 config DEV_DAX_HMEM_DEVICES 49 - depends on DEV_DAX_HMEM && DAX=y 49 + depends on DEV_DAX_HMEM && DAX 50 50 def_bool y 51 51 52 52 config DEV_DAX_KMEM
+46 -49
drivers/dax/hmem/device.c
··· 8 8 static bool nohmem; 9 9 module_param_named(disable, nohmem, bool, 0444); 10 10 11 + static bool platform_initialized; 12 + static DEFINE_MUTEX(hmem_resource_lock); 11 13 static struct resource hmem_active = { 12 14 .name = "HMEM devices", 13 15 .start = 0, ··· 17 15 .flags = IORESOURCE_MEM, 18 16 }; 19 17 20 - void hmem_register_device(int target_nid, struct resource *res) 18 + int walk_hmem_resources(struct device *host, walk_hmem_fn fn) 19 + { 20 + struct resource *res; 21 + int rc = 0; 22 + 23 + mutex_lock(&hmem_resource_lock); 24 + for (res = hmem_active.child; res; res = res->sibling) { 25 + rc = fn(host, (int) res->desc, res); 26 + if (rc) 27 + break; 28 + } 29 + mutex_unlock(&hmem_resource_lock); 30 + return rc; 31 + } 32 + EXPORT_SYMBOL_GPL(walk_hmem_resources); 33 + 34 + static void __hmem_register_resource(int target_nid, struct resource *res) 21 35 { 22 36 struct platform_device *pdev; 23 - struct memregion_info info; 24 - int rc, id; 37 + struct resource *new; 38 + int rc; 25 39 26 - if (nohmem) 27 - return; 28 - 29 - rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM, 30 - IORES_DESC_SOFT_RESERVED); 31 - if (rc != REGION_INTERSECTS) 32 - return; 33 - 34 - id = memregion_alloc(GFP_KERNEL); 35 - if (id < 0) { 36 - pr_err("memregion allocation failure for %pr\n", res); 40 + new = __request_region(&hmem_active, res->start, resource_size(res), "", 41 + 0); 42 + if (!new) { 43 + pr_debug("hmem range %pr already active\n", res); 37 44 return; 38 45 } 39 46 40 - pdev = platform_device_alloc("hmem", id); 47 + new->desc = target_nid; 48 + 49 + if (platform_initialized) 50 + return; 51 + 52 + pdev = platform_device_alloc("hmem_platform", 0); 41 53 if (!pdev) { 42 - pr_err("hmem device allocation failure for %pr\n", res); 43 - goto out_pdev; 44 - } 45 - 46 - if (!__request_region(&hmem_active, res->start, resource_size(res), 47 - dev_name(&pdev->dev), 0)) { 48 - dev_dbg(&pdev->dev, "hmem range %pr already active\n", res); 49 - goto out_active; 50 - } 51 - 52 - pdev->dev.numa_node = numa_map_to_online_node(target_nid); 53 - info = (struct memregion_info) { 54 - .target_node = target_nid, 55 - .range = { 56 - .start = res->start, 57 - .end = res->end, 58 - }, 59 - }; 60 - rc = platform_device_add_data(pdev, &info, sizeof(info)); 61 - if (rc < 0) { 62 - pr_err("hmem memregion_info allocation failure for %pr\n", res); 63 - goto out_resource; 54 + pr_err_once("failed to register device-dax hmem_platform device\n"); 55 + return; 64 56 } 65 57 66 58 rc = platform_device_add(pdev); 67 - if (rc < 0) { 68 - dev_err(&pdev->dev, "device add failed for %pr\n", res); 69 - goto out_resource; 70 - } 59 + if (rc) 60 + platform_device_put(pdev); 61 + else 62 + platform_initialized = true; 63 + } 71 64 72 - return; 65 + void hmem_register_resource(int target_nid, struct resource *res) 66 + { 67 + if (nohmem) 68 + return; 73 69 74 - out_resource: 75 - __release_region(&hmem_active, res->start, resource_size(res)); 76 - out_active: 77 - platform_device_put(pdev); 78 - out_pdev: 79 - memregion_free(id); 70 + mutex_lock(&hmem_resource_lock); 71 + __hmem_register_resource(target_nid, res); 72 + mutex_unlock(&hmem_resource_lock); 80 73 } 81 74 82 75 static __init int hmem_register_one(struct resource *res, void *data) 83 76 { 84 - hmem_register_device(phys_to_target_node(res->start), res); 77 + hmem_register_resource(phys_to_target_node(res->start), res); 85 78 86 79 return 0; 87 80 }
+104 -1
drivers/dax/hmem/hmem.c
··· 3 3 #include <linux/memregion.h> 4 4 #include <linux/module.h> 5 5 #include <linux/pfn_t.h> 6 + #include <linux/dax.h> 6 7 #include "../bus.h" 7 8 8 9 static bool region_idle; ··· 44 43 }, 45 44 }; 46 45 47 - module_platform_driver(dax_hmem_driver); 46 + static void release_memregion(void *data) 47 + { 48 + memregion_free((long) data); 49 + } 50 + 51 + static void release_hmem(void *pdev) 52 + { 53 + platform_device_unregister(pdev); 54 + } 55 + 56 + static int hmem_register_device(struct device *host, int target_nid, 57 + const struct resource *res) 58 + { 59 + struct platform_device *pdev; 60 + struct memregion_info info; 61 + long id; 62 + int rc; 63 + 64 + rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM, 65 + IORES_DESC_SOFT_RESERVED); 66 + if (rc != REGION_INTERSECTS) 67 + return 0; 68 + 69 + id = memregion_alloc(GFP_KERNEL); 70 + if (id < 0) { 71 + dev_err(host, "memregion allocation failure for %pr\n", res); 72 + return -ENOMEM; 73 + } 74 + rc = devm_add_action_or_reset(host, release_memregion, (void *) id); 75 + if (rc) 76 + return rc; 77 + 78 + pdev = platform_device_alloc("hmem", id); 79 + if (!pdev) { 80 + dev_err(host, "device allocation failure for %pr\n", res); 81 + return -ENOMEM; 82 + } 83 + 84 + pdev->dev.numa_node = numa_map_to_online_node(target_nid); 85 + info = (struct memregion_info) { 86 + .target_node = target_nid, 87 + .range = { 88 + .start = res->start, 89 + .end = res->end, 90 + }, 91 + }; 92 + rc = platform_device_add_data(pdev, &info, sizeof(info)); 93 + if (rc < 0) { 94 + dev_err(host, "memregion_info allocation failure for %pr\n", 95 + res); 96 + goto out_put; 97 + } 98 + 99 + rc = platform_device_add(pdev); 100 + if (rc < 0) { 101 + dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev), 102 + res); 103 + goto out_put; 104 + } 105 + 106 + return devm_add_action_or_reset(host, release_hmem, pdev); 107 + 108 + out_put: 109 + platform_device_put(pdev); 110 + return rc; 111 + } 112 + 113 + static int dax_hmem_platform_probe(struct platform_device *pdev) 114 + { 115 + return walk_hmem_resources(&pdev->dev, hmem_register_device); 116 + } 117 + 118 + static struct platform_driver dax_hmem_platform_driver = { 119 + .probe = dax_hmem_platform_probe, 120 + .driver = { 121 + .name = "hmem_platform", 122 + }, 123 + }; 124 + 125 + static __init int dax_hmem_init(void) 126 + { 127 + int rc; 128 + 129 + rc = platform_driver_register(&dax_hmem_platform_driver); 130 + if (rc) 131 + return rc; 132 + 133 + rc = platform_driver_register(&dax_hmem_driver); 134 + if (rc) 135 + platform_driver_unregister(&dax_hmem_platform_driver); 136 + 137 + return rc; 138 + } 139 + 140 + static __exit void dax_hmem_exit(void) 141 + { 142 + platform_driver_unregister(&dax_hmem_driver); 143 + platform_driver_unregister(&dax_hmem_platform_driver); 144 + } 145 + 146 + module_init(dax_hmem_init); 147 + module_exit(dax_hmem_exit); 48 148 49 149 MODULE_ALIAS("platform:hmem*"); 150 + MODULE_ALIAS("platform:hmem_platform*"); 50 151 MODULE_LICENSE("GPL v2"); 51 152 MODULE_AUTHOR("Intel Corporation");
+5 -2
include/linux/dax.h
··· 262 262 } 263 263 264 264 #ifdef CONFIG_DEV_DAX_HMEM_DEVICES 265 - void hmem_register_device(int target_nid, struct resource *r); 265 + void hmem_register_resource(int target_nid, struct resource *r); 266 266 #else 267 - static inline void hmem_register_device(int target_nid, struct resource *r) 267 + static inline void hmem_register_resource(int target_nid, struct resource *r) 268 268 { 269 269 } 270 270 #endif 271 271 272 + typedef int (*walk_hmem_fn)(struct device *dev, int target_nid, 273 + const struct resource *res); 274 + int walk_hmem_resources(struct device *dev, walk_hmem_fn fn); 272 275 #endif