Merge tag 'libnvdimm-for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Ira Weiny:
"Primarily bug fixes. Dave introduced the usage of cleanup.h a bit late
in the cycle to help with the new label work required within CXL [1]

nvdimm:
- Return -ENOMEM if devm_kcalloc() fails in ndtest_probe()
- Clean up __nd_ioctl() and remove gotos
- Remove duplicate linux/slab.h header
- Introduce guard() for nvdimm_bus_lock
- Use str_plural() to simplify the code

ACPI:
- NFIT: Fix incorrect ndr_desc being reported in dev_err message"

Link: https://lore.kernel.org/all/20250917134116.1623730-1-s.neeraj@samsung.com/ [1]

* tag 'libnvdimm-for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
nvdimm: Remove duplicate linux/slab.h header
nvdimm: ndtest: Return -ENOMEM if devm_kcalloc() fails in ndtest_probe()
nvdimm: Clean up __nd_ioctl() and remove gotos
nvdimm: Introduce guard() for nvdimm_bus_lock
ACPI: NFIT: Fix incorrect ndr_desc being reported in dev_err message
nvdimm: Use str_plural() to simplify the code

+232 -300
+1 -1
drivers/acpi/nfit/core.c
··· 2637 2637 if (ndr_desc->target_node == NUMA_NO_NODE) { 2638 2638 ndr_desc->target_node = phys_to_target_node(spa->address); 2639 2639 dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", 2640 - NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 2640 + NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end); 2641 2641 } 2642 2642 2643 2643 /*
+1 -2
drivers/nvdimm/badrange.c
··· 278 278 } 279 279 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 280 280 281 - nvdimm_bus_lock(&nvdimm_bus->dev); 281 + guard(nvdimm_bus)(&nvdimm_bus->dev); 282 282 badblocks_populate(&nvdimm_bus->badrange, bb, range); 283 - nvdimm_bus_unlock(&nvdimm_bus->dev); 284 283 } 285 284 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
+8 -16
drivers/nvdimm/btt_devs.c
··· 50 50 struct nd_btt *nd_btt = to_nd_btt(dev); 51 51 ssize_t rc; 52 52 53 - device_lock(dev); 54 - nvdimm_bus_lock(dev); 53 + guard(device)(dev); 54 + guard(nvdimm_bus)(dev); 55 55 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, 56 56 btt_lbasize_supported); 57 57 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 58 58 buf[len - 1] == '\n' ? "" : "\n"); 59 - nvdimm_bus_unlock(dev); 60 - device_unlock(dev); 61 59 62 60 return rc ? rc : len; 63 61 } ··· 91 93 struct device_attribute *attr, char *buf) 92 94 { 93 95 struct nd_btt *nd_btt = to_nd_btt(dev); 94 - ssize_t rc; 95 96 96 - nvdimm_bus_lock(dev); 97 - rc = sprintf(buf, "%s\n", nd_btt->ndns 97 + guard(nvdimm_bus)(dev); 98 + return sprintf(buf, "%s\n", nd_btt->ndns 98 99 ? dev_name(&nd_btt->ndns->dev) : ""); 99 - nvdimm_bus_unlock(dev); 100 - return rc; 101 100 } 102 101 103 102 static ssize_t namespace_store(struct device *dev, ··· 103 108 struct nd_btt *nd_btt = to_nd_btt(dev); 104 109 ssize_t rc; 105 110 106 - device_lock(dev); 107 - nvdimm_bus_lock(dev); 111 + guard(device)(dev); 112 + guard(nvdimm_bus)(dev); 108 113 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 109 114 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 110 115 buf[len - 1] == '\n' ? "" : "\n"); 111 - nvdimm_bus_unlock(dev); 112 - device_unlock(dev); 113 116 114 117 return rc; 115 118 } ··· 344 351 return -ENODEV; 345 352 } 346 353 347 - nvdimm_bus_lock(&ndns->dev); 348 - btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); 349 - nvdimm_bus_unlock(&ndns->dev); 354 + scoped_guard(nvdimm_bus, &ndns->dev) 355 + btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); 350 356 if (!btt_dev) 351 357 return -ENOMEM; 352 358 btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
+24 -48
drivers/nvdimm/bus.c
··· 5 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 6 #include <linux/libnvdimm.h> 7 7 #include <linux/sched/mm.h> 8 - #include <linux/vmalloc.h> 8 + #include <linux/slab.h> 9 9 #include <linux/uaccess.h> 10 10 #include <linux/module.h> 11 11 #include <linux/blkdev.h> ··· 13 13 #include <linux/async.h> 14 14 #include <linux/ndctl.h> 15 15 #include <linux/sched.h> 16 - #include <linux/slab.h> 17 16 #include <linux/cpu.h> 18 17 #include <linux/fs.h> 19 18 #include <linux/io.h> ··· 63 64 64 65 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus) 65 66 { 66 - nvdimm_bus_lock(&nvdimm_bus->dev); 67 + guard(nvdimm_bus)(&nvdimm_bus->dev); 67 68 nvdimm_bus->probe_active++; 68 - nvdimm_bus_unlock(&nvdimm_bus->dev); 69 69 } 70 70 71 71 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) 72 72 { 73 - nvdimm_bus_lock(&nvdimm_bus->dev); 73 + guard(nvdimm_bus)(&nvdimm_bus->dev); 74 74 if (--nvdimm_bus->probe_active == 0) 75 75 wake_up(&nvdimm_bus->wait); 76 - nvdimm_bus_unlock(&nvdimm_bus->dev); 77 76 } 78 77 79 78 static int nvdimm_bus_probe(struct device *dev) ··· 1028 1031 unsigned int cmd = _IOC_NR(ioctl_cmd); 1029 1032 struct device *dev = &nvdimm_bus->dev; 1030 1033 void __user *p = (void __user *) arg; 1031 - char *out_env = NULL, *in_env = NULL; 1032 1034 const char *cmd_name, *dimm_name; 1033 1035 u32 in_len = 0, out_len = 0; 1034 1036 unsigned int func = cmd; 1035 1037 unsigned long cmd_mask; 1036 1038 struct nd_cmd_pkg pkg; 1037 1039 int rc, i, cmd_rc; 1038 - void *buf = NULL; 1039 1040 u64 buf_len = 0; 1040 1041 1041 1042 if (nvdimm) { ··· 1092 1097 } 1093 1098 1094 1099 /* process an input envelope */ 1095 - in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1100 + char *in_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1096 1101 if (!in_env) 1097 1102 return -ENOMEM; 1098 1103 for (i = 0; i < desc->in_num; i++) { ··· 1102 1107 if (in_size == UINT_MAX) { 1103 1108 dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", 
1104 1109 __func__, dimm_name, cmd_name, i); 1105 - rc = -ENXIO; 1106 - goto out; 1110 + return -ENXIO; 1107 1111 } 1108 1112 if (in_len < ND_CMD_MAX_ENVELOPE) 1109 1113 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); 1110 1114 else 1111 1115 copy = 0; 1112 - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { 1113 - rc = -EFAULT; 1114 - goto out; 1115 - } 1116 + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) 1117 + return -EFAULT; 1116 1118 in_len += in_size; 1117 1119 } 1118 1120 ··· 1121 1129 } 1122 1130 1123 1131 /* process an output envelope */ 1124 - out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1125 - if (!out_env) { 1126 - rc = -ENOMEM; 1127 - goto out; 1128 - } 1132 + char *out_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1133 + if (!out_env) 1134 + return -ENOMEM; 1129 1135 1130 1136 for (i = 0; i < desc->out_num; i++) { 1131 1137 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, ··· 1133 1143 if (out_size == UINT_MAX) { 1134 1144 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", 1135 1145 dimm_name, cmd_name, i); 1136 - rc = -EFAULT; 1137 - goto out; 1146 + return -EFAULT; 1138 1147 } 1139 1148 if (out_len < ND_CMD_MAX_ENVELOPE) 1140 1149 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); ··· 1141 1152 copy = 0; 1142 1153 if (copy && copy_from_user(&out_env[out_len], 1143 1154 p + in_len + out_len, copy)) { 1144 - rc = -EFAULT; 1145 - goto out; 1155 + return -EFAULT; 1146 1156 } 1147 1157 out_len += out_size; 1148 1158 } ··· 1150 1162 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 1151 1163 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, 1152 1164 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); 1153 - rc = -EINVAL; 1154 - goto out; 1165 + return -EINVAL; 1155 1166 } 1156 1167 1157 - buf = vmalloc(buf_len); 1158 - if (!buf) { 1159 - rc = -ENOMEM; 1160 - goto out; 1161 - } 1168 + void *buf __free(kvfree) = kvzalloc(buf_len, GFP_KERNEL); 1169 + if (!buf) 1170 + return -ENOMEM; 1162 
1171 1163 - if (copy_from_user(buf, p, buf_len)) { 1164 - rc = -EFAULT; 1165 - goto out; 1166 - } 1172 + if (copy_from_user(buf, p, buf_len)) 1173 + return -EFAULT; 1167 1174 1168 - device_lock(dev); 1169 - nvdimm_bus_lock(dev); 1175 + guard(device)(dev); 1176 + guard(nvdimm_bus)(dev); 1170 1177 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); 1171 1178 if (rc) 1172 - goto out_unlock; 1179 + return rc; 1173 1180 1174 1181 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc); 1175 1182 if (rc < 0) 1176 - goto out_unlock; 1183 + return rc; 1177 1184 1178 1185 if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) { 1179 1186 struct nd_cmd_clear_error *clear_err = buf; ··· 1178 1195 } 1179 1196 1180 1197 if (copy_to_user(p, buf, buf_len)) 1181 - rc = -EFAULT; 1198 + return -EFAULT; 1182 1199 1183 - out_unlock: 1184 - nvdimm_bus_unlock(dev); 1185 - device_unlock(dev); 1186 - out: 1187 - kfree(in_env); 1188 - kfree(out_env); 1189 - vfree(buf); 1190 - return rc; 1200 + return 0; 1191 1201 } 1192 1202 1193 1203 enum nd_ioctl_mode {
+3 -4
drivers/nvdimm/claim.c
··· 34 34 35 35 if (!ndns) 36 36 return; 37 - get_device(&ndns->dev); 38 - nvdimm_bus_lock(&ndns->dev); 37 + 38 + struct device *ndev __free(put_device) = get_device(&ndns->dev); 39 + guard(nvdimm_bus)(ndev); 39 40 __nd_detach_ndns(dev, _ndns); 40 - nvdimm_bus_unlock(&ndns->dev); 41 - put_device(&ndns->dev); 42 41 } 43 42 44 43 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
+8 -9
drivers/nvdimm/core.c
··· 141 141 struct nvdimm_map *nvdimm_map = data; 142 142 struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus; 143 143 144 - nvdimm_bus_lock(&nvdimm_bus->dev); 144 + guard(nvdimm_bus)(&nvdimm_bus->dev); 145 145 kref_put(&nvdimm_map->kref, nvdimm_map_release); 146 - nvdimm_bus_unlock(&nvdimm_bus->dev); 147 146 } 148 147 149 148 /** ··· 157 158 { 158 159 struct nvdimm_map *nvdimm_map; 159 160 160 - nvdimm_bus_lock(dev); 161 - nvdimm_map = find_nvdimm_map(dev, offset); 162 - if (!nvdimm_map) 163 - nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); 164 - else 165 - kref_get(&nvdimm_map->kref); 166 - nvdimm_bus_unlock(dev); 161 + scoped_guard(nvdimm_bus, dev) { 162 + nvdimm_map = find_nvdimm_map(dev, offset); 163 + if (!nvdimm_map) 164 + nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); 165 + else 166 + kref_get(&nvdimm_map->kref); 167 + } 167 168 168 169 if (!nvdimm_map) 169 170 return NULL;
+6 -6
drivers/nvdimm/dax_devs.c
··· 104 104 return -ENODEV; 105 105 } 106 106 107 - nvdimm_bus_lock(&ndns->dev); 108 - nd_dax = nd_dax_alloc(nd_region); 109 - dax_dev = nd_dax_devinit(nd_dax, ndns); 110 - nvdimm_bus_unlock(&ndns->dev); 111 - if (!dax_dev) 112 - return -ENOMEM; 107 + scoped_guard(nvdimm_bus, &ndns->dev) { 108 + nd_dax = nd_dax_alloc(nd_region); 109 + dax_dev = nd_dax_devinit(nd_dax, ndns); 110 + if (!dax_dev) 111 + return -ENOMEM; 112 + } 113 113 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); 114 114 nd_pfn = &nd_dax->nd_pfn; 115 115 nd_pfn->pfn_sb = pfn_sb;
+2 -3
drivers/nvdimm/dimm.c
··· 117 117 { 118 118 struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); 119 119 120 - nvdimm_bus_lock(dev); 121 - dev_set_drvdata(dev, NULL); 122 - nvdimm_bus_unlock(dev); 120 + scoped_guard(nvdimm_bus, dev) 121 + dev_set_drvdata(dev, NULL); 123 122 put_ndd(ndd); 124 123 } 125 124
+17 -31
drivers/nvdimm/dimm_devs.c
··· 226 226 struct resource *res, *_r; 227 227 228 228 dev_dbg(dev, "trace\n"); 229 - nvdimm_bus_lock(dev); 230 - for_each_dpa_resource_safe(ndd, res, _r) 231 - nvdimm_free_dpa(ndd, res); 232 - nvdimm_bus_unlock(dev); 229 + scoped_guard(nvdimm_bus, dev) { 230 + for_each_dpa_resource_safe(ndd, res, _r) 231 + nvdimm_free_dpa(ndd, res); 232 + } 233 233 234 234 kvfree(ndd->data); 235 235 kfree(ndd); ··· 319 319 static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) 320 320 { 321 321 struct device *dev; 322 - ssize_t rc; 323 322 u32 nfree; 324 323 325 324 if (!ndd) 326 325 return -ENXIO; 327 326 328 327 dev = ndd->dev; 329 - nvdimm_bus_lock(dev); 328 + guard(nvdimm_bus)(dev); 330 329 nfree = nd_label_nfree(ndd); 331 330 if (nfree - 1 > nfree) { 332 331 dev_WARN_ONCE(dev, 1, "we ate our last label?\n"); 333 332 nfree = 0; 334 333 } else 335 334 nfree--; 336 - rc = sprintf(buf, "%d\n", nfree); 337 - nvdimm_bus_unlock(dev); 338 - return rc; 335 + return sprintf(buf, "%d\n", nfree); 339 336 } 340 337 341 338 static ssize_t available_slots_show(struct device *dev, ··· 385 388 struct device_attribute *attr, const char *buf, size_t len) 386 389 387 390 { 388 - ssize_t rc; 389 - 390 391 /* 391 392 * Require all userspace triggered security management to be 392 393 * done while probing is idle and the DIMM is not in active use 393 394 * in any region. 
394 395 */ 395 - device_lock(dev); 396 - nvdimm_bus_lock(dev); 396 + guard(device)(dev); 397 + guard(nvdimm_bus)(dev); 397 398 wait_nvdimm_bus_probe_idle(dev); 398 - rc = nvdimm_security_store(dev, buf, len); 399 - nvdimm_bus_unlock(dev); 400 - device_unlock(dev); 401 - 402 - return rc; 399 + return nvdimm_security_store(dev, buf, len); 403 400 } 404 401 static DEVICE_ATTR_RW(security); 405 402 ··· 445 454 if (!nvdimm->fw_ops) 446 455 return -EOPNOTSUPP; 447 456 448 - nvdimm_bus_lock(dev); 457 + guard(nvdimm_bus)(dev); 449 458 result = nvdimm->fw_ops->activate_result(nvdimm); 450 - nvdimm_bus_unlock(dev); 451 459 452 460 switch (result) { 453 461 case NVDIMM_FWA_RESULT_NONE: ··· 473 483 if (!nvdimm->fw_ops) 474 484 return -EOPNOTSUPP; 475 485 476 - nvdimm_bus_lock(dev); 486 + guard(nvdimm_bus)(dev); 477 487 state = nvdimm->fw_ops->activate_state(nvdimm); 478 - nvdimm_bus_unlock(dev); 479 488 480 489 switch (state) { 481 490 case NVDIMM_FWA_IDLE: ··· 505 516 else 506 517 return -EINVAL; 507 518 508 - nvdimm_bus_lock(dev); 519 + guard(nvdimm_bus)(dev); 509 520 rc = nvdimm->fw_ops->arm(nvdimm, arg); 510 - nvdimm_bus_unlock(dev); 511 521 512 522 if (rc < 0) 513 523 return rc; ··· 533 545 if (!nvdimm->fw_ops) 534 546 return 0; 535 547 536 - nvdimm_bus_lock(dev); 548 + guard(nvdimm_bus)(dev); 537 549 cap = nd_desc->fw_ops->capability(nd_desc); 538 - nvdimm_bus_unlock(dev); 539 550 540 551 if (cap < NVDIMM_FWA_CAP_QUIESCE) 541 552 return 0; ··· 628 641 bool dev_put = false; 629 642 630 643 /* We are shutting down. Make state frozen artificially. 
*/ 631 - nvdimm_bus_lock(dev); 632 - set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); 633 - if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags)) 634 - dev_put = true; 635 - nvdimm_bus_unlock(dev); 644 + scoped_guard(nvdimm_bus, dev) { 645 + set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); 646 + dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags); 647 + } 636 648 cancel_delayed_work_sync(&nvdimm->dwork); 637 649 if (dev_put) 638 650 put_device(dev);
+61 -60
drivers/nvdimm/namespace_devs.c
··· 264 264 struct nd_region *nd_region = to_nd_region(dev->parent); 265 265 ssize_t rc; 266 266 267 - device_lock(dev); 268 - nvdimm_bus_lock(dev); 267 + guard(device)(dev); 268 + guard(nvdimm_bus)(dev); 269 269 wait_nvdimm_bus_probe_idle(dev); 270 270 rc = __alt_name_store(dev, buf, len); 271 271 if (rc >= 0) 272 272 rc = nd_namespace_label_update(nd_region, dev); 273 273 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 274 - nvdimm_bus_unlock(dev); 275 - device_unlock(dev); 276 274 277 275 return rc < 0 ? rc : len; 278 276 } ··· 847 849 if (rc) 848 850 return rc; 849 851 850 - device_lock(dev); 851 - nvdimm_bus_lock(dev); 852 + guard(device)(dev); 853 + guard(nvdimm_bus)(dev); 852 854 wait_nvdimm_bus_probe_idle(dev); 853 855 rc = __size_store(dev, val); 854 856 if (rc >= 0) ··· 863 865 } 864 866 865 867 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); 866 - 867 - nvdimm_bus_unlock(dev); 868 - device_unlock(dev); 869 868 870 869 return rc < 0 ? rc : len; 871 870 } ··· 886 891 887 892 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) 888 893 { 889 - resource_size_t size; 890 - 891 - nvdimm_bus_lock(&ndns->dev); 892 - size = __nvdimm_namespace_capacity(ndns); 893 - nvdimm_bus_unlock(&ndns->dev); 894 - 895 - return size; 894 + guard(nvdimm_bus)(&ndns->dev); 895 + return __nvdimm_namespace_capacity(ndns); 896 896 } 897 897 EXPORT_SYMBOL(nvdimm_namespace_capacity); 898 898 ··· 1034 1044 } else 1035 1045 return -ENXIO; 1036 1046 1037 - device_lock(dev); 1038 - nvdimm_bus_lock(dev); 1047 + guard(device)(dev); 1048 + guard(nvdimm_bus)(dev); 1039 1049 wait_nvdimm_bus_probe_idle(dev); 1040 1050 if (to_ndns(dev)->claim) 1041 1051 rc = -EBUSY; ··· 1049 1059 kfree(uuid); 1050 1060 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 1051 1061 buf[len - 1] == '\n' ? "" : "\n"); 1052 - nvdimm_bus_unlock(dev); 1053 - device_unlock(dev); 1054 1062 1055 1063 return rc < 0 ? 
rc : len; 1056 1064 } ··· 1107 1119 } else 1108 1120 return -ENXIO; 1109 1121 1110 - device_lock(dev); 1111 - nvdimm_bus_lock(dev); 1112 - if (to_ndns(dev)->claim) 1113 - rc = -EBUSY; 1114 - if (rc >= 0) 1115 - rc = nd_size_select_store(dev, buf, lbasize, supported); 1116 - if (rc >= 0) 1117 - rc = nd_namespace_label_update(nd_region, dev); 1118 - dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", 1119 - buf, buf[len - 1] == '\n' ? "" : "\n"); 1120 - nvdimm_bus_unlock(dev); 1121 - device_unlock(dev); 1122 + guard(device)(dev); 1123 + guard(nvdimm_bus)(dev); 1124 + if (to_ndns(dev)->claim) { 1125 + dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev)); 1126 + return -EBUSY; 1127 + } 1122 1128 1123 - return rc ? rc : len; 1129 + rc = nd_size_select_store(dev, buf, lbasize, supported); 1130 + if (rc < 0) { 1131 + dev_dbg(dev, "size select fail: %zd tried: %s%s", rc, 1132 + buf, buf[len - 1] == '\n' ? "" : "\n"); 1133 + return rc; 1134 + } 1135 + 1136 + rc = nd_namespace_label_update(nd_region, dev); 1137 + if (rc < 0) { 1138 + dev_dbg(dev, "label update fail: %zd tried: %s%s", 1139 + rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 1140 + return rc; 1141 + } 1142 + 1143 + dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? 
"" : "\n"); 1144 + 1145 + return len; 1124 1146 } 1125 1147 static DEVICE_ATTR_RW(sector_size); 1126 1148 ··· 1143 1145 int count = 0, i; 1144 1146 u32 flags = 0; 1145 1147 1146 - nvdimm_bus_lock(dev); 1148 + guard(nvdimm_bus)(dev); 1147 1149 if (is_namespace_pmem(dev)) { 1148 1150 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1149 1151 ··· 1152 1154 } 1153 1155 1154 1156 if (!uuid) 1155 - goto out; 1157 + return sprintf(buf, "%d\n", count); 1156 1158 1157 1159 nd_label_gen_id(&label_id, uuid, flags); 1158 1160 for (i = 0; i < nd_region->ndr_mappings; i++) { ··· 1164 1166 if (strcmp(res->name, label_id.id) == 0) 1165 1167 count++; 1166 1168 } 1167 - out: 1168 - nvdimm_bus_unlock(dev); 1169 1169 1170 1170 return sprintf(buf, "%d\n", count); 1171 1171 } ··· 1275 1279 struct nd_region *nd_region = to_nd_region(dev->parent); 1276 1280 int rc; 1277 1281 1278 - device_lock(dev); 1279 - nvdimm_bus_lock(dev); 1282 + guard(device)(dev); 1283 + guard(nvdimm_bus)(dev); 1280 1284 wait_nvdimm_bus_probe_idle(dev); 1281 1285 rc = __holder_class_store(dev, buf); 1282 1286 if (rc >= 0) 1283 1287 rc = nd_namespace_label_update(nd_region, dev); 1284 1288 dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); 1285 - nvdimm_bus_unlock(dev); 1286 - device_unlock(dev); 1287 1289 1288 1290 return rc < 0 ? rc : len; 1289 1291 } ··· 1977 1983 } 1978 1984 1979 1985 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, 1980 - count == 1 ? 
"" : "s"); 1986 + str_plural(count)); 1981 1987 1982 1988 if (count == 0) { 1983 1989 struct nd_namespace_pmem *nspm; ··· 2146 2152 nd_region); 2147 2153 } 2148 2154 2155 + static int create_relevant_namespaces(struct nd_region *nd_region, int *type, 2156 + struct device ***devs) 2157 + { 2158 + int rc; 2159 + 2160 + guard(nvdimm_bus)(&nd_region->dev); 2161 + rc = init_active_labels(nd_region); 2162 + if (rc) 2163 + return rc; 2164 + 2165 + *type = nd_region_to_nstype(nd_region); 2166 + switch (*type) { 2167 + case ND_DEVICE_NAMESPACE_IO: 2168 + *devs = create_namespace_io(nd_region); 2169 + break; 2170 + case ND_DEVICE_NAMESPACE_PMEM: 2171 + *devs = create_namespaces(nd_region); 2172 + break; 2173 + } 2174 + 2175 + return 0; 2176 + } 2177 + 2149 2178 int nd_region_register_namespaces(struct nd_region *nd_region, int *err) 2150 2179 { 2151 2180 struct device **devs = NULL; 2152 2181 int i, rc = 0, type; 2153 2182 2154 2183 *err = 0; 2155 - nvdimm_bus_lock(&nd_region->dev); 2156 - rc = init_active_labels(nd_region); 2157 - if (rc) { 2158 - nvdimm_bus_unlock(&nd_region->dev); 2184 + rc = create_relevant_namespaces(nd_region, &type, &devs); 2185 + if (rc) 2159 2186 return rc; 2160 - } 2161 - 2162 - type = nd_region_to_nstype(nd_region); 2163 - switch (type) { 2164 - case ND_DEVICE_NAMESPACE_IO: 2165 - devs = create_namespace_io(nd_region); 2166 - break; 2167 - case ND_DEVICE_NAMESPACE_PMEM: 2168 - devs = create_namespaces(nd_region); 2169 - break; 2170 - default: 2171 - break; 2172 - } 2173 - nvdimm_bus_unlock(&nd_region->dev); 2174 2187 2175 2188 if (!devs) 2176 2189 return -ENODEV;
+3
drivers/nvdimm/nd.h
··· 632 632 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); 633 633 void nvdimm_bus_lock(struct device *dev); 634 634 void nvdimm_bus_unlock(struct device *dev); 635 + DEFINE_GUARD(nvdimm_bus, struct device *, 636 + if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T)); 637 + 635 638 bool is_nvdimm_bus_locked(struct device *dev); 636 639 void nvdimm_check_and_set_ro(struct gendisk *disk); 637 640 void nvdimm_drvdata_release(struct kref *kref);
+25 -36
drivers/nvdimm/pfn_devs.c
··· 56 56 { 57 57 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 58 58 ssize_t rc = 0; 59 + size_t n = len - 1; 59 60 60 - device_lock(dev); 61 - nvdimm_bus_lock(dev); 61 + guard(device)(dev); 62 + guard(nvdimm_bus)(dev); 62 63 if (dev->driver) 63 - rc = -EBUSY; 64 - else { 65 - size_t n = len - 1; 64 + return -EBUSY; 66 65 67 - if (strncmp(buf, "pmem\n", n) == 0 68 - || strncmp(buf, "pmem", n) == 0) { 69 - nd_pfn->mode = PFN_MODE_PMEM; 70 - } else if (strncmp(buf, "ram\n", n) == 0 71 - || strncmp(buf, "ram", n) == 0) 72 - nd_pfn->mode = PFN_MODE_RAM; 73 - else if (strncmp(buf, "none\n", n) == 0 74 - || strncmp(buf, "none", n) == 0) 75 - nd_pfn->mode = PFN_MODE_NONE; 76 - else 77 - rc = -EINVAL; 78 - } 66 + if (strncmp(buf, "pmem\n", n) == 0 67 + || strncmp(buf, "pmem", n) == 0) { 68 + nd_pfn->mode = PFN_MODE_PMEM; 69 + } else if (strncmp(buf, "ram\n", n) == 0 70 + || strncmp(buf, "ram", n) == 0) 71 + nd_pfn->mode = PFN_MODE_RAM; 72 + else if (strncmp(buf, "none\n", n) == 0 73 + || strncmp(buf, "none", n) == 0) 74 + nd_pfn->mode = PFN_MODE_NONE; 75 + else 76 + rc = -EINVAL; 79 77 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 80 78 buf[len - 1] == '\n' ? "" : "\n"); 81 - nvdimm_bus_unlock(dev); 82 - device_unlock(dev); 83 79 84 80 return rc ? rc : len; 85 81 } ··· 121 125 unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, }; 122 126 ssize_t rc; 123 127 124 - device_lock(dev); 125 - nvdimm_bus_lock(dev); 128 + guard(device)(dev); 129 + guard(nvdimm_bus)(dev); 126 130 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 127 131 nd_pfn_supported_alignments(aligns)); 128 132 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 129 133 buf[len - 1] == '\n' ? "" : "\n"); 130 - nvdimm_bus_unlock(dev); 131 - device_unlock(dev); 132 134 133 135 return rc ? 
rc : len; 134 136 } ··· 162 168 struct device_attribute *attr, char *buf) 163 169 { 164 170 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 165 - ssize_t rc; 166 171 167 - nvdimm_bus_lock(dev); 168 - rc = sprintf(buf, "%s\n", nd_pfn->ndns 172 + guard(nvdimm_bus)(dev); 173 + return sprintf(buf, "%s\n", nd_pfn->ndns 169 174 ? dev_name(&nd_pfn->ndns->dev) : ""); 170 - nvdimm_bus_unlock(dev); 171 - return rc; 172 175 } 173 176 174 177 static ssize_t namespace_store(struct device *dev, ··· 174 183 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 175 184 ssize_t rc; 176 185 177 - device_lock(dev); 178 - nvdimm_bus_lock(dev); 186 + guard(device)(dev); 187 + guard(nvdimm_bus)(dev); 179 188 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 180 189 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 181 190 buf[len - 1] == '\n' ? "" : "\n"); 182 - nvdimm_bus_unlock(dev); 183 - device_unlock(dev); 184 191 185 192 return rc; 186 193 } ··· 628 639 return -ENODEV; 629 640 } 630 641 631 - nvdimm_bus_lock(&ndns->dev); 632 - nd_pfn = nd_pfn_alloc(nd_region); 633 - pfn_dev = nd_pfn_devinit(nd_pfn, ndns); 634 - nvdimm_bus_unlock(&ndns->dev); 642 + scoped_guard(nvdimm_bus, &ndns->dev) { 643 + nd_pfn = nd_pfn_alloc(nd_region); 644 + pfn_dev = nd_pfn_devinit(nd_pfn, ndns); 645 + } 635 646 if (!pfn_dev) 636 647 return -ENOMEM; 637 648 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+8 -8
drivers/nvdimm/region.c
··· 70 70 * "<async-registered>/<total>" namespace count. 71 71 */ 72 72 dev_err(dev, "failed to register %d namespace%s, continuing...\n", 73 - err, err == 1 ? "" : "s"); 73 + err, str_plural(err)); 74 74 return 0; 75 75 } 76 76 ··· 87 87 device_for_each_child(dev, NULL, child_unregister); 88 88 89 89 /* flush attribute readers and disable */ 90 - nvdimm_bus_lock(dev); 91 - nd_region->ns_seed = NULL; 92 - nd_region->btt_seed = NULL; 93 - nd_region->pfn_seed = NULL; 94 - nd_region->dax_seed = NULL; 95 - dev_set_drvdata(dev, NULL); 96 - nvdimm_bus_unlock(dev); 90 + scoped_guard(nvdimm_bus, dev) { 91 + nd_region->ns_seed = NULL; 92 + nd_region->btt_seed = NULL; 93 + nd_region->pfn_seed = NULL; 94 + nd_region->dax_seed = NULL; 95 + dev_set_drvdata(dev, NULL); 96 + } 97 97 98 98 /* 99 99 * Note, this assumes device_lock() context to not race
+50 -68
drivers/nvdimm/region_devs.c
··· 102 102 return 0; 103 103 } 104 104 105 - int nd_region_activate(struct nd_region *nd_region) 105 + static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush) 106 106 { 107 - int i, j, rc, num_flush = 0; 108 - struct nd_region_data *ndrd; 109 - struct device *dev = &nd_region->dev; 110 107 size_t flush_data_size = sizeof(void *); 108 + int _num_flush = 0; 109 + int i; 111 110 112 - nvdimm_bus_lock(&nd_region->dev); 111 + guard(nvdimm_bus)(&nd_region->dev); 113 112 for (i = 0; i < nd_region->ndr_mappings; i++) { 114 113 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 115 114 struct nvdimm *nvdimm = nd_mapping->nvdimm; 116 115 117 - if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) { 118 - nvdimm_bus_unlock(&nd_region->dev); 116 + if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) 119 117 return -EBUSY; 120 - } 121 118 122 119 /* at least one null hint slot per-dimm for the "no-hint" case */ 123 120 flush_data_size += sizeof(void *); 124 - num_flush = min_not_zero(num_flush, nvdimm->num_flush); 121 + _num_flush = min_not_zero(_num_flush, nvdimm->num_flush); 125 122 if (!nvdimm->num_flush) 126 123 continue; 127 124 flush_data_size += nvdimm->num_flush * sizeof(void *); 128 125 } 129 - nvdimm_bus_unlock(&nd_region->dev); 126 + 127 + *size = flush_data_size; 128 + *num_flush = _num_flush; 129 + 130 + return 0; 131 + } 132 + 133 + int nd_region_activate(struct nd_region *nd_region) 134 + { 135 + int i, j, rc, num_flush; 136 + struct nd_region_data *ndrd; 137 + struct device *dev = &nd_region->dev; 138 + size_t flush_data_size; 139 + 140 + rc = get_flush_data(nd_region, &flush_data_size, &num_flush); 141 + if (rc) 142 + return rc; 130 143 131 144 rc = nd_region_invalidate_memregion(nd_region); 132 145 if (rc) ··· 340 327 * the v1.1 namespace label cookie definition. To read all this 341 328 * data we need to wait for probing to settle. 
342 329 */ 343 - device_lock(dev); 344 - nvdimm_bus_lock(dev); 330 + guard(device)(dev); 331 + guard(nvdimm_bus)(dev); 345 332 wait_nvdimm_bus_probe_idle(dev); 346 333 if (nd_region->ndr_mappings) { 347 334 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; ··· 356 343 nsindex)); 357 344 } 358 345 } 359 - nvdimm_bus_unlock(dev); 360 - device_unlock(dev); 361 346 362 347 if (rc) 363 348 return rc; ··· 404 393 struct device_attribute *attr, char *buf) 405 394 { 406 395 struct nd_region *nd_region = to_nd_region(dev); 407 - unsigned long long available = 0; 408 396 409 397 /* 410 398 * Flush in-flight updates and grab a snapshot of the available ··· 411 401 * memory nvdimm_bus_lock() is dropped, but that's userspace's 412 402 * problem to not race itself. 413 403 */ 414 - device_lock(dev); 415 - nvdimm_bus_lock(dev); 404 + guard(device)(dev); 405 + guard(nvdimm_bus)(dev); 416 406 wait_nvdimm_bus_probe_idle(dev); 417 - available = nd_region_available_dpa(nd_region); 418 - nvdimm_bus_unlock(dev); 419 - device_unlock(dev); 420 407 421 - return sprintf(buf, "%llu\n", available); 408 + return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region)); 422 409 } 423 410 static DEVICE_ATTR_RO(available_size); 424 411 ··· 423 416 struct device_attribute *attr, char *buf) 424 417 { 425 418 struct nd_region *nd_region = to_nd_region(dev); 426 - unsigned long long available = 0; 427 419 428 - device_lock(dev); 429 - nvdimm_bus_lock(dev); 420 + guard(device)(dev); 421 + guard(nvdimm_bus)(dev); 430 422 wait_nvdimm_bus_probe_idle(dev); 431 - available = nd_region_allocatable_dpa(nd_region); 432 - nvdimm_bus_unlock(dev); 433 - device_unlock(dev); 434 423 435 - return sprintf(buf, "%llu\n", available); 424 + return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region)); 436 425 } 437 426 static DEVICE_ATTR_RO(max_available_extent); 438 427 ··· 436 433 struct device_attribute *attr, char *buf) 437 434 { 438 435 struct nd_region_data *ndrd = dev_get_drvdata(dev); 439 - 
ssize_t rc; 440 436 441 - nvdimm_bus_lock(dev); 442 - if (ndrd) 443 - rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); 444 - else 445 - rc = -ENXIO; 446 - nvdimm_bus_unlock(dev); 437 + guard(nvdimm_bus)(dev); 438 + if (!ndrd) 439 + return -ENXIO; 447 440 448 - return rc; 441 + return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); 449 442 } 450 443 static DEVICE_ATTR_RO(init_namespaces); 451 444 ··· 449 450 struct device_attribute *attr, char *buf) 450 451 { 451 452 struct nd_region *nd_region = to_nd_region(dev); 452 - ssize_t rc; 453 453 454 - nvdimm_bus_lock(dev); 454 + guard(nvdimm_bus)(dev); 455 455 if (nd_region->ns_seed) 456 - rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); 457 - else 458 - rc = sprintf(buf, "\n"); 459 - nvdimm_bus_unlock(dev); 460 - return rc; 456 + return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); 457 + 458 + return sprintf(buf, "\n"); 461 459 } 462 460 static DEVICE_ATTR_RO(namespace_seed); 463 461 ··· 462 466 struct device_attribute *attr, char *buf) 463 467 { 464 468 struct nd_region *nd_region = to_nd_region(dev); 465 - ssize_t rc; 466 469 467 - nvdimm_bus_lock(dev); 470 + guard(nvdimm_bus)(dev); 468 471 if (nd_region->btt_seed) 469 - rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); 470 - else 471 - rc = sprintf(buf, "\n"); 472 - nvdimm_bus_unlock(dev); 472 + return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); 473 473 474 - return rc; 474 + return sprintf(buf, "\n"); 475 475 } 476 476 static DEVICE_ATTR_RO(btt_seed); 477 477 ··· 475 483 struct device_attribute *attr, char *buf) 476 484 { 477 485 struct nd_region *nd_region = to_nd_region(dev); 478 - ssize_t rc; 479 486 480 - nvdimm_bus_lock(dev); 487 + guard(nvdimm_bus)(dev); 481 488 if (nd_region->pfn_seed) 482 - rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); 483 - else 484 - rc = sprintf(buf, "\n"); 485 - nvdimm_bus_unlock(dev); 489 + return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); 486 490 487 - return 
rc; 491 + return sprintf(buf, "\n"); 488 492 } 489 493 static DEVICE_ATTR_RO(pfn_seed); 490 494 ··· 488 500 struct device_attribute *attr, char *buf) 489 501 { 490 502 struct nd_region *nd_region = to_nd_region(dev); 491 - ssize_t rc; 492 503 493 - nvdimm_bus_lock(dev); 504 + guard(nvdimm_bus)(dev); 494 505 if (nd_region->dax_seed) 495 - rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); 496 - else 497 - rc = sprintf(buf, "\n"); 498 - nvdimm_bus_unlock(dev); 506 + return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); 499 507 500 - return rc; 508 + return sprintf(buf, "\n"); 501 509 } 502 510 static DEVICE_ATTR_RO(dax_seed); 503 511 ··· 565 581 * times ensure it does not change for the duration of the 566 582 * allocation. 567 583 */ 568 - nvdimm_bus_lock(dev); 584 + guard(nvdimm_bus)(dev); 569 585 nd_region->align = val; 570 - nvdimm_bus_unlock(dev); 571 586 572 587 return len; 573 588 } ··· 873 890 */ 874 891 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) 875 892 { 876 - nvdimm_bus_lock(dev); 893 + guard(nvdimm_bus)(dev); 877 894 if (nd_region->ns_seed == dev) { 878 895 nd_region_create_ns_seed(nd_region); 879 896 } else if (is_nd_btt(dev)) { ··· 898 915 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) 899 916 nd_region_create_ns_seed(nd_region); 900 917 } 901 - nvdimm_bus_unlock(dev); 902 918 } 903 919 904 920 /**
+3 -7
drivers/nvdimm/security.c
··· 219 219 int nvdimm_security_unlock(struct device *dev) 220 220 { 221 221 struct nvdimm *nvdimm = to_nvdimm(dev); 222 - int rc; 223 222 224 - nvdimm_bus_lock(dev); 225 - rc = __nvdimm_security_unlock(nvdimm); 226 - nvdimm_bus_unlock(dev); 227 - return rc; 223 + guard(nvdimm_bus)(dev); 224 + return __nvdimm_security_unlock(nvdimm); 228 225 } 229 226 230 227 static int check_security_state(struct nvdimm *nvdimm) ··· 487 490 struct nvdimm *nvdimm = 488 491 container_of(work, typeof(*nvdimm), dwork.work); 489 492 490 - nvdimm_bus_lock(&nvdimm->dev); 493 + guard(nvdimm_bus)(&nvdimm->dev); 491 494 __nvdimm_security_overwrite_query(nvdimm); 492 - nvdimm_bus_unlock(&nvdimm->dev); 493 495 } 494 496 495 497 #define OPS \
+12 -1
tools/testing/nvdimm/test/ndtest.c
··· 850 850 851 851 p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 852 852 sizeof(dma_addr_t), GFP_KERNEL); 853 + if (!p->dcr_dma) { 854 + rc = -ENOMEM; 855 + goto err; 856 + } 853 857 p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 854 858 sizeof(dma_addr_t), GFP_KERNEL); 859 + if (!p->label_dma) { 860 + rc = -ENOMEM; 861 + goto err; 862 + } 855 863 p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 856 864 sizeof(dma_addr_t), GFP_KERNEL); 857 - 865 + if (!p->dimm_dma) { 866 + rc = -ENOMEM; 867 + goto err; 868 + } 858 869 rc = ndtest_nvdimm_init(p); 859 870 if (rc) 860 871 goto err;