Merge tag 'libnvdimm-for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Ira Weiny:
"Primarily bug fixes. Dave introduced the usage of cleanup.h a bit late
in the cycle to help with the new label work required within CXL [1]

nvdimm:
- Return -ENOMEM if devm_kcalloc() fails in ndtest_probe()
- Clean up __nd_ioctl() and remove gotos
- Remove duplicate linux/slab.h header
- Introduce guard() for nvdimm_bus_lock
- Use str_plural() to simplify the code

ACPI:
 - NFIT: Fix incorrect ndr_desc being reported in dev_err message"

Link: https://lore.kernel.org/all/20250917134116.1623730-1-s.neeraj@samsung.com/ [1]

* tag 'libnvdimm-for-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
nvdimm: Remove duplicate linux/slab.h header
nvdimm: ndtest: Return -ENOMEM if devm_kcalloc() fails in ndtest_probe()
nvdimm: Clean up __nd_ioctl() and remove gotos
nvdimm: Introduce guard() for nvdimm_bus_lock
  ACPI: NFIT: Fix incorrect ndr_desc being reported in dev_err message
nvdimm: Use str_plural() to simplify the code

+232 -300
+1 -1
drivers/acpi/nfit/core.c
··· 2637 if (ndr_desc->target_node == NUMA_NO_NODE) { 2638 ndr_desc->target_node = phys_to_target_node(spa->address); 2639 dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", 2640 - NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 2641 } 2642 2643 /*
··· 2637 if (ndr_desc->target_node == NUMA_NO_NODE) { 2638 ndr_desc->target_node = phys_to_target_node(spa->address); 2639 dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", 2640 + NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end); 2641 } 2642 2643 /*
+1 -2
drivers/nvdimm/badrange.c
··· 278 } 279 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 280 281 - nvdimm_bus_lock(&nvdimm_bus->dev); 282 badblocks_populate(&nvdimm_bus->badrange, bb, range); 283 - nvdimm_bus_unlock(&nvdimm_bus->dev); 284 } 285 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
··· 278 } 279 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 280 281 + guard(nvdimm_bus)(&nvdimm_bus->dev); 282 badblocks_populate(&nvdimm_bus->badrange, bb, range); 283 } 284 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
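
Context for the conversions that follow: guard() comes from linux/cleanup.h and binds the unlock to the end of the enclosing scope, so every return path drops the lock without an explicit unlock call. A minimal sketch of the idiom with an ordinary mutex — all names below are hypothetical, not from this series:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(demo_lock);  /* hypothetical lock */
    static int demo_count;

    static int demo_increment(bool allowed)
    {
            guard(mutex)(&demo_lock);  /* mutex_lock() runs here */

            if (!allowed)
                    return -EPERM;  /* mutex_unlock() runs automatically */
            demo_count++;
            return 0;  /* ...and on this path too */
    }

The badrange.c hunk above is the simplest case: the lock/unlock pair brackets the entire tail of the function, so a plain guard() is a drop-in replacement.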
+8 -16
drivers/nvdimm/btt_devs.c
··· 50 struct nd_btt *nd_btt = to_nd_btt(dev); 51 ssize_t rc; 52 53 - device_lock(dev); 54 - nvdimm_bus_lock(dev); 55 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, 56 btt_lbasize_supported); 57 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 58 buf[len - 1] == '\n' ? "" : "\n"); 59 - nvdimm_bus_unlock(dev); 60 - device_unlock(dev); 61 62 return rc ? rc : len; 63 } ··· 91 struct device_attribute *attr, char *buf) 92 { 93 struct nd_btt *nd_btt = to_nd_btt(dev); 94 - ssize_t rc; 95 96 - nvdimm_bus_lock(dev); 97 - rc = sprintf(buf, "%s\n", nd_btt->ndns 98 ? dev_name(&nd_btt->ndns->dev) : ""); 99 - nvdimm_bus_unlock(dev); 100 - return rc; 101 } 102 103 static ssize_t namespace_store(struct device *dev, ··· 103 struct nd_btt *nd_btt = to_nd_btt(dev); 104 ssize_t rc; 105 106 - device_lock(dev); 107 - nvdimm_bus_lock(dev); 108 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 109 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 110 buf[len - 1] == '\n' ? "" : "\n"); 111 - nvdimm_bus_unlock(dev); 112 - device_unlock(dev); 113 114 return rc; 115 } ··· 344 return -ENODEV; 345 } 346 347 - nvdimm_bus_lock(&ndns->dev); 348 - btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); 349 - nvdimm_bus_unlock(&ndns->dev); 350 if (!btt_dev) 351 return -ENOMEM; 352 btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
··· 50 struct nd_btt *nd_btt = to_nd_btt(dev); 51 ssize_t rc; 52 53 + guard(device)(dev); 54 + guard(nvdimm_bus)(dev); 55 rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, 56 btt_lbasize_supported); 57 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 58 buf[len - 1] == '\n' ? "" : "\n"); 59 60 return rc ? rc : len; 61 } ··· 93 struct device_attribute *attr, char *buf) 94 { 95 struct nd_btt *nd_btt = to_nd_btt(dev); 96 97 + guard(nvdimm_bus)(dev); 98 + return sprintf(buf, "%s\n", nd_btt->ndns 99 ? dev_name(&nd_btt->ndns->dev) : ""); 100 } 101 102 static ssize_t namespace_store(struct device *dev, ··· 108 struct nd_btt *nd_btt = to_nd_btt(dev); 109 ssize_t rc; 110 111 + guard(device)(dev); 112 + guard(nvdimm_bus)(dev); 113 rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); 114 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 115 buf[len - 1] == '\n' ? "" : "\n"); 116 117 return rc; 118 } ··· 351 return -ENODEV; 352 } 353 354 + scoped_guard(nvdimm_bus, &ndns->dev) 355 + btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); 356 if (!btt_dev) 357 return -ENOMEM; 358 btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
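
The btt_devs.c conversion also shows scoped_guard(), the bounded variant: the lock is held only for the statement or block that follows, which is why __nd_btt_create() above runs under the bus lock while the subsequent NULL check does not. A sketch of the block form, again with hypothetical names:

    #include <linux/cleanup.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_item {  /* hypothetical */
            struct list_head node;
    };

    static LIST_HEAD(demo_list);
    static DEFINE_MUTEX(demo_lock);

    static struct demo_item *demo_pop(void)
    {
            struct demo_item *item = NULL;

            /* lock held for this block only; dropped before the return */
            scoped_guard(mutex, &demo_lock) {
                    if (!list_empty(&demo_list)) {
                            item = list_first_entry(&demo_list,
                                                    struct demo_item, node);
                            list_del(&item->node);
                    }
            }

            return item;
    }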
+24 -48
drivers/nvdimm/bus.c
··· 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/libnvdimm.h> 7 #include <linux/sched/mm.h> 8 - #include <linux/vmalloc.h> 9 #include <linux/uaccess.h> 10 #include <linux/module.h> 11 #include <linux/blkdev.h> ··· 13 #include <linux/async.h> 14 #include <linux/ndctl.h> 15 #include <linux/sched.h> 16 - #include <linux/slab.h> 17 #include <linux/cpu.h> 18 #include <linux/fs.h> 19 #include <linux/io.h> ··· 63 64 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus) 65 { 66 - nvdimm_bus_lock(&nvdimm_bus->dev); 67 nvdimm_bus->probe_active++; 68 - nvdimm_bus_unlock(&nvdimm_bus->dev); 69 } 70 71 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) 72 { 73 - nvdimm_bus_lock(&nvdimm_bus->dev); 74 if (--nvdimm_bus->probe_active == 0) 75 wake_up(&nvdimm_bus->wait); 76 - nvdimm_bus_unlock(&nvdimm_bus->dev); 77 } 78 79 static int nvdimm_bus_probe(struct device *dev) ··· 1028 unsigned int cmd = _IOC_NR(ioctl_cmd); 1029 struct device *dev = &nvdimm_bus->dev; 1030 void __user *p = (void __user *) arg; 1031 - char *out_env = NULL, *in_env = NULL; 1032 const char *cmd_name, *dimm_name; 1033 u32 in_len = 0, out_len = 0; 1034 unsigned int func = cmd; 1035 unsigned long cmd_mask; 1036 struct nd_cmd_pkg pkg; 1037 int rc, i, cmd_rc; 1038 - void *buf = NULL; 1039 u64 buf_len = 0; 1040 1041 if (nvdimm) { ··· 1092 } 1093 1094 /* process an input envelope */ 1095 - in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1096 if (!in_env) 1097 return -ENOMEM; 1098 for (i = 0; i < desc->in_num; i++) { ··· 1102 if (in_size == UINT_MAX) { 1103 dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", 1104 __func__, dimm_name, cmd_name, i); 1105 - rc = -ENXIO; 1106 - goto out; 1107 } 1108 if (in_len < ND_CMD_MAX_ENVELOPE) 1109 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); 1110 else 1111 copy = 0; 1112 - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { 1113 - rc = -EFAULT; 1114 - goto out; 1115 - } 1116 in_len += in_size; 1117 } 1118 ··· 1121 } 1122 1123 /* process an output envelope */ 1124 - out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1125 - if (!out_env) { 1126 - rc = -ENOMEM; 1127 - goto out; 1128 - } 1129 1130 for (i = 0; i < desc->out_num; i++) { 1131 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, ··· 1133 if (out_size == UINT_MAX) { 1134 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", 1135 dimm_name, cmd_name, i); 1136 - rc = -EFAULT; 1137 - goto out; 1138 } 1139 if (out_len < ND_CMD_MAX_ENVELOPE) 1140 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); ··· 1141 copy = 0; 1142 if (copy && copy_from_user(&out_env[out_len], 1143 p + in_len + out_len, copy)) { 1144 - rc = -EFAULT; 1145 - goto out; 1146 } 1147 out_len += out_size; 1148 } ··· 1150 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 1151 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, 1152 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); 1153 - rc = -EINVAL; 1154 - goto out; 1155 } 1156 1157 - buf = vmalloc(buf_len); 1158 - if (!buf) { 1159 - rc = -ENOMEM; 1160 - goto out; 1161 - } 1162 1163 - if (copy_from_user(buf, p, buf_len)) { 1164 - rc = -EFAULT; 1165 - goto out; 1166 - } 1167 1168 - device_lock(dev); 1169 - nvdimm_bus_lock(dev); 1170 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); 1171 if (rc) 1172 - goto out_unlock; 1173 1174 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc); 1175 if (rc < 0) 1176 - goto out_unlock; 1177 1178 if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) { 1179 struct nd_cmd_clear_error *clear_err = buf; 
··· 1178 } 1179 1180 if (copy_to_user(p, buf, buf_len)) 1181 - rc = -EFAULT; 1182 1183 - out_unlock: 1184 - nvdimm_bus_unlock(dev); 1185 - device_unlock(dev); 1186 - out: 1187 - kfree(in_env); 1188 - kfree(out_env); 1189 - vfree(buf); 1190 - return rc; 1191 } 1192 1193 enum nd_ioctl_mode {
··· 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/libnvdimm.h> 7 #include <linux/sched/mm.h> 8 + #include <linux/slab.h> 9 #include <linux/uaccess.h> 10 #include <linux/module.h> 11 #include <linux/blkdev.h> ··· 13 #include <linux/async.h> 14 #include <linux/ndctl.h> 15 #include <linux/sched.h> 16 #include <linux/cpu.h> 17 #include <linux/fs.h> 18 #include <linux/io.h> ··· 64 65 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus) 66 { 67 + guard(nvdimm_bus)(&nvdimm_bus->dev); 68 nvdimm_bus->probe_active++; 69 } 70 71 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) 72 { 73 + guard(nvdimm_bus)(&nvdimm_bus->dev); 74 if (--nvdimm_bus->probe_active == 0) 75 wake_up(&nvdimm_bus->wait); 76 } 77 78 static int nvdimm_bus_probe(struct device *dev) ··· 1031 unsigned int cmd = _IOC_NR(ioctl_cmd); 1032 struct device *dev = &nvdimm_bus->dev; 1033 void __user *p = (void __user *) arg; 1034 const char *cmd_name, *dimm_name; 1035 u32 in_len = 0, out_len = 0; 1036 unsigned int func = cmd; 1037 unsigned long cmd_mask; 1038 struct nd_cmd_pkg pkg; 1039 int rc, i, cmd_rc; 1040 u64 buf_len = 0; 1041 1042 if (nvdimm) { ··· 1097 } 1098 1099 /* process an input envelope */ 1100 + char *in_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1101 if (!in_env) 1102 return -ENOMEM; 1103 for (i = 0; i < desc->in_num; i++) { ··· 1107 if (in_size == UINT_MAX) { 1108 dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", 1109 __func__, dimm_name, cmd_name, i); 1110 + return -ENXIO; 1111 } 1112 if (in_len < ND_CMD_MAX_ENVELOPE) 1113 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); 1114 else 1115 copy = 0; 1116 + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) 1117 + return -EFAULT; 1118 in_len += in_size; 1119 } 1120 ··· 1129 } 1130 1131 /* process an output envelope */ 1132 + char *out_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); 1133 + if (!out_env) 1134 + return -ENOMEM; 1135 1136 for (i = 0; i < desc->out_num; i++) { 1137 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, ··· 1143 if (out_size == UINT_MAX) { 1144 dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", 1145 dimm_name, cmd_name, i); 1146 + return -EFAULT; 1147 } 1148 if (out_len < ND_CMD_MAX_ENVELOPE) 1149 copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); ··· 1152 copy = 0; 1153 if (copy && copy_from_user(&out_env[out_len], 1154 p + in_len + out_len, copy)) { 1155 + return -EFAULT; 1156 } 1157 out_len += out_size; 1158 } ··· 1162 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 1163 dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, 1164 cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); 1165 + return -EINVAL; 1166 } 1167 1168 + void *buf __free(kvfree) = kvzalloc(buf_len, GFP_KERNEL); 1169 + if (!buf) 1170 + return -ENOMEM; 1171 1172 + if (copy_from_user(buf, p, buf_len)) 1173 + return -EFAULT; 1174 1175 + guard(device)(dev); 1176 + guard(nvdimm_bus)(dev); 1177 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); 1178 if (rc) 1179 + return rc; 1180 1181 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc); 1182 if (rc < 0) 1183 + return rc; 1184 1185 if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) { 1186 struct nd_cmd_clear_error *clear_err = buf; ··· 1195 } 1196 1197 if (copy_to_user(p, buf, buf_len)) 1198 + return -EFAULT; 1199 1200 + return 0; 1201 } 1202 1203 enum nd_ioctl_mode {
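
The __nd_ioctl() goto removal relies on cleanup.h's __free() attribute: the named free routine runs when the variable goes out of scope, so every error path can return directly. Note the related switch from vmalloc() to kvzalloc(), which lets a single __free(kvfree) annotation cover the payload buffer. A minimal sketch of the pattern, including no_free_ptr() for a success path that hands the allocation on — the function below is hypothetical:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static int demo_import(const void __user *p, size_t len, char **out)
    {
            char *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            if (copy_from_user(buf, p, len))
                    return -EFAULT;  /* kfree(buf) runs automatically */

            *out = no_free_ptr(buf);  /* transfer ownership; no kfree */
            return 0;
    }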
+3 -4
drivers/nvdimm/claim.c
··· 34 35 if (!ndns) 36 return; 37 - get_device(&ndns->dev); 38 - nvdimm_bus_lock(&ndns->dev); 39 __nd_detach_ndns(dev, _ndns); 40 - nvdimm_bus_unlock(&ndns->dev); 41 - put_device(&ndns->dev); 42 } 43 44 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
··· 34 35 if (!ndns) 36 return; 37 + 38 + struct device *ndev __free(put_device) = get_device(&ndns->dev); 39 + guard(nvdimm_bus)(ndev); 40 __nd_detach_ndns(dev, _ndns); 41 } 42 43 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
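
claim.c pairs the two facilities: get_device() with __free(put_device) keeps the namespace device pinned for exactly as long as the guard holds the bus lock, and both are released in reverse declaration order at the closing brace. A sketch of the reference-pinning half with a generic device (hypothetical helper):

    #include <linux/cleanup.h>
    #include <linux/device.h>

    static int demo_poke(struct device *parent)
    {
            struct device *dev __free(put_device) = get_device(parent);

            if (!dev)
                    return -ENODEV;
            if (!dev->driver)
                    return -ENXIO;  /* put_device() runs automatically */

            dev_info(dev, "poked\n");
            return 0;
    }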
+8 -9
drivers/nvdimm/core.c
··· 141 struct nvdimm_map *nvdimm_map = data; 142 struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus; 143 144 - nvdimm_bus_lock(&nvdimm_bus->dev); 145 kref_put(&nvdimm_map->kref, nvdimm_map_release); 146 - nvdimm_bus_unlock(&nvdimm_bus->dev); 147 } 148 149 /** ··· 157 { 158 struct nvdimm_map *nvdimm_map; 159 160 - nvdimm_bus_lock(dev); 161 - nvdimm_map = find_nvdimm_map(dev, offset); 162 - if (!nvdimm_map) 163 - nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); 164 - else 165 - kref_get(&nvdimm_map->kref); 166 - nvdimm_bus_unlock(dev); 167 168 if (!nvdimm_map) 169 return NULL;
··· 141 struct nvdimm_map *nvdimm_map = data; 142 struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus; 143 144 + guard(nvdimm_bus)(&nvdimm_bus->dev); 145 kref_put(&nvdimm_map->kref, nvdimm_map_release); 146 } 147 148 /** ··· 158 { 159 struct nvdimm_map *nvdimm_map; 160 161 + scoped_guard(nvdimm_bus, dev) { 162 + nvdimm_map = find_nvdimm_map(dev, offset); 163 + if (!nvdimm_map) 164 + nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); 165 + else 166 + kref_get(&nvdimm_map->kref); 167 + } 168 169 if (!nvdimm_map) 170 return NULL;
+6 -6
drivers/nvdimm/dax_devs.c
··· 104 return -ENODEV; 105 } 106 107 - nvdimm_bus_lock(&ndns->dev); 108 - nd_dax = nd_dax_alloc(nd_region); 109 - dax_dev = nd_dax_devinit(nd_dax, ndns); 110 - nvdimm_bus_unlock(&ndns->dev); 111 - if (!dax_dev) 112 - return -ENOMEM; 113 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); 114 nd_pfn = &nd_dax->nd_pfn; 115 nd_pfn->pfn_sb = pfn_sb;
··· 104 return -ENODEV; 105 } 106 107 + scoped_guard(nvdimm_bus, &ndns->dev) { 108 + nd_dax = nd_dax_alloc(nd_region); 109 + dax_dev = nd_dax_devinit(nd_dax, ndns); 110 + if (!dax_dev) 111 + return -ENOMEM; 112 + } 113 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); 114 nd_pfn = &nd_dax->nd_pfn; 115 nd_pfn->pfn_sb = pfn_sb;
+2 -3
drivers/nvdimm/dimm.c
··· 117 { 118 struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); 119 120 - nvdimm_bus_lock(dev); 121 - dev_set_drvdata(dev, NULL); 122 - nvdimm_bus_unlock(dev); 123 put_ndd(ndd); 124 } 125
··· 117 { 118 struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); 119 120 + scoped_guard(nvdimm_bus, dev) 121 + dev_set_drvdata(dev, NULL); 122 put_ndd(ndd); 123 } 124
+17 -31
drivers/nvdimm/dimm_devs.c
··· 226 struct resource *res, *_r; 227 228 dev_dbg(dev, "trace\n"); 229 - nvdimm_bus_lock(dev); 230 - for_each_dpa_resource_safe(ndd, res, _r) 231 - nvdimm_free_dpa(ndd, res); 232 - nvdimm_bus_unlock(dev); 233 234 kvfree(ndd->data); 235 kfree(ndd); ··· 319 static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) 320 { 321 struct device *dev; 322 - ssize_t rc; 323 u32 nfree; 324 325 if (!ndd) 326 return -ENXIO; 327 328 dev = ndd->dev; 329 - nvdimm_bus_lock(dev); 330 nfree = nd_label_nfree(ndd); 331 if (nfree - 1 > nfree) { 332 dev_WARN_ONCE(dev, 1, "we ate our last label?\n"); 333 nfree = 0; 334 } else 335 nfree--; 336 - rc = sprintf(buf, "%d\n", nfree); 337 - nvdimm_bus_unlock(dev); 338 - return rc; 339 } 340 341 static ssize_t available_slots_show(struct device *dev, ··· 385 struct device_attribute *attr, const char *buf, size_t len) 386 387 { 388 - ssize_t rc; 389 - 390 /* 391 * Require all userspace triggered security management to be 392 * done while probing is idle and the DIMM is not in active use 393 * in any region. 394 */ 395 - device_lock(dev); 396 - nvdimm_bus_lock(dev); 397 wait_nvdimm_bus_probe_idle(dev); 398 - rc = nvdimm_security_store(dev, buf, len); 399 - nvdimm_bus_unlock(dev); 400 - device_unlock(dev); 401 - 402 - return rc; 403 } 404 static DEVICE_ATTR_RW(security); 405 ··· 445 if (!nvdimm->fw_ops) 446 return -EOPNOTSUPP; 447 448 - nvdimm_bus_lock(dev); 449 result = nvdimm->fw_ops->activate_result(nvdimm); 450 - nvdimm_bus_unlock(dev); 451 452 switch (result) { 453 case NVDIMM_FWA_RESULT_NONE: ··· 473 if (!nvdimm->fw_ops) 474 return -EOPNOTSUPP; 475 476 - nvdimm_bus_lock(dev); 477 state = nvdimm->fw_ops->activate_state(nvdimm); 478 - nvdimm_bus_unlock(dev); 479 480 switch (state) { 481 case NVDIMM_FWA_IDLE: ··· 505 else 506 return -EINVAL; 507 508 - nvdimm_bus_lock(dev); 509 rc = nvdimm->fw_ops->arm(nvdimm, arg); 510 - nvdimm_bus_unlock(dev); 511 512 if (rc < 0) 513 return rc; ··· 533 if (!nvdimm->fw_ops) 534 return 0; 535 536 - nvdimm_bus_lock(dev); 537 cap = nd_desc->fw_ops->capability(nd_desc); 538 - nvdimm_bus_unlock(dev); 539 540 if (cap < NVDIMM_FWA_CAP_QUIESCE) 541 return 0; ··· 628 bool dev_put = false; 629 630 /* We are shutting down. Make state frozen artificially. */ 631 - nvdimm_bus_lock(dev); 632 - set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); 633 - if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags)) 634 - dev_put = true; 635 - nvdimm_bus_unlock(dev); 636 cancel_delayed_work_sync(&nvdimm->dwork); 637 if (dev_put) 638 put_device(dev);
··· 226 struct resource *res, *_r; 227 228 dev_dbg(dev, "trace\n"); 229 + scoped_guard(nvdimm_bus, dev) { 230 + for_each_dpa_resource_safe(ndd, res, _r) 231 + nvdimm_free_dpa(ndd, res); 232 + } 233 234 kvfree(ndd->data); 235 kfree(ndd); ··· 319 static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) 320 { 321 struct device *dev; 322 u32 nfree; 323 324 if (!ndd) 325 return -ENXIO; 326 327 dev = ndd->dev; 328 + guard(nvdimm_bus)(dev); 329 nfree = nd_label_nfree(ndd); 330 if (nfree - 1 > nfree) { 331 dev_WARN_ONCE(dev, 1, "we ate our last label?\n"); 332 nfree = 0; 333 } else 334 nfree--; 335 + return sprintf(buf, "%d\n", nfree); 336 } 337 338 static ssize_t available_slots_show(struct device *dev, ··· 388 struct device_attribute *attr, const char *buf, size_t len) 389 390 { 391 /* 392 * Require all userspace triggered security management to be 393 * done while probing is idle and the DIMM is not in active use 394 * in any region. 395 */ 396 + guard(device)(dev); 397 + guard(nvdimm_bus)(dev); 398 wait_nvdimm_bus_probe_idle(dev); 399 + return nvdimm_security_store(dev, buf, len); 400 } 401 static DEVICE_ATTR_RW(security); 402 ··· 454 if (!nvdimm->fw_ops) 455 return -EOPNOTSUPP; 456 457 + guard(nvdimm_bus)(dev); 458 result = nvdimm->fw_ops->activate_result(nvdimm); 459 460 switch (result) { 461 case NVDIMM_FWA_RESULT_NONE: ··· 483 if (!nvdimm->fw_ops) 484 return -EOPNOTSUPP; 485 486 + guard(nvdimm_bus)(dev); 487 state = nvdimm->fw_ops->activate_state(nvdimm); 488 489 switch (state) { 490 case NVDIMM_FWA_IDLE: ··· 516 else 517 return -EINVAL; 518 519 + guard(nvdimm_bus)(dev); 520 rc = nvdimm->fw_ops->arm(nvdimm, arg); 521 522 if (rc < 0) 523 return rc; ··· 545 if (!nvdimm->fw_ops) 546 return 0; 547 548 + guard(nvdimm_bus)(dev); 549 cap = nd_desc->fw_ops->capability(nd_desc); 550 551 if (cap < NVDIMM_FWA_CAP_QUIESCE) 552 return 0; ··· 641 bool dev_put = false; 642 643 /* We are shutting down. Make state frozen artificially. */ 644 + scoped_guard(nvdimm_bus, dev) { 645 + set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); 646 + dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags); 647 + } 648 cancel_delayed_work_sync(&nvdimm->dwork); 649 if (dev_put) 650 put_device(dev);
+61 -60
drivers/nvdimm/namespace_devs.c
··· 264 struct nd_region *nd_region = to_nd_region(dev->parent); 265 ssize_t rc; 266 267 - device_lock(dev); 268 - nvdimm_bus_lock(dev); 269 wait_nvdimm_bus_probe_idle(dev); 270 rc = __alt_name_store(dev, buf, len); 271 if (rc >= 0) 272 rc = nd_namespace_label_update(nd_region, dev); 273 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 274 - nvdimm_bus_unlock(dev); 275 - device_unlock(dev); 276 277 return rc < 0 ? rc : len; 278 } ··· 847 if (rc) 848 return rc; 849 850 - device_lock(dev); 851 - nvdimm_bus_lock(dev); 852 wait_nvdimm_bus_probe_idle(dev); 853 rc = __size_store(dev, val); 854 if (rc >= 0) ··· 863 } 864 865 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); 866 - 867 - nvdimm_bus_unlock(dev); 868 - device_unlock(dev); 869 870 return rc < 0 ? rc : len; 871 } ··· 886 887 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) 888 { 889 - resource_size_t size; 890 - 891 - nvdimm_bus_lock(&ndns->dev); 892 - size = __nvdimm_namespace_capacity(ndns); 893 - nvdimm_bus_unlock(&ndns->dev); 894 - 895 - return size; 896 } 897 EXPORT_SYMBOL(nvdimm_namespace_capacity); 898 ··· 1034 } else 1035 return -ENXIO; 1036 1037 - device_lock(dev); 1038 - nvdimm_bus_lock(dev); 1039 wait_nvdimm_bus_probe_idle(dev); 1040 if (to_ndns(dev)->claim) 1041 rc = -EBUSY; ··· 1049 kfree(uuid); 1050 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 1051 buf[len - 1] == '\n' ? "" : "\n"); 1052 - nvdimm_bus_unlock(dev); 1053 - device_unlock(dev); 1054 1055 return rc < 0 ? rc : len; 1056 } ··· 1107 } else 1108 return -ENXIO; 1109 1110 - device_lock(dev); 1111 - nvdimm_bus_lock(dev); 1112 - if (to_ndns(dev)->claim) 1113 - rc = -EBUSY; 1114 - if (rc >= 0) 1115 - rc = nd_size_select_store(dev, buf, lbasize, supported); 1116 - if (rc >= 0) 1117 - rc = nd_namespace_label_update(nd_region, dev); 1118 - dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", 1119 - buf, buf[len - 1] == '\n' ? "" : "\n"); 1120 - nvdimm_bus_unlock(dev); 1121 - device_unlock(dev); 1122 1123 - return rc ? rc : len; 1124 } 1125 static DEVICE_ATTR_RW(sector_size); 1126 ··· 1143 int count = 0, i; 1144 u32 flags = 0; 1145 1146 - nvdimm_bus_lock(dev); 1147 if (is_namespace_pmem(dev)) { 1148 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1149 ··· 1152 } 1153 1154 if (!uuid) 1155 - goto out; 1156 1157 nd_label_gen_id(&label_id, uuid, flags); 1158 for (i = 0; i < nd_region->ndr_mappings; i++) { ··· 1164 if (strcmp(res->name, label_id.id) == 0) 1165 count++; 1166 } 1167 - out: 1168 - nvdimm_bus_unlock(dev); 1169 1170 return sprintf(buf, "%d\n", count); 1171 } ··· 1275 struct nd_region *nd_region = to_nd_region(dev->parent); 1276 int rc; 1277 1278 - device_lock(dev); 1279 - nvdimm_bus_lock(dev); 1280 wait_nvdimm_bus_probe_idle(dev); 1281 rc = __holder_class_store(dev, buf); 1282 if (rc >= 0) 1283 rc = nd_namespace_label_update(nd_region, dev); 1284 dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); 1285 - nvdimm_bus_unlock(dev); 1286 - device_unlock(dev); 1287 1288 return rc < 0 ? rc : len; 1289 } ··· 1977 } 1978 1979 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, 1980 - count == 1 ? "" : "s"); 1981 1982 if (count == 0) { 1983 struct nd_namespace_pmem *nspm; ··· 2146 nd_region); 2147 } 2148 2149 int nd_region_register_namespaces(struct nd_region *nd_region, int *err) 2150 { 2151 struct device **devs = NULL; 2152 int i, rc = 0, type; 2153 2154 *err = 0; 2155 - nvdimm_bus_lock(&nd_region->dev); 2156 - rc = init_active_labels(nd_region); 2157 - if (rc) { 2158 - nvdimm_bus_unlock(&nd_region->dev); 2159 return rc; 2160 - } 2161 - 2162 - type = nd_region_to_nstype(nd_region); 2163 - switch (type) { 2164 - case ND_DEVICE_NAMESPACE_IO: 2165 - devs = create_namespace_io(nd_region); 2166 - break; 2167 - case ND_DEVICE_NAMESPACE_PMEM: 2168 - devs = create_namespaces(nd_region); 2169 - break; 2170 - default: 2171 - break; 2172 - } 2173 - nvdimm_bus_unlock(&nd_region->dev); 2174 2175 if (!devs) 2176 return -ENODEV;
··· 264 struct nd_region *nd_region = to_nd_region(dev->parent); 265 ssize_t rc; 266 267 + guard(device)(dev); 268 + guard(nvdimm_bus)(dev); 269 wait_nvdimm_bus_probe_idle(dev); 270 rc = __alt_name_store(dev, buf, len); 271 if (rc >= 0) 272 rc = nd_namespace_label_update(nd_region, dev); 273 dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); 274 275 return rc < 0 ? rc : len; 276 } ··· 849 if (rc) 850 return rc; 851 852 + guard(device)(dev); 853 + guard(nvdimm_bus)(dev); 854 wait_nvdimm_bus_probe_idle(dev); 855 rc = __size_store(dev, val); 856 if (rc >= 0) ··· 865 } 866 867 dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); 868 869 return rc < 0 ? rc : len; 870 } ··· 891 892 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) 893 { 894 + guard(nvdimm_bus)(&ndns->dev); 895 + return __nvdimm_namespace_capacity(ndns); 896 } 897 EXPORT_SYMBOL(nvdimm_namespace_capacity); 898 ··· 1044 } else 1045 return -ENXIO; 1046 1047 + guard(device)(dev); 1048 + guard(nvdimm_bus)(dev); 1049 wait_nvdimm_bus_probe_idle(dev); 1050 if (to_ndns(dev)->claim) 1051 rc = -EBUSY; ··· 1059 kfree(uuid); 1060 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 1061 buf[len - 1] == '\n' ? "" : "\n"); 1062 1063 return rc < 0 ? rc : len; 1064 } ··· 1119 } else 1120 return -ENXIO; 1121 1122 + guard(device)(dev); 1123 + guard(nvdimm_bus)(dev); 1124 + if (to_ndns(dev)->claim) { 1125 + dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev)); 1126 + return -EBUSY; 1127 + } 1128 1129 + rc = nd_size_select_store(dev, buf, lbasize, supported); 1130 + if (rc < 0) { 1131 + dev_dbg(dev, "size select fail: %zd tried: %s%s", rc, 1132 + buf, buf[len - 1] == '\n' ? "" : "\n"); 1133 + return rc; 1134 + } 1135 + 1136 + rc = nd_namespace_label_update(nd_region, dev); 1137 + if (rc < 0) { 1138 + dev_dbg(dev, "label update fail: %zd tried: %s%s", 1139 + rc, buf, buf[len - 1] == '\n' ? "" : "\n"); 1140 + return rc; 1141 + } 1142 + 1143 + dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? "" : "\n"); 1144 + 1145 + return len; 1146 } 1147 static DEVICE_ATTR_RW(sector_size); 1148 ··· 1145 int count = 0, i; 1146 u32 flags = 0; 1147 1148 + guard(nvdimm_bus)(dev); 1149 if (is_namespace_pmem(dev)) { 1150 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 1151 ··· 1154 } 1155 1156 if (!uuid) 1157 + return sprintf(buf, "%d\n", count); 1158 1159 nd_label_gen_id(&label_id, uuid, flags); 1160 for (i = 0; i < nd_region->ndr_mappings; i++) { ··· 1166 if (strcmp(res->name, label_id.id) == 0) 1167 count++; 1168 } 1169 1170 return sprintf(buf, "%d\n", count); 1171 } ··· 1279 struct nd_region *nd_region = to_nd_region(dev->parent); 1280 int rc; 1281 1282 + guard(device)(dev); 1283 + guard(nvdimm_bus)(dev); 1284 wait_nvdimm_bus_probe_idle(dev); 1285 rc = __holder_class_store(dev, buf); 1286 if (rc >= 0) 1287 rc = nd_namespace_label_update(nd_region, dev); 1288 dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); 1289 1290 return rc < 0 ? rc : len; 1291 } ··· 1983 } 1984 1985 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, 1986 + str_plural(count)); 1987 1988 if (count == 0) { 1989 struct nd_namespace_pmem *nspm; ··· 2152 nd_region); 2153 } 2154 2155 + static int create_relevant_namespaces(struct nd_region *nd_region, int *type, 2156 + struct device ***devs) 2157 + { 2158 + int rc; 2159 + 2160 + guard(nvdimm_bus)(&nd_region->dev); 2161 + rc = init_active_labels(nd_region); 2162 + if (rc) 2163 + return rc; 2164 + 2165 + *type = nd_region_to_nstype(nd_region); 2166 + switch (*type) { 2167 + case ND_DEVICE_NAMESPACE_IO: 2168 + *devs = create_namespace_io(nd_region); 2169 + break; 2170 + case ND_DEVICE_NAMESPACE_PMEM: 2171 + *devs = create_namespaces(nd_region); 2172 + break; 2173 + } 2174 + 2175 + return 0; 2176 + } 2177 + 2178 int nd_region_register_namespaces(struct nd_region *nd_region, int *err) 2179 { 2180 struct device **devs = NULL; 2181 int i, rc = 0, type; 2182 2183 *err = 0; 2184 + rc = create_relevant_namespaces(nd_region, &type, &devs); 2185 + if (rc) 2186 return rc; 2187 2188 if (!devs) 2189 return -ENODEV;
+3
drivers/nvdimm/nd.h
··· 632 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); 633 void nvdimm_bus_lock(struct device *dev); 634 void nvdimm_bus_unlock(struct device *dev); 635 bool is_nvdimm_bus_locked(struct device *dev); 636 void nvdimm_check_and_set_ro(struct gendisk *disk); 637 void nvdimm_drvdata_release(struct kref *kref);
··· 632 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); 633 void nvdimm_bus_lock(struct device *dev); 634 void nvdimm_bus_unlock(struct device *dev); 635 + DEFINE_GUARD(nvdimm_bus, struct device *, 636 + if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T)); 637 + 638 bool is_nvdimm_bus_locked(struct device *dev); 639 void nvdimm_check_and_set_ro(struct gendisk *disk); 640 void nvdimm_drvdata_release(struct kref *kref);
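
This DEFINE_GUARD() generates the lock class consumed by guard(nvdimm_bus) and scoped_guard(nvdimm_bus, ...) in the other hunks; the if (_T) wrappers make a NULL device a no-op rather than an oops. Usage sketches against the generated class (hypothetical callers, assuming nd.h is in scope):

    /* hold the bus lock until the enclosing function returns */
    static void demo_whole_fn(struct device *dev)
    {
            guard(nvdimm_bus)(dev);
            /* ...everything below runs under the bus lock... */
    }

    /* hold it across a single statement only */
    static void demo_one_stmt(struct device *dev)
    {
            scoped_guard(nvdimm_bus, dev)
                    dev_set_drvdata(dev, NULL);
    }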
+25 -36
drivers/nvdimm/pfn_devs.c
··· 56 { 57 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 58 ssize_t rc = 0; 59 60 - device_lock(dev); 61 - nvdimm_bus_lock(dev); 62 if (dev->driver) 63 - rc = -EBUSY; 64 - else { 65 - size_t n = len - 1; 66 67 - if (strncmp(buf, "pmem\n", n) == 0 68 - || strncmp(buf, "pmem", n) == 0) { 69 - nd_pfn->mode = PFN_MODE_PMEM; 70 - } else if (strncmp(buf, "ram\n", n) == 0 71 - || strncmp(buf, "ram", n) == 0) 72 - nd_pfn->mode = PFN_MODE_RAM; 73 - else if (strncmp(buf, "none\n", n) == 0 74 - || strncmp(buf, "none", n) == 0) 75 - nd_pfn->mode = PFN_MODE_NONE; 76 - else 77 - rc = -EINVAL; 78 - } 79 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 80 buf[len - 1] == '\n' ? "" : "\n"); 81 - nvdimm_bus_unlock(dev); 82 - device_unlock(dev); 83 84 return rc ? rc : len; 85 } ··· 121 unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, }; 122 ssize_t rc; 123 124 - device_lock(dev); 125 - nvdimm_bus_lock(dev); 126 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 127 nd_pfn_supported_alignments(aligns)); 128 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 129 buf[len - 1] == '\n' ? "" : "\n"); 130 - nvdimm_bus_unlock(dev); 131 - device_unlock(dev); 132 133 return rc ? rc : len; 134 } ··· 162 struct device_attribute *attr, char *buf) 163 { 164 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 165 - ssize_t rc; 166 167 - nvdimm_bus_lock(dev); 168 - rc = sprintf(buf, "%s\n", nd_pfn->ndns 169 ? dev_name(&nd_pfn->ndns->dev) : ""); 170 - nvdimm_bus_unlock(dev); 171 - return rc; 172 } 173 174 static ssize_t namespace_store(struct device *dev, ··· 174 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 175 ssize_t rc; 176 177 - device_lock(dev); 178 - nvdimm_bus_lock(dev); 179 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 180 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 181 buf[len - 1] == '\n' ? "" : "\n"); 182 - nvdimm_bus_unlock(dev); 183 - device_unlock(dev); 184 185 return rc; 186 } ··· 628 return -ENODEV; 629 } 630 631 - nvdimm_bus_lock(&ndns->dev); 632 - nd_pfn = nd_pfn_alloc(nd_region); 633 - pfn_dev = nd_pfn_devinit(nd_pfn, ndns); 634 - nvdimm_bus_unlock(&ndns->dev); 635 if (!pfn_dev) 636 return -ENOMEM; 637 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
··· 56 { 57 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 58 ssize_t rc = 0; 59 + size_t n = len - 1; 60 61 + guard(device)(dev); 62 + guard(nvdimm_bus)(dev); 63 if (dev->driver) 64 + return -EBUSY; 65 66 + if (strncmp(buf, "pmem\n", n) == 0 67 + || strncmp(buf, "pmem", n) == 0) { 68 + nd_pfn->mode = PFN_MODE_PMEM; 69 + } else if (strncmp(buf, "ram\n", n) == 0 70 + || strncmp(buf, "ram", n) == 0) 71 + nd_pfn->mode = PFN_MODE_RAM; 72 + else if (strncmp(buf, "none\n", n) == 0 73 + || strncmp(buf, "none", n) == 0) 74 + nd_pfn->mode = PFN_MODE_NONE; 75 + else 76 + rc = -EINVAL; 77 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 78 buf[len - 1] == '\n' ? "" : "\n"); 79 80 return rc ? rc : len; 81 } ··· 125 unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, }; 126 ssize_t rc; 127 128 + guard(device)(dev); 129 + guard(nvdimm_bus)(dev); 130 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 131 nd_pfn_supported_alignments(aligns)); 132 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 133 buf[len - 1] == '\n' ? "" : "\n"); 134 135 return rc ? rc : len; 136 } ··· 168 struct device_attribute *attr, char *buf) 169 { 170 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 171 172 + guard(nvdimm_bus)(dev); 173 + return sprintf(buf, "%s\n", nd_pfn->ndns 174 ? dev_name(&nd_pfn->ndns->dev) : ""); 175 } 176 177 static ssize_t namespace_store(struct device *dev, ··· 183 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 184 ssize_t rc; 185 186 + guard(device)(dev); 187 + guard(nvdimm_bus)(dev); 188 rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); 189 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 190 buf[len - 1] == '\n' ? "" : "\n"); 191 192 return rc; 193 } ··· 639 return -ENODEV; 640 } 641 642 + scoped_guard(nvdimm_bus, &ndns->dev) { 643 + nd_pfn = nd_pfn_alloc(nd_region); 644 + pfn_dev = nd_pfn_devinit(nd_pfn, ndns); 645 + } 646 if (!pfn_dev) 647 return -ENOMEM; 648 pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+8 -8
drivers/nvdimm/region.c
··· 70 * "<async-registered>/<total>" namespace count. 71 */ 72 dev_err(dev, "failed to register %d namespace%s, continuing...\n", 73 - err, err == 1 ? "" : "s"); 74 return 0; 75 } 76 ··· 87 device_for_each_child(dev, NULL, child_unregister); 88 89 /* flush attribute readers and disable */ 90 - nvdimm_bus_lock(dev); 91 - nd_region->ns_seed = NULL; 92 - nd_region->btt_seed = NULL; 93 - nd_region->pfn_seed = NULL; 94 - nd_region->dax_seed = NULL; 95 - dev_set_drvdata(dev, NULL); 96 - nvdimm_bus_unlock(dev); 97 98 /* 99 * Note, this assumes device_lock() context to not race
··· 70 * "<async-registered>/<total>" namespace count. 71 */ 72 dev_err(dev, "failed to register %d namespace%s, continuing...\n", 73 + err, str_plural(err)); 74 return 0; 75 } 76 ··· 87 device_for_each_child(dev, NULL, child_unregister); 88 89 /* flush attribute readers and disable */ 90 + scoped_guard(nvdimm_bus, dev) { 91 + nd_region->ns_seed = NULL; 92 + nd_region->btt_seed = NULL; 93 + nd_region->pfn_seed = NULL; 94 + nd_region->dax_seed = NULL; 95 + dev_set_drvdata(dev, NULL); 96 + } 97 98 /* 99 * Note, this assumes device_lock() context to not race
+50 -68
drivers/nvdimm/region_devs.c
··· 102 return 0; 103 } 104 105 - int nd_region_activate(struct nd_region *nd_region) 106 { 107 - int i, j, rc, num_flush = 0; 108 - struct nd_region_data *ndrd; 109 - struct device *dev = &nd_region->dev; 110 size_t flush_data_size = sizeof(void *); 111 112 - nvdimm_bus_lock(&nd_region->dev); 113 for (i = 0; i < nd_region->ndr_mappings; i++) { 114 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 115 struct nvdimm *nvdimm = nd_mapping->nvdimm; 116 117 - if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) { 118 - nvdimm_bus_unlock(&nd_region->dev); 119 return -EBUSY; 120 - } 121 122 /* at least one null hint slot per-dimm for the "no-hint" case */ 123 flush_data_size += sizeof(void *); 124 - num_flush = min_not_zero(num_flush, nvdimm->num_flush); 125 if (!nvdimm->num_flush) 126 continue; 127 flush_data_size += nvdimm->num_flush * sizeof(void *); 128 } 129 - nvdimm_bus_unlock(&nd_region->dev); 130 131 rc = nd_region_invalidate_memregion(nd_region); 132 if (rc) ··· 340 * the v1.1 namespace label cookie definition. To read all this 341 * data we need to wait for probing to settle. 342 */ 343 - device_lock(dev); 344 - nvdimm_bus_lock(dev); 345 wait_nvdimm_bus_probe_idle(dev); 346 if (nd_region->ndr_mappings) { 347 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; ··· 356 nsindex)); 357 } 358 } 359 - nvdimm_bus_unlock(dev); 360 - device_unlock(dev); 361 362 if (rc) 363 return rc; ··· 404 struct device_attribute *attr, char *buf) 405 { 406 struct nd_region *nd_region = to_nd_region(dev); 407 - unsigned long long available = 0; 408 409 /* 410 * Flush in-flight updates and grab a snapshot of the available ··· 411 * memory nvdimm_bus_lock() is dropped, but that's userspace's 412 * problem to not race itself. 413 */ 414 - device_lock(dev); 415 - nvdimm_bus_lock(dev); 416 wait_nvdimm_bus_probe_idle(dev); 417 - available = nd_region_available_dpa(nd_region); 418 - nvdimm_bus_unlock(dev); 419 - device_unlock(dev); 420 421 - return sprintf(buf, "%llu\n", available); 422 } 423 static DEVICE_ATTR_RO(available_size); 424 ··· 423 struct device_attribute *attr, char *buf) 424 { 425 struct nd_region *nd_region = to_nd_region(dev); 426 - unsigned long long available = 0; 427 428 - device_lock(dev); 429 - nvdimm_bus_lock(dev); 430 wait_nvdimm_bus_probe_idle(dev); 431 - available = nd_region_allocatable_dpa(nd_region); 432 - nvdimm_bus_unlock(dev); 433 - device_unlock(dev); 434 435 - return sprintf(buf, "%llu\n", available); 436 } 437 static DEVICE_ATTR_RO(max_available_extent); 438 ··· 436 struct device_attribute *attr, char *buf) 437 { 438 struct nd_region_data *ndrd = dev_get_drvdata(dev); 439 - ssize_t rc; 440 441 - nvdimm_bus_lock(dev); 442 - if (ndrd) 443 - rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); 444 - else 445 - rc = -ENXIO; 446 - nvdimm_bus_unlock(dev); 447 448 - return rc; 449 } 450 static DEVICE_ATTR_RO(init_namespaces); 451 ··· 449 struct device_attribute *attr, char *buf) 450 { 451 struct nd_region *nd_region = to_nd_region(dev); 452 - ssize_t rc; 453 454 - nvdimm_bus_lock(dev); 455 if (nd_region->ns_seed) 456 - rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); 457 - else 458 - rc = sprintf(buf, "\n"); 459 - nvdimm_bus_unlock(dev); 460 - return rc; 461 } 462 static DEVICE_ATTR_RO(namespace_seed); 463 ··· 462 struct device_attribute *attr, char *buf) 463 { 464 struct nd_region *nd_region = to_nd_region(dev); 465 - ssize_t rc; 466 467 - nvdimm_bus_lock(dev); 468 if (nd_region->btt_seed) 469 - rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); 470 - else 471 - rc = sprintf(buf, "\n"); 472 - nvdimm_bus_unlock(dev); 473 474 - return rc; 475 } 476 static DEVICE_ATTR_RO(btt_seed); 477 ··· 475 struct device_attribute *attr, char *buf) 476 { 477 struct nd_region *nd_region = to_nd_region(dev); 478 - ssize_t rc; 479 480 - nvdimm_bus_lock(dev); 481 if (nd_region->pfn_seed) 482 - rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); 483 - else 484 - rc = sprintf(buf, "\n"); 485 - nvdimm_bus_unlock(dev); 486 487 - return rc; 488 } 489 static DEVICE_ATTR_RO(pfn_seed); 490 ··· 488 struct device_attribute *attr, char *buf) 489 { 490 struct nd_region *nd_region = to_nd_region(dev); 491 - ssize_t rc; 492 493 - nvdimm_bus_lock(dev); 494 if (nd_region->dax_seed) 495 - rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); 496 - else 497 - rc = sprintf(buf, "\n"); 498 - nvdimm_bus_unlock(dev); 499 500 - return rc; 501 } 502 static DEVICE_ATTR_RO(dax_seed); 503 ··· 565 * times ensure it does not change for the duration of the 566 * allocation. 567 */ 568 - nvdimm_bus_lock(dev); 569 nd_region->align = val; 570 - nvdimm_bus_unlock(dev); 571 572 return len; 573 } ··· 873 */ 874 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) 875 { 876 - nvdimm_bus_lock(dev); 877 if (nd_region->ns_seed == dev) { 878 nd_region_create_ns_seed(nd_region); 879 } else if (is_nd_btt(dev)) { ··· 898 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) 899 nd_region_create_ns_seed(nd_region); 900 } 901 - nvdimm_bus_unlock(dev); 902 } 903 904 /**
··· 102 return 0; 103 } 104 105 + static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush) 106 { 107 size_t flush_data_size = sizeof(void *); 108 + int _num_flush = 0; 109 + int i; 110 111 + guard(nvdimm_bus)(&nd_region->dev); 112 for (i = 0; i < nd_region->ndr_mappings; i++) { 113 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 114 struct nvdimm *nvdimm = nd_mapping->nvdimm; 115 116 + if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) 117 return -EBUSY; 118 119 /* at least one null hint slot per-dimm for the "no-hint" case */ 120 flush_data_size += sizeof(void *); 121 + _num_flush = min_not_zero(_num_flush, nvdimm->num_flush); 122 if (!nvdimm->num_flush) 123 continue; 124 flush_data_size += nvdimm->num_flush * sizeof(void *); 125 } 126 + 127 + *size = flush_data_size; 128 + *num_flush = _num_flush; 129 + 130 + return 0; 131 + } 132 + 133 + int nd_region_activate(struct nd_region *nd_region) 134 + { 135 + int i, j, rc, num_flush; 136 + struct nd_region_data *ndrd; 137 + struct device *dev = &nd_region->dev; 138 + size_t flush_data_size; 139 + 140 + rc = get_flush_data(nd_region, &flush_data_size, &num_flush); 141 + if (rc) 142 + return rc; 143 144 rc = nd_region_invalidate_memregion(nd_region); 145 if (rc) ··· 327 * the v1.1 namespace label cookie definition. To read all this 328 * data we need to wait for probing to settle. 329 */ 330 + guard(device)(dev); 331 + guard(nvdimm_bus)(dev); 332 wait_nvdimm_bus_probe_idle(dev); 333 if (nd_region->ndr_mappings) { 334 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; ··· 343 nsindex)); 344 } 345 } 346 347 if (rc) 348 return rc; ··· 393 struct device_attribute *attr, char *buf) 394 { 395 struct nd_region *nd_region = to_nd_region(dev); 396 397 /* 398 * Flush in-flight updates and grab a snapshot of the available ··· 401 * memory nvdimm_bus_lock() is dropped, but that's userspace's 402 * problem to not race itself. 403 */ 404 + guard(device)(dev); 405 + guard(nvdimm_bus)(dev); 406 wait_nvdimm_bus_probe_idle(dev); 407 408 + return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region)); 409 } 410 static DEVICE_ATTR_RO(available_size); 411 ··· 416 struct device_attribute *attr, char *buf) 417 { 418 struct nd_region *nd_region = to_nd_region(dev); 419 420 + guard(device)(dev); 421 + guard(nvdimm_bus)(dev); 422 wait_nvdimm_bus_probe_idle(dev); 423 424 + return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region)); 425 } 426 static DEVICE_ATTR_RO(max_available_extent); 427 ··· 433 struct device_attribute *attr, char *buf) 434 { 435 struct nd_region_data *ndrd = dev_get_drvdata(dev); 436 437 + guard(nvdimm_bus)(dev); 438 + if (!ndrd) 439 + return -ENXIO; 440 441 + return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); 442 } 443 static DEVICE_ATTR_RO(init_namespaces); 444 ··· 450 struct device_attribute *attr, char *buf) 451 { 452 struct nd_region *nd_region = to_nd_region(dev); 453 454 + guard(nvdimm_bus)(dev); 455 if (nd_region->ns_seed) 456 + return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); 457 + 458 + return sprintf(buf, "\n"); 459 } 460 static DEVICE_ATTR_RO(namespace_seed); 461 ··· 466 struct device_attribute *attr, char *buf) 467 { 468 struct nd_region *nd_region = to_nd_region(dev); 469 470 + guard(nvdimm_bus)(dev); 471 if (nd_region->btt_seed) 472 + return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); 473 474 + return sprintf(buf, "\n"); 475 } 476 static DEVICE_ATTR_RO(btt_seed); 477 ··· 483 struct device_attribute *attr, char *buf) 484 { 485 struct nd_region *nd_region = to_nd_region(dev); 486 487 + guard(nvdimm_bus)(dev); 488 if (nd_region->pfn_seed) 489 + return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); 490 491 + return sprintf(buf, "\n"); 492 } 493 static DEVICE_ATTR_RO(pfn_seed); 494 ··· 500 struct device_attribute *attr, char *buf) 501 { 502 struct nd_region *nd_region = to_nd_region(dev); 503 504 + guard(nvdimm_bus)(dev); 505 if (nd_region->dax_seed) 506 + return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); 507 508 + return sprintf(buf, "\n"); 509 } 510 static DEVICE_ATTR_RO(dax_seed); 511 ··· 581 * times ensure it does not change for the duration of the 582 * allocation. 583 */ 584 + guard(nvdimm_bus)(dev); 585 nd_region->align = val; 586 587 return len; 588 } ··· 890 */ 891 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) 892 { 893 + guard(nvdimm_bus)(dev); 894 if (nd_region->ns_seed == dev) { 895 nd_region_create_ns_seed(nd_region); 896 } else if (is_nd_btt(dev)) { ··· 915 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) 916 nd_region_create_ns_seed(nd_region); 917 } 918 } 919 920 /**
+3 -7
drivers/nvdimm/security.c
··· 219 int nvdimm_security_unlock(struct device *dev) 220 { 221 struct nvdimm *nvdimm = to_nvdimm(dev); 222 - int rc; 223 224 - nvdimm_bus_lock(dev); 225 - rc = __nvdimm_security_unlock(nvdimm); 226 - nvdimm_bus_unlock(dev); 227 - return rc; 228 } 229 230 static int check_security_state(struct nvdimm *nvdimm) ··· 487 struct nvdimm *nvdimm = 488 container_of(work, typeof(*nvdimm), dwork.work); 489 490 - nvdimm_bus_lock(&nvdimm->dev); 491 __nvdimm_security_overwrite_query(nvdimm); 492 - nvdimm_bus_unlock(&nvdimm->dev); 493 } 494 495 #define OPS \
··· 219 int nvdimm_security_unlock(struct device *dev) 220 { 221 struct nvdimm *nvdimm = to_nvdimm(dev); 222 223 + guard(nvdimm_bus)(dev); 224 + return __nvdimm_security_unlock(nvdimm); 225 } 226 227 static int check_security_state(struct nvdimm *nvdimm) ··· 490 struct nvdimm *nvdimm = 491 container_of(work, typeof(*nvdimm), dwork.work); 492 493 + guard(nvdimm_bus)(&nvdimm->dev); 494 __nvdimm_security_overwrite_query(nvdimm); 495 } 496 497 #define OPS \
+12 -1
tools/testing/nvdimm/test/ndtest.c
··· 850 851 p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 852 sizeof(dma_addr_t), GFP_KERNEL); 853 p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 854 sizeof(dma_addr_t), GFP_KERNEL); 855 p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 856 sizeof(dma_addr_t), GFP_KERNEL); 857 - 858 rc = ndtest_nvdimm_init(p); 859 if (rc) 860 goto err;
··· 850 851 p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 852 sizeof(dma_addr_t), GFP_KERNEL); 853 + if (!p->dcr_dma) { 854 + rc = -ENOMEM; 855 + goto err; 856 + } 857 p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 858 sizeof(dma_addr_t), GFP_KERNEL); 859 + if (!p->label_dma) { 860 + rc = -ENOMEM; 861 + goto err; 862 + } 863 p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR, 864 sizeof(dma_addr_t), GFP_KERNEL); 865 + if (!p->dimm_dma) { 866 + rc = -ENOMEM; 867 + goto err; 868 + } 869 rc = ndtest_nvdimm_init(p); 870 if (rc) 871 goto err;
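
Because these are devm_* allocations, the fix only needs the missing NULL checks — the memory is released automatically on unbind, and failures are routed through ndtest's existing err label. For a driver without extra teardown the same shape reduces to direct returns; a hypothetical sketch:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define DEMO_NUM_SLOTS 4  /* hypothetical */

    struct demo_priv {  /* hypothetical driver state */
            dma_addr_t *dma;
    };

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_priv *p;

            p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
            if (!p)
                    return -ENOMEM;

            p->dma = devm_kcalloc(&pdev->dev, DEMO_NUM_SLOTS,
                                  sizeof(dma_addr_t), GFP_KERNEL);
            if (!p->dma)
                    return -ENOMEM;  /* devm frees p on unbind/failure */

            return 0;
    }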