Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'libnvdimm-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Ira Weiny:
"A mix of bug fixes and updates to interfaces used by nvdimm:

- Updates to interfaces include:
  - Use the new scope-based management
  - Remove deprecated ida interfaces
  - Update to sysfs_emit()

- Fixup kdoc comments"

* tag 'libnvdimm-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
acpi/nfit: Use sysfs_emit() for all attributes
nvdimm/namespace: fix kernel-doc for function params
nvdimm/dimm_devs: fix kernel-doc for function params
nvdimm/btt: fix btt_blk_cleanup() kernel-doc
nvdimm-btt: simplify code with the scope based resource management
nvdimm: Remove usage of the deprecated ida_simple_xx() API
ACPI: NFIT: Use cleanup.h helpers instead of devm_*()

+71 -63
+30 -35
drivers/acpi/nfit/core.c
··· 1186 1186 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1187 1187 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1188 1188 1189 - return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); 1189 + return sysfs_emit(buf, "%#lx\n", acpi_desc->bus_dsm_mask); 1190 1190 } 1191 1191 static struct device_attribute dev_attr_bus_dsm_mask = 1192 1192 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); ··· 1198 1198 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1199 1199 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1200 1200 1201 - return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); 1201 + return sysfs_emit(buf, "%d\n", acpi_desc->acpi_header.revision); 1202 1202 } 1203 1203 static DEVICE_ATTR_RO(revision); 1204 1204 ··· 1209 1209 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1210 1210 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1211 1211 1212 - return sprintf(buf, "%d\n", acpi_desc->scrub_mode); 1212 + return sysfs_emit(buf, "%d\n", acpi_desc->scrub_mode); 1213 1213 } 1214 1214 1215 1215 /* ··· 1278 1278 mutex_lock(&acpi_desc->init_mutex); 1279 1279 busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) 1280 1280 && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); 1281 - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); 1281 + rc = sysfs_emit(buf, "%d%s", acpi_desc->scrub_count, busy ? 
"+\n" : "\n"); 1282 1282 /* Allow an admin to poll the busy state at a higher rate */ 1283 1283 if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, 1284 1284 &acpi_desc->scrub_flags)) { ··· 1382 1382 { 1383 1383 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1384 1384 1385 - return sprintf(buf, "%#x\n", memdev->device_handle); 1385 + return sysfs_emit(buf, "%#x\n", memdev->device_handle); 1386 1386 } 1387 1387 static DEVICE_ATTR_RO(handle); 1388 1388 ··· 1391 1391 { 1392 1392 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 1393 1393 1394 - return sprintf(buf, "%#x\n", memdev->physical_id); 1394 + return sysfs_emit(buf, "%#x\n", memdev->physical_id); 1395 1395 } 1396 1396 static DEVICE_ATTR_RO(phys_id); 1397 1397 ··· 1400 1400 { 1401 1401 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1402 1402 1403 - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1403 + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); 1404 1404 } 1405 1405 static DEVICE_ATTR_RO(vendor); 1406 1406 ··· 1409 1409 { 1410 1410 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1411 1411 1412 - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1412 + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); 1413 1413 } 1414 1414 static DEVICE_ATTR_RO(rev_id); 1415 1415 ··· 1418 1418 { 1419 1419 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1420 1420 1421 - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1421 + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); 1422 1422 } 1423 1423 static DEVICE_ATTR_RO(device); 1424 1424 ··· 1427 1427 { 1428 1428 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1429 1429 1430 - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1430 + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); 1431 1431 } 1432 1432 static DEVICE_ATTR_RO(subsystem_vendor); 1433 1433 ··· 1436 1436 { 
1437 1437 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1438 1438 1439 - return sprintf(buf, "0x%04x\n", 1439 + return sysfs_emit(buf, "0x%04x\n", 1440 1440 be16_to_cpu(dcr->subsystem_revision_id)); 1441 1441 } 1442 1442 static DEVICE_ATTR_RO(subsystem_rev_id); ··· 1446 1446 { 1447 1447 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1448 1448 1449 - return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1449 + return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); 1450 1450 } 1451 1451 static DEVICE_ATTR_RO(subsystem_device); 1452 1452 ··· 1465 1465 { 1466 1466 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1467 1467 1468 - return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1468 + return sysfs_emit(buf, "0x%04x\n", le16_to_cpu(dcr->code)); 1469 1469 } 1470 1470 static DEVICE_ATTR_RO(format); 1471 1471 ··· 1498 1498 continue; 1499 1499 if (nfit_dcr->dcr->code == dcr->code) 1500 1500 continue; 1501 - rc = sprintf(buf, "0x%04x\n", 1501 + rc = sysfs_emit(buf, "0x%04x\n", 1502 1502 le16_to_cpu(nfit_dcr->dcr->code)); 1503 1503 break; 1504 1504 } ··· 1515 1515 { 1516 1516 struct nvdimm *nvdimm = to_nvdimm(dev); 1517 1517 1518 - return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1518 + return sysfs_emit(buf, "%d\n", num_nvdimm_formats(nvdimm)); 1519 1519 } 1520 1520 static DEVICE_ATTR_RO(formats); 1521 1521 ··· 1524 1524 { 1525 1525 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 1526 1526 1527 - return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1527 + return sysfs_emit(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); 1528 1528 } 1529 1529 static DEVICE_ATTR_RO(serial); 1530 1530 ··· 1536 1536 1537 1537 if (nfit_mem->family < 0) 1538 1538 return -ENXIO; 1539 - return sprintf(buf, "%d\n", nfit_mem->family); 1539 + return sysfs_emit(buf, "%d\n", nfit_mem->family); 1540 1540 } 1541 1541 static DEVICE_ATTR_RO(family); 1542 1542 ··· 1548 1548 1549 1549 if (nfit_mem->family 
< 0) 1550 1550 return -ENXIO; 1551 - return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); 1551 + return sysfs_emit(buf, "%#lx\n", nfit_mem->dsm_mask); 1552 1552 } 1553 1553 static DEVICE_ATTR_RO(dsm_mask); 1554 1554 ··· 1562 1562 if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) 1563 1563 flags |= ACPI_NFIT_MEM_FLUSH_FAILED; 1564 1564 1565 - return sprintf(buf, "%s%s%s%s%s%s%s\n", 1565 + return sysfs_emit(buf, "%s%s%s%s%s%s%s\n", 1566 1566 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 1567 1567 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 1568 1568 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", ··· 1579 1579 struct nvdimm *nvdimm = to_nvdimm(dev); 1580 1580 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1581 1581 1582 - return sprintf(buf, "%s\n", nfit_mem->id); 1582 + return sysfs_emit(buf, "%s\n", nfit_mem->id); 1583 1583 } 1584 1584 static DEVICE_ATTR_RO(id); 1585 1585 ··· 1589 1589 struct nvdimm *nvdimm = to_nvdimm(dev); 1590 1590 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1591 1591 1592 - return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown); 1592 + return sysfs_emit(buf, "%d\n", nfit_mem->dirty_shutdown); 1593 1593 } 1594 1594 static DEVICE_ATTR_RO(dirty_shutdown); 1595 1595 ··· 2172 2172 struct nd_region *nd_region = to_nd_region(dev); 2173 2173 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 2174 2174 2175 - return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 2175 + return sysfs_emit(buf, "%d\n", nfit_spa->spa->range_index); 2176 2176 } 2177 2177 static DEVICE_ATTR_RO(range_index); 2178 2178 ··· 2257 2257 struct nd_region_desc *ndr_desc, 2258 2258 struct acpi_nfit_system_address *spa) 2259 2259 { 2260 + u16 nr = ndr_desc->num_mappings; 2261 + struct nfit_set_info2 *info2 __free(kfree) = 2262 + kcalloc(nr, sizeof(*info2), GFP_KERNEL); 2263 + struct nfit_set_info *info __free(kfree) = 2264 + kcalloc(nr, sizeof(*info), GFP_KERNEL); 2260 2265 struct device *dev = acpi_desc->dev; 2261 
2266 struct nd_interleave_set *nd_set; 2262 - u16 nr = ndr_desc->num_mappings; 2263 - struct nfit_set_info2 *info2; 2264 - struct nfit_set_info *info; 2265 2267 int i; 2268 + 2269 + if (!info || !info2) 2270 + return -ENOMEM; 2266 2271 2267 2272 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2268 2273 if (!nd_set) 2269 2274 return -ENOMEM; 2270 2275 import_guid(&nd_set->type_guid, spa->range_guid); 2271 - 2272 - info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL); 2273 - if (!info) 2274 - return -ENOMEM; 2275 - 2276 - info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL); 2277 - if (!info2) 2278 - return -ENOMEM; 2279 2276 2280 2277 for (i = 0; i < nr; i++) { 2281 2278 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; ··· 2334 2337 } 2335 2338 2336 2339 ndr_desc->nd_set = nd_set; 2337 - devm_kfree(dev, info); 2338 - devm_kfree(dev, info2); 2339 2340 2340 2341 return 0; 2341 2342 }
+5 -10
drivers/nvdimm/btt.c
··· 16 16 #include <linux/fs.h> 17 17 #include <linux/nd.h> 18 18 #include <linux/backing-dev.h> 19 + #include <linux/cleanup.h> 19 20 #include "btt.h" 20 21 #include "nd.h" 21 22 ··· 848 847 { 849 848 int ret = 0; 850 849 struct arena_info *arena; 851 - struct btt_sb *super; 852 850 size_t remaining = btt->rawsize; 853 851 u64 cur_nlba = 0; 854 852 size_t cur_off = 0; 855 853 int num_arenas = 0; 856 854 857 - super = kzalloc(sizeof(*super), GFP_KERNEL); 855 + struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL); 858 856 if (!super) 859 857 return -ENOMEM; 860 858 861 859 while (remaining) { 862 860 /* Alloc memory for arena */ 863 861 arena = alloc_arena(btt, 0, 0, 0); 864 - if (!arena) { 865 - ret = -ENOMEM; 866 - goto out_super; 867 - } 862 + if (!arena) 863 + return -ENOMEM; 868 864 869 865 arena->infooff = cur_off; 870 866 ret = btt_info_read(arena, super); ··· 917 919 btt->nlba = cur_nlba; 918 920 btt->init_state = INIT_READY; 919 921 920 - kfree(super); 921 922 return ret; 922 923 923 924 out: 924 925 kfree(arena); 925 926 free_arenas(btt); 926 - out_super: 927 - kfree(super); 928 927 return ret; 929 928 } 930 929 ··· 1545 1550 * @rawsize: raw size in bytes of the backing device 1546 1551 * @lbasize: lba size of the backing device 1547 1552 * @uuid: A uuid for the backing device - this is stored on media 1548 - * @maxlane: maximum number of parallel requests the device can handle 1553 + * @nd_region: &struct nd_region for the REGION device 1549 1554 * 1550 1555 * Initialize a Block Translation Table on a backing device to provide 1551 1556 * single sector power fail atomicity.
+3 -3
drivers/nvdimm/btt_devs.c
··· 19 19 20 20 dev_dbg(dev, "trace\n"); 21 21 nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns); 22 - ida_simple_remove(&nd_region->btt_ida, nd_btt->id); 22 + ida_free(&nd_region->btt_ida, nd_btt->id); 23 23 kfree(nd_btt->uuid); 24 24 kfree(nd_btt); 25 25 } ··· 191 191 if (!nd_btt) 192 192 return NULL; 193 193 194 - nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); 194 + nd_btt->id = ida_alloc(&nd_region->btt_ida, GFP_KERNEL); 195 195 if (nd_btt->id < 0) 196 196 goto out_nd_btt; 197 197 ··· 217 217 return dev; 218 218 219 219 out_put_id: 220 - ida_simple_remove(&nd_region->btt_ida, nd_btt->id); 220 + ida_free(&nd_region->btt_ida, nd_btt->id); 221 221 222 222 out_nd_btt: 223 223 kfree(nd_btt);
+2 -2
drivers/nvdimm/bus.c
··· 285 285 struct nvdimm_bus *nvdimm_bus; 286 286 287 287 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev); 288 - ida_simple_remove(&nd_ida, nvdimm_bus->id); 288 + ida_free(&nd_ida, nvdimm_bus->id); 289 289 kfree(nvdimm_bus); 290 290 } 291 291 ··· 342 342 INIT_LIST_HEAD(&nvdimm_bus->list); 343 343 INIT_LIST_HEAD(&nvdimm_bus->mapping_list); 344 344 init_waitqueue_head(&nvdimm_bus->wait); 345 - nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); 345 + nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL); 346 346 if (nvdimm_bus->id < 0) { 347 347 kfree(nvdimm_bus); 348 348 return NULL;
+2 -2
drivers/nvdimm/dax_devs.c
··· 18 18 19 19 dev_dbg(dev, "trace\n"); 20 20 nd_detach_ndns(dev, &nd_pfn->ndns); 21 - ida_simple_remove(&nd_region->dax_ida, nd_pfn->id); 21 + ida_free(&nd_region->dax_ida, nd_pfn->id); 22 22 kfree(nd_pfn->uuid); 23 23 kfree(nd_dax); 24 24 } ··· 55 55 return NULL; 56 56 57 57 nd_pfn = &nd_dax->nd_pfn; 58 - nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL); 58 + nd_pfn->id = ida_alloc(&nd_region->dax_ida, GFP_KERNEL); 59 59 if (nd_pfn->id < 0) { 60 60 kfree(nd_dax); 61 61 return NULL;
+13 -4
drivers/nvdimm/dimm_devs.c
··· 53 53 54 54 /** 55 55 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area 56 - * @nvdimm: dimm to initialize 56 + * @ndd: dimm to initialize 57 + * 58 + * Returns: %0 if the area is already valid, -errno on error 57 59 */ 58 60 int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) 59 61 { ··· 196 194 { 197 195 struct nvdimm *nvdimm = to_nvdimm(dev); 198 196 199 - ida_simple_remove(&dimm_ida, nvdimm->id); 197 + ida_free(&dimm_ida, nvdimm->id); 200 198 kfree(nvdimm); 201 199 } 202 200 ··· 594 592 if (!nvdimm) 595 593 return NULL; 596 594 597 - nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL); 595 + nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL); 598 596 if (nvdimm->id < 0) { 599 597 kfree(nvdimm); 600 598 return NULL; ··· 724 722 * contiguous unallocated dpa range. 725 723 * @nd_region: constrain available space check to this reference region 726 724 * @nd_mapping: container of dpa-resource-root + labels 725 + * 726 + * Returns: %0 if there is an alignment error, otherwise the max 727 + * unallocated dpa range 727 728 */ 728 729 resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, 729 730 struct nd_mapping *nd_mapping) ··· 772 767 * 773 768 * Validate that a PMEM label, if present, aligns with the start of an 774 769 * interleave set. 
770 + * 771 + * Returns: %0 if there is an alignment error, otherwise the unallocated dpa 775 772 */ 776 773 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, 777 774 struct nd_mapping *nd_mapping) ··· 843 836 844 837 /** 845 838 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id 846 - * @nvdimm: container of dpa-resource-root + labels 839 + * @ndd: container of dpa-resource-root + labels 847 840 * @label_id: dpa resource name of the form pmem-<human readable uuid> 841 + * 842 + * Returns: sum of the dpa allocated to the label_id 848 843 */ 849 844 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, 850 845 struct nd_label_id *label_id)
+14 -5
drivers/nvdimm/namespace_devs.c
··· 27 27 struct nd_region *nd_region = to_nd_region(dev->parent); 28 28 29 29 if (nspm->id >= 0) 30 - ida_simple_remove(&nd_region->ns_ida, nspm->id); 30 + ida_free(&nd_region->ns_ida, nspm->id); 31 31 kfree(nspm->alt_name); 32 32 kfree(nspm->uuid); 33 33 kfree(nspm); ··· 71 71 * nd_is_uuid_unique - verify that no other namespace has @uuid 72 72 * @dev: any device on a nvdimm_bus 73 73 * @uuid: uuid to check 74 + * 75 + * Returns: %true if the uuid is unique, %false if not 74 76 */ 75 77 bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid) 76 78 { ··· 339 337 * adjust_resource() the allocation to @n, but if @n is larger than the 340 338 * allocation delete it and find the 'new' last allocation in the label 341 339 * set. 340 + * 341 + * Returns: %0 on success on -errno on error 342 342 */ 343 343 static int shrink_dpa_allocation(struct nd_region *nd_region, 344 344 struct nd_label_id *label_id, resource_size_t n) ··· 666 662 * allocations from the start of an interleave set and end at the first 667 663 * BLK allocation or the end of the interleave set, whichever comes 668 664 * first. 
665 + * 666 + * Returns: %0 on success on -errno on error 669 667 */ 670 668 static int grow_dpa_allocation(struct nd_region *nd_region, 671 669 struct nd_label_id *label_id, resource_size_t n) ··· 957 951 * @dev: namespace type for generating label_id 958 952 * @new_uuid: incoming uuid 959 953 * @old_uuid: reference to the uuid storage location in the namespace object 954 + * 955 + * Returns: %0 on success on -errno on error 960 956 */ 961 957 static int namespace_update_uuid(struct nd_region *nd_region, 962 958 struct device *dev, uuid_t *new_uuid, ··· 1664 1656 /** 1665 1657 * create_namespace_pmem - validate interleave set labelling, retrieve label0 1666 1658 * @nd_region: region with mappings to validate 1667 - * @nspm: target namespace to create 1659 + * @nd_mapping: container of dpa-resource-root + labels 1668 1660 * @nd_label: target pmem namespace label to evaluate 1661 + * 1662 + * Returns: the created &struct device on success or ERR_PTR(-errno) on error 1669 1663 */ 1670 1664 static struct device *create_namespace_pmem(struct nd_region *nd_region, 1671 1665 struct nd_mapping *nd_mapping, ··· 1820 1810 res->name = dev_name(&nd_region->dev); 1821 1811 res->flags = IORESOURCE_MEM; 1822 1812 1823 - nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); 1813 + nspm->id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL); 1824 1814 if (nspm->id < 0) { 1825 1815 kfree(nspm); 1826 1816 return NULL; ··· 2198 2188 struct nd_namespace_pmem *nspm; 2199 2189 2200 2190 nspm = to_nd_namespace_pmem(dev); 2201 - id = ida_simple_get(&nd_region->ns_ida, 0, 0, 2202 - GFP_KERNEL); 2191 + id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL); 2203 2192 nspm->id = id; 2204 2193 } else 2205 2194 id = i;
+2 -2
drivers/nvdimm/pfn_devs.c
··· 22 22 23 23 dev_dbg(dev, "trace\n"); 24 24 nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns); 25 - ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id); 25 + ida_free(&nd_region->pfn_ida, nd_pfn->id); 26 26 kfree(nd_pfn->uuid); 27 27 kfree(nd_pfn); 28 28 } ··· 326 326 if (!nd_pfn) 327 327 return NULL; 328 328 329 - nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL); 329 + nd_pfn->id = ida_alloc(&nd_region->pfn_ida, GFP_KERNEL); 330 330 if (nd_pfn->id < 0) { 331 331 kfree(nd_pfn); 332 332 return NULL;