Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nvdimm/region: Delete nd_blk_region infrastructure

Now that the nd_namespace_blk infrastructure is removed, delete all of the
region machinery that coordinated provisioning of aliased capacity between
PMEM and BLK.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/164688418803.2879318.1302315202397235855.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+66 -604
+3 -8
drivers/acpi/nfit/core.c
··· 2036 2036 cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; 2037 2037 } 2038 2038 2039 - /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */ 2040 - if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) 2041 - set_bit(NDD_NOBLK, &flags); 2042 - 2043 2039 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { 2044 2040 set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); 2045 2041 set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); ··· 2598 2602 { 2599 2603 static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; 2600 2604 struct acpi_nfit_system_address *spa = nfit_spa->spa; 2601 - struct nd_blk_region_desc ndbr_desc; 2602 - struct nd_region_desc *ndr_desc; 2605 + struct nd_region_desc *ndr_desc, _ndr_desc; 2603 2606 struct nfit_memdev *nfit_memdev; 2604 2607 struct nvdimm_bus *nvdimm_bus; 2605 2608 struct resource res; ··· 2614 2619 2615 2620 memset(&res, 0, sizeof(res)); 2616 2621 memset(&mappings, 0, sizeof(mappings)); 2617 - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 2622 + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); 2618 2623 res.start = spa->address; 2619 2624 res.end = res.start + spa->length - 1; 2620 - ndr_desc = &ndbr_desc.ndr_desc; 2625 + ndr_desc = &_ndr_desc; 2621 2626 ndr_desc->res = &res; 2622 2627 ndr_desc->provider_data = nfit_spa; 2623 2628 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
-2
drivers/nvdimm/bus.c
··· 35 35 return ND_DEVICE_DIMM; 36 36 else if (is_memory(dev)) 37 37 return ND_DEVICE_REGION_PMEM; 38 - else if (is_nd_blk(dev)) 39 - return ND_DEVICE_REGION_BLK; 40 38 else if (is_nd_dax(dev)) 41 39 return ND_DEVICE_DAX_PMEM; 42 40 else if (is_nd_region(dev->parent))
+14 -190
drivers/nvdimm/dimm_devs.c
··· 18 18 19 19 static DEFINE_IDA(dimm_ida); 20 20 21 - static bool noblk; 22 - module_param(noblk, bool, 0444); 23 - MODULE_PARM_DESC(noblk, "force disable BLK / local alias support"); 24 - 25 21 /* 26 22 * Retrieve bus and dimm handle and return if this bus supports 27 23 * get_config_data commands ··· 207 211 } 208 212 EXPORT_SYMBOL_GPL(to_nvdimm); 209 213 210 - struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr) 211 - { 212 - struct nd_region *nd_region = &ndbr->nd_region; 213 - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 214 - 215 - return nd_mapping->nvdimm; 216 - } 217 - EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm); 218 - 219 - unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr) 220 - { 221 - /* pmem mapping properties are private to libnvdimm */ 222 - return ARCH_MEMREMAP_PMEM; 223 - } 224 - EXPORT_SYMBOL_GPL(nd_blk_memremap_flags); 225 - 226 214 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping) 227 215 { 228 216 struct nvdimm *nvdimm = nd_mapping->nvdimm; ··· 292 312 { 293 313 struct nvdimm *nvdimm = to_nvdimm(dev); 294 314 295 - return sprintf(buf, "%s%s%s\n", 296 - test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "", 315 + return sprintf(buf, "%s%s\n", 297 316 test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "", 298 317 test_bit(NDD_LOCKED, &nvdimm->flags) ? 
"lock " : ""); 299 318 } ··· 591 612 592 613 nvdimm->dimm_id = dimm_id; 593 614 nvdimm->provider_data = provider_data; 594 - if (noblk) 595 - flags |= 1 << NDD_NOBLK; 596 615 nvdimm->flags = flags; 597 616 nvdimm->cmd_mask = cmd_mask; 598 617 nvdimm->num_flush = num_flush; ··· 703 726 return nd_region->align / nd_region->ndr_mappings; 704 727 } 705 728 706 - int alias_dpa_busy(struct device *dev, void *data) 707 - { 708 - resource_size_t map_end, blk_start, new; 709 - struct blk_alloc_info *info = data; 710 - struct nd_mapping *nd_mapping; 711 - struct nd_region *nd_region; 712 - struct nvdimm_drvdata *ndd; 713 - struct resource *res; 714 - unsigned long align; 715 - int i; 716 - 717 - if (!is_memory(dev)) 718 - return 0; 719 - 720 - nd_region = to_nd_region(dev); 721 - for (i = 0; i < nd_region->ndr_mappings; i++) { 722 - nd_mapping = &nd_region->mapping[i]; 723 - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) 724 - break; 725 - } 726 - 727 - if (i >= nd_region->ndr_mappings) 728 - return 0; 729 - 730 - ndd = to_ndd(nd_mapping); 731 - map_end = nd_mapping->start + nd_mapping->size - 1; 732 - blk_start = nd_mapping->start; 733 - 734 - /* 735 - * In the allocation case ->res is set to free space that we are 736 - * looking to validate against PMEM aliasing collision rules 737 - * (i.e. BLK is allocated after all aliased PMEM). 738 - */ 739 - if (info->res) { 740 - if (info->res->start >= nd_mapping->start 741 - && info->res->start < map_end) 742 - /* pass */; 743 - else 744 - return 0; 745 - } 746 - 747 - retry: 748 - /* 749 - * Find the free dpa from the end of the last pmem allocation to 750 - * the end of the interleave-set mapping. 
751 - */ 752 - align = dpa_align(nd_region); 753 - if (!align) 754 - return 0; 755 - 756 - for_each_dpa_resource(ndd, res) { 757 - resource_size_t start, end; 758 - 759 - if (strncmp(res->name, "pmem", 4) != 0) 760 - continue; 761 - 762 - start = ALIGN_DOWN(res->start, align); 763 - end = ALIGN(res->end + 1, align) - 1; 764 - if ((start >= blk_start && start < map_end) 765 - || (end >= blk_start && end <= map_end)) { 766 - new = max(blk_start, min(map_end, end) + 1); 767 - if (new != blk_start) { 768 - blk_start = new; 769 - goto retry; 770 - } 771 - } 772 - } 773 - 774 - /* update the free space range with the probed blk_start */ 775 - if (info->res && blk_start > info->res->start) { 776 - info->res->start = max(info->res->start, blk_start); 777 - if (info->res->start > info->res->end) 778 - info->res->end = info->res->start - 1; 779 - return 1; 780 - } 781 - 782 - info->available -= blk_start - nd_mapping->start; 783 - 784 - return 0; 785 - } 786 - 787 - /** 788 - * nd_blk_available_dpa - account the unused dpa of BLK region 789 - * @nd_mapping: container of dpa-resource-root + labels 790 - * 791 - * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but 792 - * we arrange for them to never start at an lower dpa than the last 793 - * PMEM allocation in an aliased region. 
794 - */ 795 - resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) 796 - { 797 - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 798 - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 799 - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 800 - struct blk_alloc_info info = { 801 - .nd_mapping = nd_mapping, 802 - .available = nd_mapping->size, 803 - .res = NULL, 804 - }; 805 - struct resource *res; 806 - unsigned long align; 807 - 808 - if (!ndd) 809 - return 0; 810 - 811 - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); 812 - 813 - /* now account for busy blk allocations in unaliased dpa */ 814 - align = dpa_align(nd_region); 815 - if (!align) 816 - return 0; 817 - for_each_dpa_resource(ndd, res) { 818 - resource_size_t start, end, size; 819 - 820 - if (strncmp(res->name, "blk", 3) != 0) 821 - continue; 822 - start = ALIGN_DOWN(res->start, align); 823 - end = ALIGN(res->end + 1, align) - 1; 824 - size = end - start + 1; 825 - if (size >= info.available) 826 - return 0; 827 - info.available -= size; 828 - } 829 - 830 - return info.available; 831 - } 832 - 833 729 /** 834 730 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max 835 731 * contiguous unallocated dpa range. ··· 750 900 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa 751 901 * @nd_mapping: container of dpa-resource-root + labels 752 902 * @nd_region: constrain available space check to this reference region 753 - * @overlap: calculate available space assuming this level of overlap 754 903 * 755 904 * Validate that a PMEM label, if present, aligns with the start of an 756 - * interleave set and truncate the available size at the lowest BLK 757 - * overlap point. 758 - * 759 - * The expectation is that this routine is called multiple times as it 760 - * probes for the largest BLK encroachment for any single member DIMM of 761 - * the interleave set. 
Once that value is determined the PMEM-limit for 762 - * the set can be established. 905 + * interleave set. 763 906 */ 764 907 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, 765 - struct nd_mapping *nd_mapping, resource_size_t *overlap) 908 + struct nd_mapping *nd_mapping) 766 909 { 767 - resource_size_t map_start, map_end, busy = 0, available, blk_start; 768 910 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 911 + resource_size_t map_start, map_end, busy = 0; 769 912 struct resource *res; 770 - const char *reason; 771 913 unsigned long align; 772 914 773 915 if (!ndd) ··· 771 929 772 930 map_start = nd_mapping->start; 773 931 map_end = map_start + nd_mapping->size - 1; 774 - blk_start = max(map_start, map_end + 1 - *overlap); 775 932 for_each_dpa_resource(ndd, res) { 776 933 resource_size_t start, end; 777 934 778 935 start = ALIGN_DOWN(res->start, align); 779 936 end = ALIGN(res->end + 1, align) - 1; 780 937 if (start >= map_start && start < map_end) { 781 - if (strncmp(res->name, "blk", 3) == 0) 782 - blk_start = min(blk_start, 783 - max(map_start, start)); 784 - else if (end > map_end) { 785 - reason = "misaligned to iset"; 786 - goto err; 787 - } else 788 - busy += end - start + 1; 938 + if (end > map_end) { 939 + nd_dbg_dpa(nd_region, ndd, res, 940 + "misaligned to iset\n"); 941 + return 0; 942 + } 943 + busy += end - start + 1; 789 944 } else if (end >= map_start && end <= map_end) { 790 - if (strncmp(res->name, "blk", 3) == 0) { 791 - /* 792 - * If a BLK allocation overlaps the start of 793 - * PMEM the entire interleave set may now only 794 - * be used for BLK. 
795 - */ 796 - blk_start = map_start; 797 - } else 798 - busy += end - start + 1; 945 + busy += end - start + 1; 799 946 } else if (map_start > start && map_start < end) { 800 947 /* total eclipse of the mapping */ 801 948 busy += nd_mapping->size; 802 - blk_start = map_start; 803 949 } 804 950 } 805 951 806 - *overlap = map_end + 1 - blk_start; 807 - available = blk_start - map_start; 808 - if (busy < available) 809 - return ALIGN_DOWN(available - busy, align); 810 - return 0; 811 - 812 - err: 813 - nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); 952 + if (busy < nd_mapping->size) 953 + return ALIGN_DOWN(nd_mapping->size - busy, align); 814 954 return 0; 815 955 } 816 956 ··· 823 999 /** 824 1000 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id 825 1001 * @nvdimm: container of dpa-resource-root + labels 826 - * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid> 1002 + * @label_id: dpa resource name of the form pmem-<human readable uuid> 827 1003 */ 828 1004 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, 829 1005 struct nd_label_id *label_id)
+1 -5
drivers/nvdimm/label.c
··· 334 334 { 335 335 if (!label_id || !uuid) 336 336 return NULL; 337 - snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb", 338 - flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid); 337 + snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid); 339 338 return label_id->id; 340 339 } 341 340 ··· 405 406 return 0; /* no label, nothing to reserve */ 406 407 407 408 for_each_clear_bit_le(slot, free, nslot) { 408 - struct nvdimm *nvdimm = to_nvdimm(ndd->dev); 409 409 struct nd_namespace_label *nd_label; 410 410 struct nd_region *nd_region = NULL; 411 411 struct nd_label_id label_id; ··· 419 421 420 422 nsl_get_uuid(ndd, nd_label, &label_uuid); 421 423 flags = nsl_get_flags(ndd, nd_label); 422 - if (test_bit(NDD_NOBLK, &nvdimm->flags)) 423 - flags &= ~NSLABEL_FLAG_LOCAL; 424 424 nd_label_gen_id(&label_id, &label_uuid, flags); 425 425 res = nvdimm_allocate_dpa(ndd, &label_id, 426 426 nsl_get_dpa(ndd, nd_label),
+1 -1
drivers/nvdimm/label.h
··· 193 193 194 194 /** 195 195 * struct nd_label_id - identifier string for dpa allocation 196 - * @id: "{blk|pmem}-<namespace uuid>" 196 + * @id: "pmem-<namespace uuid>" 197 197 */ 198 198 struct nd_label_id { 199 199 char id[ND_LABEL_ID_SIZE];
+12 -115
drivers/nvdimm/namespace_devs.c
··· 297 297 struct nd_mapping *nd_mapping, struct nd_label_id *label_id, 298 298 resource_size_t n) 299 299 { 300 - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; 301 300 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 302 301 int rc = 0; 303 302 304 303 while (n) { 305 304 struct resource *res, *last; 306 - resource_size_t new_start; 307 305 308 306 last = NULL; 309 307 for_each_dpa_resource(ndd, res) ··· 319 321 continue; 320 322 } 321 323 322 - /* 323 - * Keep BLK allocations relegated to high DPA as much as 324 - * possible 325 - */ 326 - if (is_blk) 327 - new_start = res->start + n; 328 - else 329 - new_start = res->start; 330 - 331 - rc = adjust_resource(res, new_start, resource_size(res) - n); 324 + rc = adjust_resource(res, res->start, resource_size(res) - n); 332 325 if (rc == 0) 333 326 res->flags |= DPA_RESOURCE_ADJUSTED; 334 327 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); ··· 361 372 struct nd_region *nd_region, struct nd_mapping *nd_mapping, 362 373 resource_size_t n) 363 374 { 364 - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; 365 375 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 366 - resource_size_t first_dpa; 367 376 struct resource *res; 368 377 int rc = 0; 369 378 370 - /* allocate blk from highest dpa first */ 371 - if (is_blk) 372 - first_dpa = nd_mapping->start + nd_mapping->size - n; 373 - else 374 - first_dpa = nd_mapping->start; 375 - 376 379 /* first resource allocation for this label-id or dimm */ 377 - res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n); 380 + res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); 378 381 if (!res) 379 382 rc = -EBUSY; 380 383 ··· 397 416 resource_size_t n, struct resource *valid) 398 417 { 399 418 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; 400 - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; 401 419 unsigned long align; 402 420 403 421 align = nd_region->align / nd_region->ndr_mappings; ··· 408 428 409 429 if (is_reserve) 410 430 return; 411 - 
412 - if (!is_pmem) { 413 - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; 414 - struct nvdimm_bus *nvdimm_bus; 415 - struct blk_alloc_info info = { 416 - .nd_mapping = nd_mapping, 417 - .available = nd_mapping->size, 418 - .res = valid, 419 - }; 420 - 421 - WARN_ON(!is_nd_blk(&nd_region->dev)); 422 - nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 423 - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); 424 - return; 425 - } 426 431 427 432 /* allocation needs to be contiguous, so this is all or nothing */ 428 433 if (resource_size(valid) < n) ··· 436 471 resource_size_t n) 437 472 { 438 473 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; 439 - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; 440 474 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 441 475 struct resource *res, *exist = NULL, valid; 442 476 const resource_size_t to_allocate = n; ··· 533 569 } 534 570 535 571 if (strcmp(action, "allocate") == 0) { 536 - /* BLK allocate bottom up */ 537 - if (!is_pmem) 538 - valid.start += available - allocate; 539 - 540 572 new_res = nvdimm_allocate_dpa(ndd, label_id, 541 573 valid.start, allocate); 542 574 if (!new_res) ··· 568 608 return 0; 569 609 } 570 610 571 - /* 572 - * If we allocated nothing in the BLK case it may be because we are in 573 - * an initial "pmem-reserve pass". Only do an initial BLK allocation 574 - * when none of the DPA space is reserved. 
575 - */ 576 - if ((is_pmem || !ndd->dpa.child) && n == to_allocate) 611 + if (n == to_allocate) 577 612 return init_dpa_allocation(label_id, nd_region, nd_mapping, n); 578 613 return n; 579 614 } ··· 627 672 if (nd_mapping->nvdimm != nvdimm) 628 673 continue; 629 674 630 - n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); 675 + n = nd_pmem_available_dpa(nd_region, nd_mapping); 631 676 if (n == 0) 632 677 return 0; 633 678 rem = scan_allocate(nd_region, nd_mapping, &label_id, n); ··· 652 697 nvdimm_free_dpa(ndd, res); 653 698 } 654 699 655 - static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, 656 - struct nd_mapping *nd_mapping) 657 - { 658 - struct nvdimm *nvdimm = nd_mapping->nvdimm; 659 - int rc; 660 - 661 - rc = device_for_each_child(&nvdimm_bus->dev, nvdimm, 662 - __reserve_free_pmem); 663 - if (rc) 664 - release_free_pmem(nvdimm_bus, nd_mapping); 665 - return rc; 666 - } 667 - 668 700 /** 669 701 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id 670 702 * @nd_region: the set of dimms to allocate @n more bytes from ··· 668 726 static int grow_dpa_allocation(struct nd_region *nd_region, 669 727 struct nd_label_id *label_id, resource_size_t n) 670 728 { 671 - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); 672 - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; 673 729 int i; 674 730 675 731 for (i = 0; i < nd_region->ndr_mappings; i++) { 676 732 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 677 733 resource_size_t rem = n; 678 - int rc, j; 734 + int rc; 679 735 680 - /* 681 - * In the BLK case try once with all unallocated PMEM 682 - * reserved, and once without 683 - */ 684 - for (j = is_pmem; j < 2; j++) { 685 - bool blk_only = j == 0; 686 - 687 - if (blk_only) { 688 - rc = reserve_free_pmem(nvdimm_bus, nd_mapping); 689 - if (rc) 690 - return rc; 691 - } 692 - rem = scan_allocate(nd_region, nd_mapping, 693 - label_id, rem); 694 - if (blk_only) 695 - release_free_pmem(nvdimm_bus, nd_mapping); 
696 - 697 - /* try again and allow encroachments into PMEM */ 698 - if (rem == 0) 699 - break; 700 - } 701 - 736 + rem = scan_allocate(nd_region, nd_mapping, label_id, rem); 702 737 dev_WARN_ONCE(&nd_region->dev, rem, 703 738 "allocation underrun: %#llx of %#llx bytes\n", 704 739 (unsigned long long) n - rem, ··· 788 869 ndd = to_ndd(nd_mapping); 789 870 790 871 /* 791 - * All dimms in an interleave set, or the base dimm for a blk 792 - * region, need to be enabled for the size to be changed. 872 + * All dimms in an interleave set, need to be enabled 873 + * for the size to be changed. 793 874 */ 794 875 if (!ndd) 795 876 return -ENXIO; ··· 1087 1168 return sprintf(buf, "%#llx\n", (unsigned long long) res->start); 1088 1169 } 1089 1170 static DEVICE_ATTR_ADMIN_RO(resource); 1090 - 1091 - static const unsigned long blk_lbasize_supported[] = { 512, 520, 528, 1092 - 4096, 4104, 4160, 4224, 0 }; 1093 1171 1094 1172 static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 }; 1095 1173 ··· 1739 1823 /* 1740 1824 * Fix up each mapping's 'labels' to have the validated pmem label for 1741 1825 * that position at labels[0], and NULL at labels[1]. In the process, 1742 - * check that the namespace aligns with interleave-set. We know 1743 - * that it does not overlap with any blk namespaces by virtue of 1744 - * the dimm being enabled (i.e. nd_label_reserve_dpa() 1745 - * succeeded). 1826 + * check that the namespace aligns with interleave-set. 1746 1827 */ 1747 1828 nsl_get_uuid(ndd, nd_label, &uuid); 1748 1829 rc = select_pmem_id(nd_region, &uuid); ··· 1844 1931 * disabled until memory becomes available 1845 1932 */ 1846 1933 if (!nd_region->ns_seed) 1847 - dev_err(&nd_region->dev, "failed to create %s namespace\n", 1848 - is_nd_blk(&nd_region->dev) ? 
"blk" : "pmem"); 1934 + dev_err(&nd_region->dev, "failed to create namespace\n"); 1849 1935 else 1850 1936 nd_device_register(nd_region->ns_seed); 1851 1937 } ··· 1940 2028 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { 1941 2029 struct nd_namespace_label *nd_label = label_ent->label; 1942 2030 struct device **__devs; 1943 - u32 flags; 1944 2031 1945 2032 if (!nd_label) 1946 - continue; 1947 - flags = nsl_get_flags(ndd, nd_label); 1948 - if (is_nd_blk(&nd_region->dev) 1949 - == !!(flags & NSLABEL_FLAG_LOCAL)) 1950 - /* pass, region matches label type */; 1951 - else 1952 2033 continue; 1953 2034 1954 2035 /* skip labels that describe extents outside of the region */ ··· 1978 2073 1979 2074 } 1980 2075 1981 - dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n", 1982 - count, is_nd_blk(&nd_region->dev) 1983 - ? "blk" : "pmem", count == 1 ? "" : "s"); 2076 + dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, 2077 + count == 1 ? "" : "s"); 1984 2078 1985 2079 if (count == 0) { 1986 2080 struct nd_namespace_pmem *nspm; ··· 2130 2226 if (!label_ent) 2131 2227 break; 2132 2228 label = nd_label_active(ndd, j); 2133 - if (test_bit(NDD_NOBLK, &nvdimm->flags)) { 2134 - u32 flags = nsl_get_flags(ndd, label); 2135 - 2136 - flags &= ~NSLABEL_FLAG_LOCAL; 2137 - nsl_set_flags(ndd, label, flags); 2138 - } 2139 2229 label_ent->label = label; 2140 2230 2141 2231 mutex_lock(&nd_mapping->lock); ··· 2173 2275 devs = create_namespace_io(nd_region); 2174 2276 break; 2175 2277 case ND_DEVICE_NAMESPACE_PMEM: 2176 - case ND_DEVICE_NAMESPACE_BLK: 2177 2278 devs = create_namespaces(nd_region); 2178 2279 break; 2179 2280 default:
+2 -22
drivers/nvdimm/nd-core.h
··· 82 82 } 83 83 #endif 84 84 85 - /** 86 - * struct blk_alloc_info - tracking info for BLK dpa scanning 87 - * @nd_mapping: blk region mapping boundaries 88 - * @available: decremented in alias_dpa_busy as aliased PMEM is scanned 89 - * @busy: decremented in blk_dpa_busy to account for ranges already 90 - * handled by alias_dpa_busy 91 - * @res: alias_dpa_busy interprets this a free space range that needs to 92 - * be truncated to the valid BLK allocation starting DPA, blk_dpa_busy 93 - * treats it as a busy range that needs the aliased PMEM ranges 94 - * truncated. 95 - */ 96 - struct blk_alloc_info { 97 - struct nd_mapping *nd_mapping; 98 - resource_size_t available, busy; 99 - struct resource *res; 100 - }; 101 - 102 85 bool is_nvdimm(struct device *dev); 103 86 bool is_nd_pmem(struct device *dev); 104 87 bool is_nd_volatile(struct device *dev); 105 - bool is_nd_blk(struct device *dev); 106 88 static inline bool is_nd_region(struct device *dev) 107 89 { 108 - return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev); 90 + return is_nd_pmem(dev) || is_nd_volatile(dev); 109 91 } 110 92 static inline bool is_memory(struct device *dev) 111 93 { ··· 124 142 struct nd_mapping *nd_mapping); 125 143 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); 126 144 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, 127 - struct nd_mapping *nd_mapping, resource_size_t *overlap); 128 - resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); 145 + struct nd_mapping *nd_mapping); 129 146 resource_size_t nd_region_available_dpa(struct nd_region *nd_region); 130 147 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, 131 148 resource_size_t size); 132 149 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, 133 150 struct nd_label_id *label_id); 134 - int alias_dpa_busy(struct device *dev, void *data); 135 151 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); 136 152 void get_ndd(struct 
nvdimm_drvdata *ndd); 137 153 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
-12
drivers/nvdimm/nd.h
··· 295 295 return nd_label->efi.uuid; 296 296 } 297 297 298 - bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, 299 - struct nd_namespace_label *nd_label, 300 - u64 isetcookie); 301 298 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, 302 299 struct nd_namespace_label *nd_label, guid_t *guid); 303 300 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, ··· 433 436 return true; 434 437 return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings; 435 438 } 436 - 437 - struct nd_blk_region { 438 - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); 439 - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 440 - void *iobuf, u64 len, int rw); 441 - void *blk_provider_data; 442 - struct nd_region nd_region; 443 - }; 444 439 445 440 /* 446 441 * Lookup next in the repeating sequence of 01, 10, and 11. ··· 661 672 return -ENXIO; 662 673 } 663 674 #endif 664 - int nd_blk_region_init(struct nd_region *nd_region); 665 675 int nd_region_activate(struct nd_region *nd_region); 666 676 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, 667 677 unsigned int len)
+11 -20
drivers/nvdimm/region.c
··· 15 15 static unsigned long once; 16 16 struct nd_region_data *ndrd; 17 17 struct nd_region *nd_region = to_nd_region(dev); 18 + struct range range = { 19 + .start = nd_region->ndr_start, 20 + .end = nd_region->ndr_start + nd_region->ndr_size - 1, 21 + }; 18 22 19 23 if (nd_region->num_lanes > num_online_cpus() 20 24 && nd_region->num_lanes < num_possible_cpus() ··· 34 30 if (rc) 35 31 return rc; 36 32 37 - rc = nd_blk_region_init(nd_region); 38 - if (rc) 39 - return rc; 40 - 41 - if (is_memory(&nd_region->dev)) { 42 - struct range range = { 43 - .start = nd_region->ndr_start, 44 - .end = nd_region->ndr_start + nd_region->ndr_size - 1, 45 - }; 46 - 47 - if (devm_init_badblocks(dev, &nd_region->bb)) 48 - return -ENODEV; 49 - nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd, 50 - "badblocks"); 51 - if (!nd_region->bb_state) 52 - dev_warn(&nd_region->dev, 53 - "'badblocks' notification disabled\n"); 54 - nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); 55 - } 33 + if (devm_init_badblocks(dev, &nd_region->bb)) 34 + return -ENODEV; 35 + nd_region->bb_state = 36 + sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks"); 37 + if (!nd_region->bb_state) 38 + dev_warn(dev, "'badblocks' notification disabled\n"); 39 + nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); 56 40 57 41 rc = nd_region_register_namespaces(nd_region, &err); 58 42 if (rc < 0) ··· 150 158 } 151 159 152 160 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM); 153 - MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
+18 -140
drivers/nvdimm/region_devs.c
··· 134 134 } 135 135 free_percpu(nd_region->lane); 136 136 memregion_free(nd_region->id); 137 - if (is_nd_blk(dev)) 138 - kfree(to_nd_blk_region(dev)); 139 - else 140 - kfree(nd_region); 137 + kfree(nd_region); 141 138 } 142 139 143 140 struct nd_region *to_nd_region(struct device *dev) ··· 154 157 } 155 158 EXPORT_SYMBOL_GPL(nd_region_dev); 156 159 157 - struct nd_blk_region *to_nd_blk_region(struct device *dev) 158 - { 159 - struct nd_region *nd_region = to_nd_region(dev); 160 - 161 - WARN_ON(!is_nd_blk(dev)); 162 - return container_of(nd_region, struct nd_blk_region, nd_region); 163 - } 164 - EXPORT_SYMBOL_GPL(to_nd_blk_region); 165 - 166 160 void *nd_region_provider_data(struct nd_region *nd_region) 167 161 { 168 162 return nd_region->provider_data; 169 163 } 170 164 EXPORT_SYMBOL_GPL(nd_region_provider_data); 171 - 172 - void *nd_blk_region_provider_data(struct nd_blk_region *ndbr) 173 - { 174 - return ndbr->blk_provider_data; 175 - } 176 - EXPORT_SYMBOL_GPL(nd_blk_region_provider_data); 177 - 178 - void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data) 179 - { 180 - ndbr->blk_provider_data = data; 181 - } 182 - EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); 183 165 184 166 /** 185 167 * nd_region_to_nstype() - region to an integer namespace type ··· 184 208 return ND_DEVICE_NAMESPACE_PMEM; 185 209 else 186 210 return ND_DEVICE_NAMESPACE_IO; 187 - } else if (is_nd_blk(&nd_region->dev)) { 188 - return ND_DEVICE_NAMESPACE_BLK; 189 211 } 190 212 191 213 return 0; ··· 306 332 307 333 resource_size_t nd_region_available_dpa(struct nd_region *nd_region) 308 334 { 309 - resource_size_t blk_max_overlap = 0, available, overlap; 335 + resource_size_t available; 310 336 int i; 311 337 312 338 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 313 339 314 - retry: 315 340 available = 0; 316 - overlap = blk_max_overlap; 317 341 for (i = 0; i < nd_region->ndr_mappings; i++) { 318 342 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 319 343 
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); ··· 320 348 if (!ndd) 321 349 return 0; 322 350 323 - if (is_memory(&nd_region->dev)) { 324 - available += nd_pmem_available_dpa(nd_region, 325 - nd_mapping, &overlap); 326 - if (overlap > blk_max_overlap) { 327 - blk_max_overlap = overlap; 328 - goto retry; 329 - } 330 - } else if (is_nd_blk(&nd_region->dev)) 331 - available += nd_blk_available_dpa(nd_region); 351 + available += nd_pmem_available_dpa(nd_region, nd_mapping); 332 352 } 333 353 334 354 return available; ··· 328 364 329 365 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) 330 366 { 331 - resource_size_t available = 0; 367 + resource_size_t avail = 0; 332 368 int i; 333 - 334 - if (is_memory(&nd_region->dev)) 335 - available = PHYS_ADDR_MAX; 336 369 337 370 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); 338 371 for (i = 0; i < nd_region->ndr_mappings; i++) { 339 372 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 340 373 341 - if (is_memory(&nd_region->dev)) 342 - available = min(available, 343 - nd_pmem_max_contiguous_dpa(nd_region, 344 - nd_mapping)); 345 - else if (is_nd_blk(&nd_region->dev)) 346 - available += nd_blk_available_dpa(nd_region); 374 + avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa( 375 + nd_region, nd_mapping)); 347 376 } 348 - if (is_memory(&nd_region->dev)) 349 - return available * nd_region->ndr_mappings; 350 - return available; 377 + return avail * nd_region->ndr_mappings; 351 378 } 352 379 353 380 static ssize_t available_size_show(struct device *dev, ··· 648 693 && a != &dev_attr_available_size.attr) 649 694 return a->mode; 650 695 651 - if ((type == ND_DEVICE_NAMESPACE_PMEM 652 - || type == ND_DEVICE_NAMESPACE_BLK) 653 - && a == &dev_attr_available_size.attr) 696 + if (type == ND_DEVICE_NAMESPACE_PMEM && 697 + a == &dev_attr_available_size.attr) 654 698 return a->mode; 655 699 else if (is_memory(dev) && nd_set) 656 700 return a->mode; ··· 782 828 NULL, 783 829 }; 784 830 785 - static const 
struct device_type nd_blk_device_type = { 786 - .name = "nd_blk", 787 - .release = nd_region_release, 788 - .groups = nd_region_attribute_groups, 789 - }; 790 - 791 831 static const struct device_type nd_pmem_device_type = { 792 832 .name = "nd_pmem", 793 833 .release = nd_region_release, ··· 797 849 bool is_nd_pmem(struct device *dev) 798 850 { 799 851 return dev ? dev->type == &nd_pmem_device_type : false; 800 - } 801 - 802 - bool is_nd_blk(struct device *dev) 803 - { 804 - return dev ? dev->type == &nd_blk_device_type : false; 805 852 } 806 853 807 854 bool is_nd_volatile(struct device *dev) ··· 872 929 nvdimm_bus_unlock(dev); 873 930 } 874 931 875 - int nd_blk_region_init(struct nd_region *nd_region) 876 - { 877 - struct device *dev = &nd_region->dev; 878 - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); 879 - 880 - if (!is_nd_blk(dev)) 881 - return 0; 882 - 883 - if (nd_region->ndr_mappings < 1) { 884 - dev_dbg(dev, "invalid BLK region\n"); 885 - return -ENXIO; 886 - } 887 - 888 - return to_nd_blk_region(dev)->enable(nvdimm_bus, dev); 889 - } 890 - 891 932 /** 892 933 * nd_region_acquire_lane - allocate and lock a lane 893 934 * @nd_region: region id and number of lanes possible ··· 934 1007 static unsigned long default_align(struct nd_region *nd_region) 935 1008 { 936 1009 unsigned long align; 937 - int i, mappings; 938 1010 u32 remainder; 1011 + int mappings; 939 1012 940 - if (is_nd_blk(&nd_region->dev)) 941 - align = PAGE_SIZE; 942 - else 943 - align = MEMREMAP_COMPAT_ALIGN_MAX; 944 - 945 - for (i = 0; i < nd_region->ndr_mappings; i++) { 946 - struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 947 - struct nvdimm *nvdimm = nd_mapping->nvdimm; 948 - 949 - if (test_bit(NDD_ALIASING, &nvdimm->flags)) { 950 - align = MEMREMAP_COMPAT_ALIGN_MAX; 951 - break; 952 - } 953 - } 954 - 1013 + align = MEMREMAP_COMPAT_ALIGN_MAX; 955 1014 if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) 956 1015 align = PAGE_SIZE; 957 1016 ··· 955 1042 { 956 1043 struct 
nd_region *nd_region; 957 1044 struct device *dev; 958 - void *region_buf; 959 1045 unsigned int i; 960 1046 int ro = 0; 961 1047 ··· 972 1060 if (test_bit(NDD_UNARMED, &nvdimm->flags)) 973 1061 ro = 1; 974 1062 975 - if (test_bit(NDD_NOBLK, &nvdimm->flags) 976 - && dev_type == &nd_blk_device_type) { 977 - dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n", 978 - caller, dev_name(&nvdimm->dev), i); 979 - return NULL; 980 - } 981 1063 } 982 1064 983 - if (dev_type == &nd_blk_device_type) { 984 - struct nd_blk_region_desc *ndbr_desc; 985 - struct nd_blk_region *ndbr; 1065 + nd_region = 1066 + kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings), 1067 + GFP_KERNEL); 986 1068 987 - ndbr_desc = to_blk_region_desc(ndr_desc); 988 - ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping) 989 - * ndr_desc->num_mappings, 990 - GFP_KERNEL); 991 - if (ndbr) { 992 - nd_region = &ndbr->nd_region; 993 - ndbr->enable = ndbr_desc->enable; 994 - ndbr->do_io = ndbr_desc->do_io; 995 - } 996 - region_buf = ndbr; 997 - } else { 998 - nd_region = kzalloc(struct_size(nd_region, mapping, 999 - ndr_desc->num_mappings), 1000 - GFP_KERNEL); 1001 - region_buf = nd_region; 1002 - } 1003 - 1004 - if (!region_buf) 1069 + if (!nd_region) 1005 1070 return NULL; 1006 1071 nd_region->id = memregion_alloc(GFP_KERNEL); 1007 1072 if (nd_region->id < 0) ··· 1042 1153 err_percpu: 1043 1154 memregion_free(nd_region->id); 1044 1155 err_id: 1045 - kfree(region_buf); 1156 + kfree(nd_region); 1046 1157 return NULL; 1047 1158 } 1048 1159 ··· 1054 1165 __func__); 1055 1166 } 1056 1167 EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create); 1057 - 1058 - struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, 1059 - struct nd_region_desc *ndr_desc) 1060 - { 1061 - if (ndr_desc->num_mappings > 1) 1062 - return NULL; 1063 - ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES); 1064 - return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type, 1065 - __func__); 1066 
- } 1067 - EXPORT_SYMBOL_GPL(nvdimm_blk_region_create); 1068 1168 1069 1169 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, 1070 1170 struct nd_region_desc *ndr_desc) ··· 1079 1201 } 1080 1202 /** 1081 1203 * nvdimm_flush - flush any posted write queues between the cpu and pmem media 1082 - * @nd_region: blk or interleaved pmem region 1204 + * @nd_region: interleaved pmem region 1083 1205 */ 1084 1206 int generic_nvdimm_flush(struct nd_region *nd_region) 1085 1207 { ··· 1112 1234 1113 1235 /** 1114 1236 * nvdimm_has_flush - determine write flushing requirements 1115 - * @nd_region: blk or interleaved pmem region 1237 + * @nd_region: interleaved pmem region 1116 1238 * 1117 1239 * Returns 1 if writes require flushing 1118 1240 * Returns 0 if writes do not require flushing
-24
include/linux/libnvdimm.h
··· 25 25 }; 26 26 27 27 enum { 28 - /* when a dimm supports both PMEM and BLK access a label is required */ 29 - NDD_ALIASING = 0, 30 28 /* unarmed memory devices may not persist writes */ 31 29 NDD_UNARMED = 1, 32 30 /* locked memory devices should not be accessed */ ··· 33 35 NDD_SECURITY_OVERWRITE = 3, 34 36 /* tracking whether or not there is a pending device reference */ 35 37 NDD_WORK_PENDING = 4, 36 - /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ 37 - NDD_NOBLK = 5, 38 38 /* dimm supports namespace labels */ 39 39 NDD_LABELING = 6, 40 40 ··· 136 140 } 137 141 138 142 struct nvdimm_bus; 139 - struct module; 140 - struct nd_blk_region; 141 - struct nd_blk_region_desc { 142 - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); 143 - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 144 - void *iobuf, u64 len, int rw); 145 - struct nd_region_desc ndr_desc; 146 - }; 147 - 148 - static inline struct nd_blk_region_desc *to_blk_region_desc( 149 - struct nd_region_desc *ndr_desc) 150 - { 151 - return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc); 152 - 153 - } 154 143 155 144 /* 156 145 * Note that separate bits for locked + unlocked are defined so that ··· 238 257 struct nvdimm *to_nvdimm(struct device *dev); 239 258 struct nd_region *to_nd_region(struct device *dev); 240 259 struct device *nd_region_dev(struct nd_region *nd_region); 241 - struct nd_blk_region *to_nd_blk_region(struct device *dev); 242 260 struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); 243 261 struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); 244 262 const char *nvdimm_name(struct nvdimm *nvdimm); ··· 275 295 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, 276 296 struct nd_region_desc *ndr_desc); 277 297 void *nd_region_provider_data(struct nd_region *nd_region); 278 - void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); 279 - void 
nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); 280 - struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); 281 - unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); 282 298 unsigned int nd_region_acquire_lane(struct nd_region *nd_region); 283 299 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); 284 300 u64 nd_fletcher64(void *addr, size_t len, bool le);
-2
include/uapi/linux/ndctl.h
··· 189 189 #define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */ 190 190 #define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */ 191 191 #define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */ 192 - #define ND_DEVICE_NAMESPACE_BLK 6 /* BLK namespace (may alias with PMEM) */ 193 192 #define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */ 194 193 195 194 enum nd_driver_flags { ··· 197 198 ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK, 198 199 ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO, 199 200 ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM, 200 - ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK, 201 201 ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, 202 202 }; 203 203
+4 -63
tools/testing/nvdimm/test/ndtest.c
··· 338 338 return 0; 339 339 } 340 340 341 - static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, 342 - void *iobuf, u64 len, int rw) 343 - { 344 - struct ndtest_dimm *dimm = ndbr->blk_provider_data; 345 - struct ndtest_blk_mmio *mmio = dimm->mmio; 346 - struct nd_region *nd_region = &ndbr->nd_region; 347 - unsigned int lane; 348 - 349 - if (!mmio) 350 - return -ENOMEM; 351 - 352 - lane = nd_region_acquire_lane(nd_region); 353 - if (rw) 354 - memcpy(mmio->base + dpa, iobuf, len); 355 - else { 356 - memcpy(iobuf, mmio->base + dpa, len); 357 - arch_invalidate_pmem(mmio->base + dpa, len); 358 - } 359 - 360 - nd_region_release_lane(nd_region, lane); 361 - 362 - return 0; 363 - } 364 - 365 - static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 366 - struct device *dev) 367 - { 368 - struct nd_blk_region *ndbr = to_nd_blk_region(dev); 369 - struct nvdimm *nvdimm; 370 - struct ndtest_dimm *dimm; 371 - struct ndtest_blk_mmio *mmio; 372 - 373 - nvdimm = nd_blk_region_to_dimm(ndbr); 374 - dimm = nvdimm_provider_data(nvdimm); 375 - 376 - nd_blk_region_set_provider_data(ndbr, dimm); 377 - dimm->blk_region = to_nd_region(dev); 378 - 379 - mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL); 380 - if (!mmio) 381 - return -ENOMEM; 382 - 383 - mmio->base = (void __iomem *) devm_nvdimm_memremap( 384 - dev, dimm->address, 12, nd_blk_memremap_flags(ndbr)); 385 - if (!mmio->base) { 386 - dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm)); 387 - return -ENOMEM; 388 - } 389 - mmio->size = dimm->size; 390 - mmio->base_offset = 0; 391 - 392 - dimm->mmio = mmio; 393 - 394 - return 0; 395 - } 396 - 397 341 static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr) 398 342 { 399 343 int i; ··· 467 523 struct ndtest_region *region) 468 524 { 469 525 struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING]; 470 - struct nd_blk_region_desc ndbr_desc; 526 + struct nd_region_desc *ndr_desc, _ndr_desc; 471 527 
struct nd_interleave_set *nd_set; 472 - struct nd_region_desc *ndr_desc; 473 528 struct resource res; 474 529 int i, ndimm = region->mapping[0].dimm; 475 530 u64 uuid[2]; 476 531 477 532 memset(&res, 0, sizeof(res)); 478 533 memset(&mappings, 0, sizeof(mappings)); 479 - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 480 - ndr_desc = &ndbr_desc.ndr_desc; 534 + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); 535 + ndr_desc = &_ndr_desc; 481 536 482 537 if (!ndtest_alloc_resource(p, region->size, &res.start)) 483 538 return -ENOMEM; ··· 800 857 struct device *dev = &priv->pdev.dev; 801 858 unsigned long dimm_flags = dimm->flags; 802 859 803 - if (dimm->num_formats > 1) { 804 - set_bit(NDD_ALIASING, &dimm_flags); 860 + if (dimm->num_formats > 1) 805 861 set_bit(NDD_LABELING, &dimm_flags); 806 - } 807 862 808 863 if (dimm->flags & PAPR_PMEM_UNARMED_MASK) 809 864 set_bit(NDD_UNARMED, &dimm_flags);