Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/memremap_pages: convert to 'struct range'

The 'struct resource' in 'struct dev_pagemap' is only used for holding
resource span information. The other fields ('name', 'flags', 'desc',
'parent', 'sibling', and 'child') are all unused, wasted space.

This is in preparation for introducing a multi-range extension of
devm_memremap_pages().
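
To make the new representation concrete: the span is now just the two-field
'struct range' plus the range_len() helper that this patch moves into
include/linux/range.h. A minimal userspace rendering of that shape
(illustrative only; uint64_t stands in for the kernel's u64):

    #include <stdint.h>
    #include <stdio.h>

    struct range {                  /* same layout as include/linux/range.h */
            uint64_t start;
            uint64_t end;           /* inclusive, hence the +1 below */
    };

    static inline uint64_t range_len(const struct range *range)
    {
            return range->end - range->start + 1;
    }

    int main(void)
    {
            struct range r = { .start = 0x100000000ULL, .end = 0x1ffffffffULL };

            printf("%#llx-%#llx spans %llu bytes\n",
                   (unsigned long long)r.start, (unsigned long long)r.end,
                   (unsigned long long)range_len(&r));
            return 0;
    }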

The bulk of this change is unwinding all the places internal to libnvdimm
that used 'struct resource' unnecessarily, and replacing instances of
'struct dev_pagemap'.res with 'struct dev_pagemap'.range.

P2PDMA had a minor use of the resource flags field, but only to report
failures with "%pR". That is replaced with an open-coded print of the
range.
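
The per-driver conversion follows the same mechanical pattern throughout this
patch: copy only the span of a 'struct resource' into pgmap->range, size it
with range_len(), and print the endpoints directly instead of relying on
"%pR". A simplified userspace mock-up of that pattern (the struct definitions
and setup_pgmap() helper here are illustrative stand-ins, not the actual
driver code):

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };
    struct resource { uint64_t start, end; };   /* name/flags/etc. omitted: unused */
    struct dev_pagemap { struct range range; };

    static uint64_t range_len(const struct range *range)
    {
            return range->end - range->start + 1;
    }

    static void setup_pgmap(struct dev_pagemap *pgmap, const struct resource *res)
    {
            /* the conversion repeated across the drivers touched by this patch */
            pgmap->range.start = res->start;
            pgmap->range.end = res->end;
    }

    int main(void)
    {
            struct resource res = { .start = 0xfe000000ULL, .end = 0xfeffffffULL };
            struct dev_pagemap pgmap;

            setup_pgmap(&pgmap, &res);
            /* open-coded replacement for the old "%pR" reporting */
            printf("added memory %#llx-%#llx (%llu bytes)\n",
                   (unsigned long long)pgmap.range.start,
                   (unsigned long long)pgmap.range.end,
                   (unsigned long long)range_len(&pgmap.range));
            return 0;
    }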

[dan.carpenter@oracle.com: mm/hmm/test: use after free in dmirror_allocate_chunk()]
Link: https://lkml.kernel.org/r/20200926121402.GA7467@kadam

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen]
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/159643103173.4062302.768998885691711532.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106115761.30709.13539840236873663620.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Dan Williams; committed by Linus Torvalds (commit a4574f63, parent fcffb6a1).

21 files changed: +195 -165
arch/powerpc/kvm/book3s_hv_uvmem.c (+7 -6)
···
         struct kvmppc_uvmem_page_pvt *pvt;
         unsigned long pfn_last, pfn_first;
 
-        pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
+        pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
         pfn_last = pfn_first +
-                (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
+                (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
 
         spin_lock(&kvmppc_uvmem_bitmap_lock);
         bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
···
 static void kvmppc_uvmem_page_free(struct page *page)
 {
         unsigned long pfn = page_to_pfn(page) -
-                        (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
+                        (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
         struct kvmppc_uvmem_page_pvt *pvt;
 
         spin_lock(&kvmppc_uvmem_bitmap_lock);
···
         }
 
         kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
-        kvmppc_uvmem_pgmap.res = *res;
+        kvmppc_uvmem_pgmap.range.start = res->start;
+        kvmppc_uvmem_pgmap.range.end = res->end;
         kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
         /* just one global instance: */
         kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
···
                 return;
 
         memunmap_pages(&kvmppc_uvmem_pgmap);
-        release_mem_region(kvmppc_uvmem_pgmap.res.start,
-                           resource_size(&kvmppc_uvmem_pgmap.res));
+        release_mem_region(kvmppc_uvmem_pgmap.range.start,
+                           range_len(&kvmppc_uvmem_pgmap.range));
         kfree(kvmppc_uvmem_bitmap);
 }
drivers/dax/bus.c (+5 -5)
···
 }
 
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-                struct resource *res, int target_node, unsigned int align,
+                struct range *range, int target_node, unsigned int align,
                 unsigned long flags)
 {
         struct dax_region *dax_region;
···
                 return NULL;
         }
 
-        if (!IS_ALIGNED(res->start, align)
-                        || !IS_ALIGNED(resource_size(res), align))
+        if (!IS_ALIGNED(range->start, align)
+                        || !IS_ALIGNED(range_len(range), align))
                 return NULL;
 
         dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
···
         dax_region->target_node = target_node;
         ida_init(&dax_region->ida);
         dax_region->res = (struct resource) {
-                .start = res->start,
-                .end = res->end,
+                .start = range->start,
+                .end = range->end,
                 .flags = IORESOURCE_MEM | flags,
         };
 
drivers/dax/bus.h (+1 -1)
···
 
 #define IORESOURCE_DAX_STATIC (1UL << 0)
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-                struct resource *res, int target_node, unsigned int align,
+                struct range *range, int target_node, unsigned int align,
                 unsigned long flags);
 
 enum dev_dax_subsys {
drivers/dax/dax-private.h (-5)
···
         struct range range;
 };
 
-static inline u64 range_len(struct range *range)
-{
-        return range->end - range->start + 1;
-}
-
 static inline struct dev_dax *to_dev_dax(struct device *dev)
 {
         return container_of(dev, struct dev_dax, dev);
drivers/dax/device.c (+1 -2)
···
                 pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
                 if (!pgmap)
                         return -ENOMEM;
-                pgmap->res.start = range->start;
-                pgmap->res.end = range->end;
+                pgmap->range = *range;
         }
         pgmap->type = MEMORY_DEVICE_GENERIC;
         addr = devm_memremap_pages(dev, pgmap);
drivers/dax/hmem/hmem.c (+4 -1)
···
         struct dev_dax_data data;
         struct dev_dax *dev_dax;
         struct resource *res;
+        struct range range;
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         if (!res)
                 return -ENOMEM;
 
         mri = dev->platform_data;
-        dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+        range.start = res->start;
+        range.end = res->end;
+        dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
                         PMD_SIZE, 0);
         if (!dax_region)
                 return -ENOMEM;
drivers/dax/pmem/core.c (+6 -6)
···
 
 struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 {
-        struct resource res;
+        struct range range;
         int rc, id, region_id;
         resource_size_t offset;
         struct nd_pfn_sb *pfn_sb;
···
         if (rc != 2)
                 return ERR_PTR(-EINVAL);
 
-        /* adjust the dax_region resource to the start of data */
-        memcpy(&res, &pgmap.res, sizeof(res));
-        res.start += offset;
-        dax_region = alloc_dax_region(dev, region_id, &res,
+        /* adjust the dax_region range to the start of data */
+        range = pgmap.range;
+        range.start += offset,
+        dax_region = alloc_dax_region(dev, region_id, &range,
                         nd_region->target_node, le32_to_cpu(pfn_sb->align),
                         IORESOURCE_DAX_STATIC);
         if (!dax_region)
···
                 .id = id,
                 .pgmap = &pgmap,
                 .subsys = subsys,
-                .size = resource_size(&res),
+                .size = range_len(&range),
         };
         dev_dax = devm_create_dev_dax(&data);
 
drivers/gpu/drm/nouveau/nouveau_dmem.c (+7 -7)
···
 {
         struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
         unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
-                                chunk->pagemap.res.start;
+                                chunk->pagemap.range.start;
 
         return chunk->bo->offset + off;
 }
···
 
         chunk->drm = drm;
         chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
-        chunk->pagemap.res = *res;
+        chunk->pagemap.range.start = res->start;
+        chunk->pagemap.range.end = res->end;
         chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
         chunk->pagemap.owner = drm->dev;
 
···
         list_add(&chunk->list, &drm->dmem->chunks);
         mutex_unlock(&drm->dmem->mutex);
 
-        pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
         page = pfn_to_page(pfn_first);
         spin_lock(&drm->dmem->lock);
         for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
···
 out_bo_free:
         nouveau_bo_ref(NULL, &chunk->bo);
 out_release:
-        release_mem_region(chunk->pagemap.res.start,
-                           resource_size(&chunk->pagemap.res));
+        release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
 out_free:
         kfree(chunk);
 out:
···
                 nouveau_bo_ref(NULL, &chunk->bo);
                 list_del(&chunk->list);
                 memunmap_pages(&chunk->pagemap);
-                release_mem_region(chunk->pagemap.res.start,
-                                   resource_size(&chunk->pagemap.res));
+                release_mem_region(chunk->pagemap.range.start,
+                                   range_len(&chunk->pagemap.range));
                 kfree(chunk);
         }
 
drivers/nvdimm/badrange.c (+13 -13)
···
 }
 
 static void badblocks_populate(struct badrange *badrange,
-                struct badblocks *bb, const struct resource *res)
+                struct badblocks *bb, const struct range *range)
 {
         struct badrange_entry *bre;
 
···
                 u64 bre_end = bre->start + bre->length - 1;
 
                 /* Discard intervals with no intersection */
-                if (bre_end < res->start)
+                if (bre_end < range->start)
                         continue;
-                if (bre->start > res->end)
+                if (bre->start > range->end)
                         continue;
                 /* Deal with any overlap after start of the namespace */
-                if (bre->start >= res->start) {
+                if (bre->start >= range->start) {
                         u64 start = bre->start;
                         u64 len;
 
-                        if (bre_end <= res->end)
+                        if (bre_end <= range->end)
                                 len = bre->length;
                         else
-                                len = res->start + resource_size(res)
+                                len = range->start + range_len(range)
                                         - bre->start;
-                        __add_badblock_range(bb, start - res->start, len);
+                        __add_badblock_range(bb, start - range->start, len);
                         continue;
                 }
                 /*
                  * Deal with overlap for badrange starting before
                  * the namespace.
                  */
-                if (bre->start < res->start) {
+                if (bre->start < range->start) {
                         u64 len;
 
-                        if (bre_end < res->end)
-                                len = bre->start + bre->length - res->start;
+                        if (bre_end < range->end)
+                                len = bre->start + bre->length - range->start;
                         else
-                                len = resource_size(res);
+                                len = range_len(range);
                         __add_badblock_range(bb, 0, len);
                 }
         }
···
  * and add badblocks entries for all matching sub-ranges
  */
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-                struct badblocks *bb, const struct resource *res)
+                struct badblocks *bb, const struct range *range)
 {
         struct nvdimm_bus *nvdimm_bus;
 
···
         nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 
         nvdimm_bus_lock(&nvdimm_bus->dev);
-        badblocks_populate(&nvdimm_bus->badrange, bb, res);
+        badblocks_populate(&nvdimm_bus->badrange, bb, range);
         nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
drivers/nvdimm/claim.c (+8 -5)
···
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
                 resource_size_t size)
 {
-        struct resource *res = &nsio->res;
         struct nd_namespace_common *ndns = &nsio->common;
+        struct range range = {
+                .start = nsio->res.start,
+                .end = nsio->res.end,
+        };
 
         nsio->size = size;
-        if (!devm_request_mem_region(dev, res->start, size,
+        if (!devm_request_mem_region(dev, range.start, size,
                                 dev_name(&ndns->dev))) {
-                dev_warn(dev, "could not reserve region %pR\n", res);
+                dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
                 return -EBUSY;
         }
 
···
         if (devm_init_badblocks(dev, &nsio->bb))
                 return -ENOMEM;
         nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
-                        &nsio->res);
+                        &range);
 
-        nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
+        nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
 
         return PTR_ERR_OR_ZERO(nsio->addr);
 }
drivers/nvdimm/nd.h (+2 -1)
···
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                 char *name);
 unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
+struct range;
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-                struct badblocks *bb, const struct resource *res);
+                struct badblocks *bb, const struct range *range);
 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
                 resource_size_t size);
 void devm_namespace_disable(struct device *dev,
drivers/nvdimm/pfn_devs.c (+6 -6)
···
 
 static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct vmem_altmap *altmap = &pgmap->altmap;
         struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
         u64 offset = le64_to_cpu(pfn_sb->dataoff);
···
                 .end_pfn = PHYS_PFN(end),
         };
 
-        memcpy(res, &nsio->res, sizeof(*res));
-        res->start += start_pad;
-        res->end -= end_trunc;
-
+        *range = (struct range) {
+                .start = nsio->res.start + start_pad,
+                .end = nsio->res.end - end_trunc,
+        };
         if (nd_pfn->mode == PFN_MODE_RAM) {
                 if (offset < reserve)
                         return -EINVAL;
                 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
         } else if (nd_pfn->mode == PFN_MODE_PMEM) {
-                nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
+                nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                 if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                         dev_info(&nd_pfn->dev,
                                         "number of pfns truncated from %lld to %ld\n",
drivers/nvdimm/pmem.c (+14 -12)
···
         struct nd_region *nd_region = to_nd_region(dev->parent);
         int nid = dev_to_node(dev), fua;
         struct resource *res = &nsio->res;
-        struct resource bb_res;
+        struct range bb_range;
         struct nd_pfn *nd_pfn = NULL;
         struct dax_device *dax_dev;
         struct nd_pfn_sb *pfn_sb;
···
                 pfn_sb = nd_pfn->pfn_sb;
                 pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                 pmem->pfn_pad = resource_size(res) -
-                        resource_size(&pmem->pgmap.res);
+                        range_len(&pmem->pgmap.range);
                 pmem->pfn_flags |= PFN_MAP;
-                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
-                bb_res.start += pmem->data_offset;
+                bb_range = pmem->pgmap.range;
+                bb_range.start += pmem->data_offset;
         } else if (pmem_should_map_pages(dev)) {
-                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+                pmem->pgmap.range.start = res->start;
+                pmem->pgmap.range.end = res->end;
                 pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                 pmem->pgmap.ops = &fsdax_pagemap_ops;
                 addr = devm_memremap_pages(dev, &pmem->pgmap);
                 pmem->pfn_flags |= PFN_MAP;
-                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+                bb_range = pmem->pgmap.range;
         } else {
                 if (devm_add_action_or_reset(dev, pmem_release_queue,
                                         &pmem->pgmap))
                         return -ENOMEM;
                 addr = devm_memremap(dev, pmem->phys_addr,
                                 pmem->size, ARCH_MEMREMAP_PMEM);
-                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+                bb_range.start = res->start;
+                bb_range.end = res->end;
         }
 
         if (IS_ERR(addr))
···
                         / 512);
         if (devm_init_badblocks(dev, &pmem->bb))
                 return -ENOMEM;
-        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
+        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
         disk->bb = &pmem->bb;
 
         if (is_nvdimm_sync(nd_region))
···
         resource_size_t offset = 0, end_trunc = 0;
         struct nd_namespace_common *ndns;
         struct nd_namespace_io *nsio;
-        struct resource res;
         struct badblocks *bb;
+        struct range range;
         struct kernfs_node *bb_state;
 
         if (event != NVDIMM_REVALIDATE_POISON)
···
                 nsio = to_nd_namespace_io(&ndns->dev);
         }
 
-        res.start = nsio->res.start + offset;
-        res.end = nsio->res.end - end_trunc;
-        nvdimm_badblocks_populate(nd_region, bb, &res);
+        range.start = nsio->res.start + offset;
+        range.end = nsio->res.end - end_trunc;
+        nvdimm_badblocks_populate(nd_region, bb, &range);
         if (bb_state)
                 sysfs_notify_dirent(bb_state);
 }
drivers/nvdimm/region.c (+12 -9)
···
                 return rc;
 
         if (is_memory(&nd_region->dev)) {
-                struct resource ndr_res;
+                struct range range = {
+                        .start = nd_region->ndr_start,
+                        .end = nd_region->ndr_start + nd_region->ndr_size - 1,
+                };
 
                 if (devm_init_badblocks(dev, &nd_region->bb))
                         return -ENODEV;
···
                 if (!nd_region->bb_state)
                         dev_warn(&nd_region->dev,
                                         "'badblocks' notification disabled\n");
-                ndr_res.start = nd_region->ndr_start;
-                ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
-                nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+                nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
         }
 
         rc = nd_region_register_namespaces(nd_region, &err);
···
 {
         if (event == NVDIMM_REVALIDATE_POISON) {
                 struct nd_region *nd_region = to_nd_region(dev);
-                struct resource res;
 
                 if (is_memory(&nd_region->dev)) {
-                        res.start = nd_region->ndr_start;
-                        res.end = nd_region->ndr_start +
-                                nd_region->ndr_size - 1;
+                        struct range range = {
+                                .start = nd_region->ndr_start,
+                                .end = nd_region->ndr_start +
+                                        nd_region->ndr_size - 1,
+                        };
+
                         nvdimm_badblocks_populate(nd_region,
-                                        &nd_region->bb, &res);
+                                        &nd_region->bb, &range);
                         if (nd_region->bb_state)
                                 sysfs_notify_dirent(nd_region->bb_state);
                 }
drivers/pci/p2pdma.c (+5 -6)
···
                 return -ENOMEM;
 
         pgmap = &p2p_pgmap->pgmap;
-        pgmap->res.start = pci_resource_start(pdev, bar) + offset;
-        pgmap->res.end = pgmap->res.start + size - 1;
-        pgmap->res.flags = pci_resource_flags(pdev, bar);
+        pgmap->range.start = pci_resource_start(pdev, bar) + offset;
+        pgmap->range.end = pgmap->range.start + size - 1;
         pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 
         p2p_pgmap->provider = pdev;
···
 
         error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
                         pci_bus_address(pdev, bar) + offset,
-                        resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+                        range_len(&pgmap->range), dev_to_node(&pdev->dev),
                         pgmap->ref);
         if (error)
                 goto pages_free;
 
-        pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
-                 &pgmap->res);
+        pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
+                 pgmap->range.start, pgmap->range.end);
 
         return 0;
 
drivers/xen/unpopulated-alloc.c (+30 -14)
···
 static int fill_list(unsigned int nr_pages)
 {
         struct dev_pagemap *pgmap;
+        struct resource *res;
         void *vaddr;
         unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
-        int ret;
+        int ret = -ENOMEM;
+
+        res = kzalloc(sizeof(*res), GFP_KERNEL);
+        if (!res)
+                return -ENOMEM;
 
         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
         if (!pgmap)
-                return -ENOMEM;
+                goto err_pgmap;
 
         pgmap->type = MEMORY_DEVICE_GENERIC;
-        pgmap->res.name = "Xen scratch";
-        pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+        res->name = "Xen scratch";
+        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-        ret = allocate_resource(&iomem_resource, &pgmap->res,
+        ret = allocate_resource(&iomem_resource, res,
                                 alloc_pages * PAGE_SIZE, 0, -1,
                                 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
         if (ret < 0) {
                 pr_err("Cannot allocate new IOMEM resource\n");
-                kfree(pgmap);
-                return ret;
+                goto err_resource;
         }
+
+        pgmap->range = (struct range) {
+                .start = res->start,
+                .end = res->end,
+        };
+        pgmap->owner = res;
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
         /*
···
          * conflict with any devices.
          */
         if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+                xen_pfn_t pfn = PFN_DOWN(res->start);
 
                 for (i = 0; i < alloc_pages; i++) {
                         if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                 pr_warn("set_phys_to_machine() failed, no memory added\n");
-                                release_resource(&pgmap->res);
-                                kfree(pgmap);
-                                return -ENOMEM;
+                                ret = -ENOMEM;
+                                goto err_memremap;
                         }
                 }
         }
···
         vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
         if (IS_ERR(vaddr)) {
                 pr_err("Cannot remap memory range\n");
-                release_resource(&pgmap->res);
-                kfree(pgmap);
-                return PTR_ERR(vaddr);
+                ret = PTR_ERR(vaddr);
+                goto err_memremap;
         }
 
         for (i = 0; i < alloc_pages; i++) {
···
         }
 
         return 0;
+
+err_memremap:
+        release_resource(res);
+err_resource:
+        kfree(pgmap);
+err_pgmap:
+        kfree(res);
+        return ret;
 }
 
 /**
include/linux/memremap.h (+3 -2)
···
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_MEMREMAP_H_
 #define _LINUX_MEMREMAP_H_
+#include <linux/range.h>
 #include <linux/ioport.h>
 #include <linux/percpu-refcount.h>
 
···
 /**
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
+ * @range: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @internal_ref: internal reference if @ref is not provided by the caller
  * @done: completion for @internal_ref
···
  */
 struct dev_pagemap {
         struct vmem_altmap altmap;
-        struct resource res;
+        struct range range;
         struct percpu_ref *ref;
         struct percpu_ref internal_ref;
         struct completion done;
include/linux/range.h (+6)
···
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_RANGE_H
 #define _LINUX_RANGE_H
+#include <linux/types.h>
 
 struct range {
         u64 start;
         u64 end;
 };
+
+static inline u64 range_len(const struct range *range)
+{
+        return range->end - range->start + 1;
+}
 
 int add_range(struct range *range, int az, int nr_range,
                 u64 start, u64 end);
lib/test_hmm.c (+25 -25)
···
         unsigned long pfn_last;
         void *ptr;
 
+        devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+        if (!devmem)
+                return -ENOMEM;
+
+        res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+                                      "hmm_dmirror");
+        if (IS_ERR(res))
+                goto err_devmem;
+
+        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+        devmem->pagemap.range.start = res->start;
+        devmem->pagemap.range.end = res->end;
+        devmem->pagemap.ops = &dmirror_devmem_ops;
+        devmem->pagemap.owner = mdevice;
+
         mutex_lock(&mdevice->devmem_lock);
 
         if (mdevice->devmem_count == mdevice->devmem_capacity) {
···
                                 sizeof(new_chunks[0]) * new_capacity,
                                 GFP_KERNEL);
                 if (!new_chunks)
-                        goto err;
+                        goto err_release;
                 mdevice->devmem_capacity = new_capacity;
                 mdevice->devmem_chunks = new_chunks;
         }
 
-        res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-                                      "hmm_dmirror");
-        if (IS_ERR(res))
-                goto err;
-
-        devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
-        if (!devmem)
-                goto err_release;
-
-        devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-        devmem->pagemap.res = *res;
-        devmem->pagemap.ops = &dmirror_devmem_ops;
-        devmem->pagemap.owner = mdevice;
-
         ptr = memremap_pages(&devmem->pagemap, numa_node_id());
         if (IS_ERR(ptr))
-                goto err_free;
+                goto err_release;
 
         devmem->mdevice = mdevice;
-        pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
-        pfn_last = pfn_first +
-                (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+        pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+        pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
         mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
 
         mutex_unlock(&mdevice->devmem_lock);
···
 
         return true;
 
-err_free:
-        kfree(devmem);
 err_release:
-        release_mem_region(res->start, resource_size(res));
-err:
         mutex_unlock(&mdevice->devmem_lock);
+        release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+        kfree(devmem);
+
         return false;
 }
 
···
                         mdevice->devmem_chunks[i];
 
                 memunmap_pages(&devmem->pagemap);
-                release_mem_region(devmem->pagemap.res.start,
-                                   resource_size(&devmem->pagemap.res));
+                release_mem_region(devmem->pagemap.range.start,
+                                   range_len(&devmem->pagemap.range));
                 kfree(devmem);
         }
         kfree(mdevice->devmem_chunks);
mm/memremap.c (+39 -38)
···
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
-static void pgmap_array_delete(struct resource *res)
+static void pgmap_array_delete(struct range *range)
 {
-        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                         NULL, GFP_KERNEL);
         synchronize_rcu();
 }
 
 static unsigned long pfn_first(struct dev_pagemap *pgmap)
 {
-        return PHYS_PFN(pgmap->res.start) +
+        return PHYS_PFN(pgmap->range.start) +
                 vmem_altmap_offset(pgmap_altmap(pgmap));
 }
 
 static unsigned long pfn_end(struct dev_pagemap *pgmap)
 {
-        const struct resource *res = &pgmap->res;
+        const struct range *range = &pgmap->range;
 
-        return (res->start + resource_size(res)) >> PAGE_SHIFT;
+        return (range->start + range_len(range)) >> PAGE_SHIFT;
 }
 
 static unsigned long pfn_next(unsigned long pfn)
···
 
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct page *first_page;
         unsigned long pfn;
         int nid;
···
         nid = page_to_nid(first_page);
 
         mem_hotplug_begin();
-        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
-                                   PHYS_PFN(resource_size(res)));
+        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
+                                   PHYS_PFN(range_len(range)));
         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-                __remove_pages(PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), NULL);
+                __remove_pages(PHYS_PFN(range->start),
+                               PHYS_PFN(range_len(range)), NULL);
         } else {
-                arch_remove_memory(nid, res->start, resource_size(res),
+                arch_remove_memory(nid, range->start, range_len(range),
                                 pgmap_altmap(pgmap));
-                kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+                kasan_remove_zero_shadow(__va(range->start), range_len(range));
         }
         mem_hotplug_done();
 
-        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
-        pgmap_array_delete(res);
+        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+        pgmap_array_delete(range);
         WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
         devmap_managed_enable_put();
 }
···
  */
 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
-        struct resource *res = &pgmap->res;
+        struct range *range = &pgmap->range;
         struct dev_pagemap *conflict_pgmap;
         struct mhp_params params = {
                 /*
···
                 return ERR_PTR(error);
         }
 
-        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
         if (conflict_pgmap) {
                 WARN(1, "Conflicting mapping in same section\n");
                 put_dev_pagemap(conflict_pgmap);
···
                 goto err_array;
         }
 
-        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
         if (conflict_pgmap) {
                 WARN(1, "Conflicting mapping in same section\n");
                 put_dev_pagemap(conflict_pgmap);
···
                 goto err_array;
         }
 
-        is_ram = region_intersects(res->start, resource_size(res),
+        is_ram = region_intersects(range->start, range_len(range),
                         IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
         if (is_ram != REGION_DISJOINT) {
-                WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
-                                is_ram == REGION_MIXED ? "mixed" : "ram", res);
+                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
+                                is_ram == REGION_MIXED ? "mixed" : "ram",
+                                range->start, range->end);
                 error = -ENXIO;
                 goto err_array;
         }
 
-        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
-                                PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
+                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
         if (error)
                 goto err_array;
 
         if (nid < 0)
                 nid = numa_mem_id();
 
-        error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
-                                0, resource_size(res));
+        error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(range->start), 0,
+                        range_len(range));
         if (error)
                 goto err_pfn_remap;
···
          * arch_add_memory().
          */
         if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-                error = add_pages(nid, PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), &params);
+                error = add_pages(nid, PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), &params);
         } else {
-                error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                 if (error) {
                         mem_hotplug_done();
                         goto err_kasan;
                 }
 
-                error = arch_add_memory(nid, res->start, resource_size(res),
+                error = arch_add_memory(nid, range->start, range_len(range),
                                 &params);
         }
···
                 struct zone *zone;
 
                 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
-                move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), params.altmap);
+                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), params.altmap);
         }
 
         mem_hotplug_done();
···
          * to allow us to do the work while not holding the hotplug lock.
          */
         memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                                PHYS_PFN(res->start),
-                                PHYS_PFN(resource_size(res)), pgmap);
+                                PHYS_PFN(range->start),
+                                PHYS_PFN(range_len(range)), pgmap);
         percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-        return __va(res->start);
+        return __va(range->start);
 
 err_add_memory:
-        kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+        kasan_remove_zero_shadow(__va(range->start), range_len(range));
 err_kasan:
-        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
 err_pfn_remap:
-        pgmap_array_delete(res);
+        pgmap_array_delete(range);
 err_array:
         dev_pagemap_kill(pgmap);
         dev_pagemap_cleanup(pgmap);
···
  *    'live' on entry and will be killed and reaped at
  *    devm_memremap_pages_release() time, or if this routine fails.
  *
- * 4/ res is expected to be a host memory range that could feasibly be
+ * 4/ range is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
  *    this is not enforced.
  */
···
          * In the cached case we're already holding a live reference.
          */
         if (pgmap) {
-                if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                         return pgmap;
                 put_dev_pagemap(pgmap);
         }
tools/testing/nvdimm/test/iomap.c (+1 -1)
···
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
         int error;
-        resource_size_t offset = pgmap->res.start;
+        resource_size_t offset = pgmap->range.start;
         struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
         if (!nfit_res)