Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/memremap: remove unused get_dev_pagemap() parameter

GUP no longer uses get_dev_pagemap(). As it was the only user of the
get_dev_pagemap() pgmap caching feature, that feature can be removed.

Link: https://lkml.kernel.org/r/20250903225926.34702-2-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Alistair Popple; committed by Andrew Morton.
Commit IDs: 614d850e d3f7922b

Total diffstat: +8 -24
+2 -4
include/linux/memremap.h
··· 211 211 void memunmap_pages(struct dev_pagemap *pgmap); 212 212 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); 213 213 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); 214 - struct dev_pagemap *get_dev_pagemap(unsigned long pfn, 215 - struct dev_pagemap *pgmap); 214 + struct dev_pagemap *get_dev_pagemap(unsigned long pfn); 216 215 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); 217 216 218 217 unsigned long memremap_compat_align(void); ··· 233 234 { 234 235 } 235 236 236 - static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, 237 - struct dev_pagemap *pgmap) 237 + static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn) 238 238 { 239 239 return NULL; 240 240 }
+1 -1
mm/memory-failure.c
··· 2194 2194 goto unlock_mutex; 2195 2195 2196 2196 if (pfn_valid(pfn)) { 2197 - pgmap = get_dev_pagemap(pfn, NULL); 2197 + pgmap = get_dev_pagemap(pfn); 2198 2198 put_ref_page(pfn, flags); 2199 2199 if (pgmap) { 2200 2200 res = memory_failure_dev_pagemap(pfn, flags,
+1 -1
mm/memory_hotplug.c
··· 375 375 * the section may be 'offline' but 'valid'. Only 376 376 * get_dev_pagemap() can determine sub-section online status. 377 377 */ 378 - pgmap = get_dev_pagemap(pfn, NULL); 378 + pgmap = get_dev_pagemap(pfn); 379 379 put_dev_pagemap(pgmap); 380 380 381 381 /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
+4 -18
mm/memremap.c
··· 153 153 "altmap not supported for multiple ranges\n")) 154 154 return -EINVAL; 155 155 156 - conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL); 156 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start)); 157 157 if (conflict_pgmap) { 158 158 WARN(1, "Conflicting mapping in same section\n"); 159 159 put_dev_pagemap(conflict_pgmap); 160 160 return -ENOMEM; 161 161 } 162 162 163 - conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL); 163 + conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end)); 164 164 if (conflict_pgmap) { 165 165 WARN(1, "Conflicting mapping in same section\n"); 166 166 put_dev_pagemap(conflict_pgmap); ··· 397 397 /** 398 398 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn 399 399 * @pfn: page frame number to lookup page_map 400 - * @pgmap: optional known pgmap that already has a reference 401 - * 402 - * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap 403 - * is non-NULL but does not cover @pfn the reference to it will be released. 404 400 */ 405 - struct dev_pagemap *get_dev_pagemap(unsigned long pfn, 406 - struct dev_pagemap *pgmap) 401 + struct dev_pagemap *get_dev_pagemap(unsigned long pfn) 407 402 { 403 + struct dev_pagemap *pgmap; 408 404 resource_size_t phys = PFN_PHYS(pfn); 409 405 410 - /* 411 - * In the cached case we're already holding a live reference. 412 - */ 413 - if (pgmap) { 414 - if (phys >= pgmap->range.start && phys <= pgmap->range.end) 415 - return pgmap; 416 - put_dev_pagemap(pgmap); 417 - } 418 - 419 - /* fall back to slow path lookup */ 420 406 rcu_read_lock(); 421 407 pgmap = xa_load(&pgmap_array, PHYS_PFN(phys)); 422 408 if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))