Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/zone_device: rename page_free callback to folio_free

Rename page_free to folio_free to make the folio support for
zone device-private memory more consistent. The PCI P2PDMA
callback has also been renamed to folio_free() accordingly.

For drivers that do not (yet) support folios, the folio is
converted back into a page via &folio->page, and that page is
used as-is in the current callback implementations.

Link: https://lkml.kernel.org/r/20251001065707.920170-3-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Balbir Singh and committed by
Andrew Morton
3a5a0655 d245f9b4

+32 -27
+1 -1
Documentation/mm/memory-model.rst
··· 165 165 * pmem: Map platform persistent memory to be used as a direct-I/O target 166 166 via DAX mappings. 167 167 168 - * hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->page_free()` 168 + * hmm: Extend `ZONE_DEVICE` with `->page_fault()` and `->folio_free()` 169 169 event callbacks to allow a device-driver to coordinate memory management 170 170 events related to device-memory, typically GPU memory. See 171 171 Documentation/mm/hmm.rst.
+3 -2
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 1014 1014 * to a normal PFN during H_SVM_PAGE_OUT. 1015 1015 * Gets called with kvm->arch.uvmem_lock held. 1016 1016 */ 1017 - static void kvmppc_uvmem_page_free(struct page *page) 1017 + static void kvmppc_uvmem_folio_free(struct folio *folio) 1018 1018 { 1019 + struct page *page = &folio->page; 1019 1020 unsigned long pfn = page_to_pfn(page) - 1020 1021 (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT); 1021 1022 struct kvmppc_uvmem_page_pvt *pvt; ··· 1035 1034 } 1036 1035 1037 1036 static const struct dev_pagemap_ops kvmppc_uvmem_ops = { 1038 - .page_free = kvmppc_uvmem_page_free, 1037 + .folio_free = kvmppc_uvmem_folio_free, 1039 1038 .migrate_to_ram = kvmppc_uvmem_migrate_to_ram, 1040 1039 }; 1041 1040
+3 -2
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 568 568 return r < 0 ? r : 0; 569 569 } 570 570 571 - static void svm_migrate_page_free(struct page *page) 571 + static void svm_migrate_folio_free(struct folio *folio) 572 572 { 573 + struct page *page = &folio->page; 573 574 struct svm_range_bo *svm_bo = page->zone_device_data; 574 575 575 576 if (svm_bo) { ··· 1010 1009 } 1011 1010 1012 1011 static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { 1013 - .page_free = svm_migrate_page_free, 1012 + .folio_free = svm_migrate_folio_free, 1014 1013 .migrate_to_ram = svm_migrate_to_ram, 1015 1014 }; 1016 1015
+5 -5
drivers/gpu/drm/drm_pagemap.c
··· 752 752 } 753 753 754 754 /** 755 - * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page 756 - * @page: Pointer to the page 755 + * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio 756 + * @folio: Pointer to the folio 757 757 * 758 758 * This function is a callback used to put the GPU SVM zone device data 759 759 * associated with a page when it is being released. 760 760 */ 761 - static void drm_pagemap_page_free(struct page *page) 761 + static void drm_pagemap_folio_free(struct folio *folio) 762 762 { 763 - drm_pagemap_zdd_put(page->zone_device_data); 763 + drm_pagemap_zdd_put(folio->page.zone_device_data); 764 764 } 765 765 766 766 /** ··· 788 788 } 789 789 790 790 static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = { 791 - .page_free = drm_pagemap_page_free, 791 + .folio_free = drm_pagemap_folio_free, 792 792 .migrate_to_ram = drm_pagemap_migrate_to_ram, 793 793 }; 794 794
+3 -2
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 108 108 return chunk->bo->offset + off; 109 109 } 110 110 111 - static void nouveau_dmem_page_free(struct page *page) 111 + static void nouveau_dmem_folio_free(struct folio *folio) 112 112 { 113 + struct page *page = &folio->page; 113 114 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); 114 115 struct nouveau_dmem *dmem = chunk->drm->dmem; 115 116 ··· 221 220 } 222 221 223 222 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = { 224 - .page_free = nouveau_dmem_page_free, 223 + .folio_free = nouveau_dmem_folio_free, 225 224 .migrate_to_ram = nouveau_dmem_migrate_to_ram, 226 225 }; 227 226
+3 -2
drivers/pci/p2pdma.c
··· 200 200 .name = "p2pmem", 201 201 }; 202 202 203 - static void p2pdma_page_free(struct page *page) 203 + static void p2pdma_folio_free(struct folio *folio) 204 204 { 205 + struct page *page = &folio->page; 205 206 struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page)); 206 207 /* safe to dereference while a reference is held to the percpu ref */ 207 208 struct pci_p2pdma *p2pdma = ··· 215 214 } 216 215 217 216 static const struct dev_pagemap_ops p2pdma_pgmap_ops = { 218 - .page_free = p2pdma_page_free, 217 + .folio_free = p2pdma_folio_free, 219 218 }; 220 219 221 220 static void pci_p2pdma_release(void *data)
+3 -3
include/linux/memremap.h
··· 77 77 78 78 struct dev_pagemap_ops { 79 79 /* 80 - * Called once the page refcount reaches 0. The reference count will be 80 + * Called once the folio refcount reaches 0. The reference count will be 81 81 * reset to one by the core code after the method is called to prepare 82 - * for handing out the page again. 82 + * for handing out the folio again. 83 83 */ 84 - void (*page_free)(struct page *page); 84 + void (*folio_free)(struct folio *folio); 85 85 86 86 /* 87 87 * Used for private (un-addressable) device memory only. Must migrate
+3 -2
lib/test_hmm.c
··· 1374 1374 .owner = THIS_MODULE, 1375 1375 }; 1376 1376 1377 - static void dmirror_devmem_free(struct page *page) 1377 + static void dmirror_devmem_free(struct folio *folio) 1378 1378 { 1379 + struct page *page = &folio->page; 1379 1380 struct page *rpage = BACKING_PAGE(page); 1380 1381 struct dmirror_device *mdevice; 1381 1382 ··· 1439 1438 } 1440 1439 1441 1440 static const struct dev_pagemap_ops dmirror_devmem_ops = { 1442 - .page_free = dmirror_devmem_free, 1441 + .folio_free = dmirror_devmem_free, 1443 1442 .migrate_to_ram = dmirror_devmem_fault, 1444 1443 }; 1445 1444
+8 -8
mm/memremap.c
··· 289 289 WARN(1, "Missing migrate_to_ram method\n"); 290 290 return ERR_PTR(-EINVAL); 291 291 } 292 - if (!pgmap->ops->page_free) { 293 - WARN(1, "Missing page_free method\n"); 292 + if (!pgmap->ops->folio_free) { 293 + WARN(1, "Missing folio_free method\n"); 294 294 return ERR_PTR(-EINVAL); 295 295 } 296 296 if (!pgmap->owner) { ··· 299 299 } 300 300 break; 301 301 case MEMORY_DEVICE_COHERENT: 302 - if (!pgmap->ops->page_free) { 303 - WARN(1, "Missing page_free method\n"); 302 + if (!pgmap->ops->folio_free) { 303 + WARN(1, "Missing folio_free method\n"); 304 304 return ERR_PTR(-EINVAL); 305 305 } 306 306 if (!pgmap->owner) { ··· 453 453 switch (pgmap->type) { 454 454 case MEMORY_DEVICE_PRIVATE: 455 455 case MEMORY_DEVICE_COHERENT: 456 - if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) 456 + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free)) 457 457 break; 458 - pgmap->ops->page_free(&folio->page); 458 + pgmap->ops->folio_free(folio); 459 459 percpu_ref_put_many(&folio->pgmap->ref, nr); 460 460 break; 461 461 ··· 472 472 break; 473 473 474 474 case MEMORY_DEVICE_PCI_P2PDMA: 475 - if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) 475 + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free)) 476 476 break; 477 - pgmap->ops->page_free(folio_page(folio, 0)); 477 + pgmap->ops->folio_free(folio); 478 478 break; 479 479 } 480 480 }