Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/migrate: remove cruft from migration_entry_wait()s

migration_entry_wait_on_locked() does not need to take a mapped pte
pointer, its callers can do the unmap first. Annotate it with
__releases(ptl) to reduce sparse warnings.

Fold __migration_entry_wait_huge() into migration_entry_wait_huge(). Fold
__migration_entry_wait() into migration_entry_wait(), preferring the
tighter pte_offset_map_lock() to pte_offset_map() and pte_lockptr().

Link: https://lkml.kernel.org/r/b0e2a532-cdf2-561b-e999-f3b13b8d6d3@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Hugh Dickins; committed by Andrew Morton.

0cb8fd4d 26e1a0c3

+22 -49
+2 -2
include/linux/migrate.h
··· 75 75 76 76 int migrate_huge_page_move_mapping(struct address_space *mapping, 77 77 struct folio *dst, struct folio *src); 78 - void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, 79 - spinlock_t *ptl); 78 + void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) 79 + __releases(ptl); 80 80 void folio_migrate_flags(struct folio *newfolio, struct folio *folio); 81 81 void folio_migrate_copy(struct folio *newfolio, struct folio *folio); 82 82 int folio_migrate_mapping(struct address_space *mapping,
+3 -14
include/linux/swapops.h
··· 332 332 return false; 333 333 } 334 334 335 - extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 336 - spinlock_t *ptl); 337 335 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 338 336 unsigned long address); 339 - #ifdef CONFIG_HUGETLB_PAGE 340 - extern void __migration_entry_wait_huge(struct vm_area_struct *vma, 341 - pte_t *ptep, spinlock_t *ptl); 342 337 extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte); 343 - #endif /* CONFIG_HUGETLB_PAGE */ 344 338 #else /* CONFIG_MIGRATION */ 345 339 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) 346 340 { ··· 356 362 return 0; 357 363 } 358 364 359 - static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 360 - spinlock_t *ptl) { } 361 365 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 362 - unsigned long address) { } 363 - #ifdef CONFIG_HUGETLB_PAGE 364 - static inline void __migration_entry_wait_huge(struct vm_area_struct *vma, 365 - pte_t *ptep, spinlock_t *ptl) { } 366 - static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { } 367 - #endif /* CONFIG_HUGETLB_PAGE */ 366 + unsigned long address) { } 367 + static inline void migration_entry_wait_huge(struct vm_area_struct *vma, 368 + pte_t *pte) { } 368 369 static inline int is_writable_migration_entry(swp_entry_t entry) 369 370 { 370 371 return 0;
+4 -9
mm/filemap.c
··· 1362 1362 /** 1363 1363 * migration_entry_wait_on_locked - Wait for a migration entry to be removed 1364 1364 * @entry: migration swap entry. 1365 - * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required 1366 - * for pte entries, pass NULL for pmd entries. 1367 1365 * @ptl: already locked ptl. This function will drop the lock. 1368 1366 * 1369 1367 * Wait for a migration entry referencing the given page to be removed. This is ··· 1370 1372 * should be called while holding the ptl for the migration entry referencing 1371 1373 * the page. 1372 1374 * 1373 - * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock(). 1375 + * Returns after unlocking the ptl. 1374 1376 * 1375 1377 * This follows the same logic as folio_wait_bit_common() so see the comments 1376 1378 * there. 1377 1379 */ 1378 - void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, 1379 - spinlock_t *ptl) 1380 + void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) 1381 + __releases(ptl) 1380 1382 { 1381 1383 struct wait_page_queue wait_page; 1382 1384 wait_queue_entry_t *wait = &wait_page.wait; ··· 1410 1412 * a valid reference to the page, and it must take the ptl to remove the 1411 1413 * migration entry. So the page is valid until the ptl is dropped. 1412 1414 */ 1413 - if (ptep) 1414 - pte_unmap_unlock(ptep, ptl); 1415 - else 1416 - spin_unlock(ptl); 1415 + spin_unlock(ptl); 1417 1416 1418 1417 for (;;) { 1419 1418 unsigned int flags;
+13 -24
mm/migrate.c
··· 296 296 * get to the page and wait until migration is finished. 297 297 * When we return from this function the fault will be retried. 298 298 */ 299 - void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 300 - spinlock_t *ptl) 299 + void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 300 + unsigned long address) 301 301 { 302 + spinlock_t *ptl; 303 + pte_t *ptep; 302 304 pte_t pte; 303 305 swp_entry_t entry; 304 306 305 - spin_lock(ptl); 307 + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 306 308 pte = *ptep; 309 + pte_unmap(ptep); 310 + 307 311 if (!is_swap_pte(pte)) 308 312 goto out; 309 313 ··· 315 311 if (!is_migration_entry(entry)) 316 312 goto out; 317 313 318 - migration_entry_wait_on_locked(entry, ptep, ptl); 314 + migration_entry_wait_on_locked(entry, ptl); 319 315 return; 320 316 out: 321 - pte_unmap_unlock(ptep, ptl); 322 - } 323 - 324 - void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, 325 - unsigned long address) 326 - { 327 - spinlock_t *ptl = pte_lockptr(mm, pmd); 328 - pte_t *ptep = pte_offset_map(pmd, address); 329 - __migration_entry_wait(mm, ptep, ptl); 317 + spin_unlock(ptl); 330 318 } 331 319 332 320 #ifdef CONFIG_HUGETLB_PAGE ··· 328 332 * 329 333 * This function will release the vma lock before returning. 330 334 */ 331 - void __migration_entry_wait_huge(struct vm_area_struct *vma, 332 - pte_t *ptep, spinlock_t *ptl) 335 + void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) 333 336 { 337 + spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); 334 338 pte_t pte; 335 339 336 340 hugetlb_vma_assert_locked(vma); ··· 348 352 * lock release in migration_entry_wait_on_locked(). 349 353 */ 350 354 hugetlb_vma_unlock_read(vma); 351 - migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); 355 + migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl); 352 356 } 353 - } 354 - 355 - void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) 356 - { 357 - spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); 358 - 359 - __migration_entry_wait_huge(vma, pte, ptl); 360 357 } 361 358 #endif 362 359 ··· 361 372 ptl = pmd_lock(mm, pmd); 362 373 if (!is_pmd_migration_entry(*pmd)) 363 374 goto unlock; 364 - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); 375 + migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); 365 376 return; 366 377 unlock: 367 378 spin_unlock(ptl);