Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: rename page struct field helpers

The function names page_xchg_last_nid(), page_last_nid() and
reset_page_last_nid() were judged to be inconsistent so rename them to a
struct_field_op style pattern. As it looked jarring to have
reset_page_mapcount() and page_nid_reset_last() beside each other in
memmap_init_zone(), this patch also renames reset_page_mapcount() to
page_mapcount_reset(). There are others like init_page_count() but as
it is used throughout the arch code a rename would likely cause more
conflicts than it is worth.

[akpm@linux-foundation.org: fix zcache]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mel Gorman, committed by Linus Torvalds
22b751c3 e4715f01

+25 -25
+1 -1
drivers/staging/zcache/zbud.c
··· 404 404 else 405 405 zbud_pers_pageframes--; 406 406 zbudpage_spin_unlock(zbudpage); 407 - reset_page_mapcount(page); 407 + page_mapcount_reset(page); 408 408 init_page_count(page); 409 409 page->index = 0; 410 410 return page;
+1 -1
drivers/staging/zsmalloc/zsmalloc-main.c
··· 472 472 set_page_private(page, 0); 473 473 page->mapping = NULL; 474 474 page->freelist = NULL; 475 - reset_page_mapcount(page); 475 + page_mapcount_reset(page); 476 476 } 477 477 478 478 static void free_zspage(struct page *first_page)
+10 -10
include/linux/mm.h
··· 367 367 * both from it and to it can be tracked, using atomic_inc_and_test 368 368 * and atomic_add_negative(-1). 369 369 */ 370 - static inline void reset_page_mapcount(struct page *page) 370 + static inline void page_mapcount_reset(struct page *page) 371 371 { 372 372 atomic_set(&(page)->_mapcount, -1); 373 373 } ··· 658 658 659 659 #ifdef CONFIG_NUMA_BALANCING 660 660 #ifdef LAST_NID_NOT_IN_PAGE_FLAGS 661 - static inline int page_xchg_last_nid(struct page *page, int nid) 661 + static inline int page_nid_xchg_last(struct page *page, int nid) 662 662 { 663 663 return xchg(&page->_last_nid, nid); 664 664 } 665 665 666 - static inline int page_last_nid(struct page *page) 666 + static inline int page_nid_last(struct page *page) 667 667 { 668 668 return page->_last_nid; 669 669 } 670 - static inline void reset_page_last_nid(struct page *page) 670 + static inline void page_nid_reset_last(struct page *page) 671 671 { 672 672 page->_last_nid = -1; 673 673 } 674 674 #else 675 - static inline int page_last_nid(struct page *page) 675 + static inline int page_nid_last(struct page *page) 676 676 { 677 677 return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK; 678 678 } 679 679 680 - extern int page_xchg_last_nid(struct page *page, int nid); 680 + extern int page_nid_xchg_last(struct page *page, int nid); 681 681 682 - static inline void reset_page_last_nid(struct page *page) 682 + static inline void page_nid_reset_last(struct page *page) 683 683 { 684 684 int nid = (1 << LAST_NID_SHIFT) - 1; 685 685 ··· 688 688 } 689 689 #endif /* LAST_NID_NOT_IN_PAGE_FLAGS */ 690 690 #else 691 - static inline int page_xchg_last_nid(struct page *page, int nid) 691 + static inline int page_nid_xchg_last(struct page *page, int nid) 692 692 { 693 693 return page_to_nid(page); 694 694 } 695 695 696 - static inline int page_last_nid(struct page *page) 696 + static inline int page_nid_last(struct page *page) 697 697 { 698 698 return page_to_nid(page); 699 699 } 700 700 701 - static inline void reset_page_last_nid(struct page *page) 701 + static inline void page_nid_reset_last(struct page *page) 702 702 { 703 703 } 704 704 #endif
+1 -1
mm/huge_memory.c
··· 1639 1639 page_tail->mapping = page->mapping; 1640 1640 1641 1641 page_tail->index = page->index + i; 1642 - page_xchg_last_nid(page_tail, page_last_nid(page)); 1642 + page_nid_xchg_last(page_tail, page_nid_last(page)); 1643 1643 1644 1644 BUG_ON(!PageAnon(page_tail)); 1645 1645 BUG_ON(!PageUptodate(page_tail));
+1 -1
mm/mempolicy.c
··· 2316 2316 * it less likely we act on an unlikely task<->page 2317 2317 * relation. 2318 2318 */ 2319 - last_nid = page_xchg_last_nid(page, polnid); 2319 + last_nid = page_nid_xchg_last(page, polnid); 2320 2320 if (last_nid != polnid) 2321 2321 goto out; 2322 2322 }
+2 -2
mm/migrate.c
··· 1497 1497 __GFP_NOWARN) & 1498 1498 ~GFP_IOFS, 0); 1499 1499 if (newpage) 1500 - page_xchg_last_nid(newpage, page_last_nid(page)); 1500 + page_nid_xchg_last(newpage, page_nid_last(page)); 1501 1501 1502 1502 return newpage; 1503 1503 } ··· 1681 1681 if (!new_page) 1682 1682 goto out_fail; 1683 1683 1684 - page_xchg_last_nid(new_page, page_last_nid(page)); 1684 + page_nid_xchg_last(new_page, page_nid_last(page)); 1685 1685 1686 1686 isolated = numamigrate_isolate_page(pgdat, page); 1687 1687 if (!isolated) {
+2 -2
mm/mmzone.c
··· 98 98 } 99 99 100 100 #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS) 101 - int page_xchg_last_nid(struct page *page, int nid) 101 + int page_nid_xchg_last(struct page *page, int nid) 102 102 { 103 103 unsigned long old_flags, flags; 104 104 int last_nid; 105 105 106 106 do { 107 107 old_flags = flags = page->flags; 108 - last_nid = page_last_nid(page); 108 + last_nid = page_nid_last(page); 109 109 110 110 flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT); 111 111 flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+5 -5
mm/page_alloc.c
··· 295 295 296 296 /* Don't complain about poisoned pages */ 297 297 if (PageHWPoison(page)) { 298 - reset_page_mapcount(page); /* remove PageBuddy */ 298 + page_mapcount_reset(page); /* remove PageBuddy */ 299 299 return; 300 300 } 301 301 ··· 327 327 dump_stack(); 328 328 out: 329 329 /* Leave bad fields for debug, except PageBuddy could make trouble */ 330 - reset_page_mapcount(page); /* remove PageBuddy */ 330 + page_mapcount_reset(page); /* remove PageBuddy */ 331 331 add_taint(TAINT_BAD_PAGE); 332 332 } 333 333 ··· 613 613 bad_page(page); 614 614 return 1; 615 615 } 616 - reset_page_last_nid(page); 616 + page_nid_reset_last(page); 617 617 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) 618 618 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 619 619 return 0; ··· 3894 3894 set_page_links(page, zone, nid, pfn); 3895 3895 mminit_verify_page_links(page, zone, nid, pfn); 3896 3896 init_page_count(page); 3897 - reset_page_mapcount(page); 3898 - reset_page_last_nid(page); 3897 + page_mapcount_reset(page); 3898 + page_nid_reset_last(page); 3899 3899 SetPageReserved(page); 3900 3900 /* 3901 3901 * Mark the block movable so that blocks are reserved for
+1 -1
mm/slob.c
··· 360 360 clear_slob_page_free(sp); 361 361 spin_unlock_irqrestore(&slob_lock, flags); 362 362 __ClearPageSlab(sp); 363 - reset_page_mapcount(sp); 363 + page_mapcount_reset(sp); 364 364 slob_free_pages(b, 0); 365 365 return; 366 366 }
+1 -1
mm/slub.c
··· 1408 1408 __ClearPageSlab(page); 1409 1409 1410 1410 memcg_release_pages(s, order); 1411 - reset_page_mapcount(page); 1411 + page_mapcount_reset(page); 1412 1412 if (current->reclaim_state) 1413 1413 current->reclaim_state->reclaimed_slab += pages; 1414 1414 __free_memcg_kmem_pages(page, order);