Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove cold parameter from free_hot_cold_page*

Most callers of free_hot_cold_page claim the pages being released
are cache hot. The exception is the page reclaim paths where it is
likely that enough pages will be freed in the near future that the
per-cpu lists are going to be recycled and the cache hotness information
is lost. As no one really cares about the hotness of pages being
released to the allocator, just ditch the parameter.

The APIs are renamed to indicate that it's no longer about hot/cold
pages. It should also be less confusing as there are subtle differences
between them. __free_pages drops a reference and frees a page when the
refcount reaches zero. free_hot_cold_page handled pages whose refcount
was already zero which is non-obvious from the name. free_unref_page
should be more obvious.

No performance impact is expected as the overhead is marginal. The
parameter is removed simply because it is a bit stupid to have a useless
parameter copied everywhere.

[mgorman@techsingularity.net: add pages to head, not tail]
Link: http://lkml.kernel.org/r/20171019154321.qtpzaeftoyyw4iey@techsingularity.net
Link: http://lkml.kernel.org/r/20171018075952.10627-8-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Mel Gorman and committed by
Linus Torvalds
2d4894b5 c6f92f9f

+28 -36
+1 -1
arch/powerpc/mm/mmu_context_book3s64.c
··· 200 200 /* We allow PTE_FRAG_NR fragments from a PTE page */ 201 201 if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) { 202 202 pgtable_page_dtor(page); 203 - free_hot_cold_page(page, 0); 203 + free_unref_page(page); 204 204 } 205 205 } 206 206
+1 -1
arch/powerpc/mm/pgtable_64.c
··· 404 404 if (put_page_testzero(page)) { 405 405 if (!kernel) 406 406 pgtable_page_dtor(page); 407 - free_hot_cold_page(page, 0); 407 + free_unref_page(page); 408 408 } 409 409 } 410 410
+1 -1
arch/sparc/mm/init_64.c
··· 2939 2939 if (!page) 2940 2940 return NULL; 2941 2941 if (!pgtable_page_ctor(page)) { 2942 - free_hot_cold_page(page, 0); 2942 + free_unref_page(page); 2943 2943 return NULL; 2944 2944 } 2945 2945 return (pte_t *) page_address(page);
+1 -1
arch/tile/mm/homecache.c
··· 409 409 if (put_page_testzero(page)) { 410 410 homecache_change_page_home(page, order, PAGE_HOME_HASH); 411 411 if (order == 0) { 412 - free_hot_cold_page(page, false); 412 + free_unref_page(page); 413 413 } else { 414 414 init_page_count(page); 415 415 __free_pages(page, order);
+2 -2
include/linux/gfp.h
··· 530 530 531 531 extern void __free_pages(struct page *page, unsigned int order); 532 532 extern void free_pages(unsigned long addr, unsigned int order); 533 - extern void free_hot_cold_page(struct page *page, bool cold); 534 - extern void free_hot_cold_page_list(struct list_head *list, bool cold); 533 + extern void free_unref_page(struct page *page); 534 + extern void free_unref_page_list(struct list_head *list); 535 535 536 536 struct page_frag_cache; 537 537 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+4 -7
include/trace/events/kmem.h
··· 172 172 173 173 TRACE_EVENT(mm_page_free_batched, 174 174 175 - TP_PROTO(struct page *page, int cold), 175 + TP_PROTO(struct page *page), 176 176 177 - TP_ARGS(page, cold), 177 + TP_ARGS(page), 178 178 179 179 TP_STRUCT__entry( 180 180 __field( unsigned long, pfn ) 181 - __field( int, cold ) 182 181 ), 183 182 184 183 TP_fast_assign( 185 184 __entry->pfn = page_to_pfn(page); 186 - __entry->cold = cold; 187 185 ), 188 186 189 - TP_printk("page=%p pfn=%lu order=0 cold=%d", 187 + TP_printk("page=%p pfn=%lu order=0", 190 188 pfn_to_page(__entry->pfn), 191 - __entry->pfn, 192 - __entry->cold) 189 + __entry->pfn) 193 190 ); 194 191 195 192 TRACE_EVENT(mm_page_alloc,
+12 -17
mm/page_alloc.c
··· 2611 2611 } 2612 2612 #endif /* CONFIG_PM */ 2613 2613 2614 - static bool free_hot_cold_page_prepare(struct page *page, unsigned long pfn) 2614 + static bool free_unref_page_prepare(struct page *page, unsigned long pfn) 2615 2615 { 2616 2616 int migratetype; 2617 2617 ··· 2623 2623 return true; 2624 2624 } 2625 2625 2626 - static void free_hot_cold_page_commit(struct page *page, unsigned long pfn, 2627 - bool cold) 2626 + static void free_unref_page_commit(struct page *page, unsigned long pfn) 2628 2627 { 2629 2628 struct zone *zone = page_zone(page); 2630 2629 struct per_cpu_pages *pcp; ··· 2648 2649 } 2649 2650 2650 2651 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2651 - if (!cold) 2652 - list_add(&page->lru, &pcp->lists[migratetype]); 2653 - else 2654 - list_add_tail(&page->lru, &pcp->lists[migratetype]); 2652 + list_add(&page->lru, &pcp->lists[migratetype]); 2655 2653 pcp->count++; 2656 2654 if (pcp->count >= pcp->high) { 2657 2655 unsigned long batch = READ_ONCE(pcp->batch); ··· 2659 2663 2660 2664 /* 2661 2665 * Free a 0-order page 2662 - * cold == true ? 
free a cold page : free a hot page 2663 2666 */ 2664 - void free_hot_cold_page(struct page *page, bool cold) 2667 + void free_unref_page(struct page *page) 2665 2668 { 2666 2669 unsigned long flags; 2667 2670 unsigned long pfn = page_to_pfn(page); 2668 2671 2669 - if (!free_hot_cold_page_prepare(page, pfn)) 2672 + if (!free_unref_page_prepare(page, pfn)) 2670 2673 return; 2671 2674 2672 2675 local_irq_save(flags); 2673 - free_hot_cold_page_commit(page, pfn, cold); 2676 + free_unref_page_commit(page, pfn); 2674 2677 local_irq_restore(flags); 2675 2678 } 2676 2679 2677 2680 /* 2678 2681 * Free a list of 0-order pages 2679 2682 */ 2680 - void free_hot_cold_page_list(struct list_head *list, bool cold) 2683 + void free_unref_page_list(struct list_head *list) 2681 2684 { 2682 2685 struct page *page, *next; 2683 2686 unsigned long flags, pfn; ··· 2684 2689 /* Prepare pages for freeing */ 2685 2690 list_for_each_entry_safe(page, next, list, lru) { 2686 2691 pfn = page_to_pfn(page); 2687 - if (!free_hot_cold_page_prepare(page, pfn)) 2692 + if (!free_unref_page_prepare(page, pfn)) 2688 2693 list_del(&page->lru); 2689 2694 set_page_private(page, pfn); 2690 2695 } ··· 2694 2699 unsigned long pfn = page_private(page); 2695 2700 2696 2701 set_page_private(page, 0); 2697 - trace_mm_page_free_batched(page, cold); 2698 - free_hot_cold_page_commit(page, pfn, cold); 2702 + trace_mm_page_free_batched(page); 2703 + free_unref_page_commit(page, pfn); 2699 2704 } 2700 2705 local_irq_restore(flags); 2701 2706 } ··· 4296 4301 { 4297 4302 if (put_page_testzero(page)) { 4298 4303 if (order == 0) 4299 - free_hot_cold_page(page, false); 4304 + free_unref_page(page); 4300 4305 else 4301 4306 __free_pages_ok(page, order); 4302 4307 } ··· 4354 4359 unsigned int order = compound_order(page); 4355 4360 4356 4361 if (order == 0) 4357 - free_hot_cold_page(page, false); 4362 + free_unref_page(page); 4358 4363 else 4359 4364 __free_pages_ok(page, order); 4360 4365 }
+1 -1
mm/rmap.c
··· 1321 1321 * It would be tidy to reset the PageAnon mapping here, 1322 1322 * but that might overwrite a racing page_add_anon_rmap 1323 1323 * which increments mapcount after us but sets mapping 1324 - * before us: so leave the reset to free_hot_cold_page, 1324 + * before us: so leave the reset to free_unref_page, 1325 1325 * and remember that it's only reliable while mapped. 1326 1326 * Leaving it set also helps swapoff to reinstate ptes 1327 1327 * faster for those pages still in swapcache.
+2 -2
mm/swap.c
··· 76 76 static void __put_single_page(struct page *page) 77 77 { 78 78 __page_cache_release(page); 79 - free_hot_cold_page(page, false); 79 + free_unref_page(page); 80 80 } 81 81 82 82 static void __put_compound_page(struct page *page) ··· 817 817 spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags); 818 818 819 819 mem_cgroup_uncharge_list(&pages_to_free); 820 - free_hot_cold_page_list(&pages_to_free, 0); 820 + free_unref_page_list(&pages_to_free); 821 821 } 822 822 EXPORT_SYMBOL(release_pages); 823 823
+3 -3
mm/vmscan.c
··· 1349 1349 1350 1350 mem_cgroup_uncharge_list(&free_pages); 1351 1351 try_to_unmap_flush(); 1352 - free_hot_cold_page_list(&free_pages, true); 1352 + free_unref_page_list(&free_pages); 1353 1353 1354 1354 list_splice(&ret_pages, page_list); 1355 1355 count_vm_events(PGACTIVATE, pgactivate); ··· 1824 1824 spin_unlock_irq(&pgdat->lru_lock); 1825 1825 1826 1826 mem_cgroup_uncharge_list(&page_list); 1827 - free_hot_cold_page_list(&page_list, true); 1827 + free_unref_page_list(&page_list); 1828 1828 1829 1829 /* 1830 1830 * If reclaim is isolating dirty pages under writeback, it implies ··· 2063 2063 spin_unlock_irq(&pgdat->lru_lock); 2064 2064 2065 2065 mem_cgroup_uncharge_list(&l_hold); 2066 - free_hot_cold_page_list(&l_hold, true); 2066 + free_unref_page_list(&l_hold); 2067 2067 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, 2068 2068 nr_deactivate, nr_rotated, sc->priority, file); 2069 2069 }