Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, pagevec: remove cold parameter for pagevecs

Every pagevec_init user claims the pages being released are hot even in
cases where it is unlikely the pages are hot. As no one cares about the
hotness of pages being released to the allocator, just ditch the
parameter.

No performance impact is expected as the overhead is marginal. The
parameter is removed simply because it is a bit stupid to have a useless
parameter copied everywhere.

Link: http://lkml.kernel.org/r/20171018075952.10627-6-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mel Gorman and committed by Linus Torvalds.
86679820 d9ed0d08

+45 -47
+1 -1
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 1859 1859 INIT_LIST_HEAD(&vm->unbound_list); 1860 1860 1861 1861 list_add_tail(&vm->global_link, &dev_priv->vm_list); 1862 - pagevec_init(&vm->free_pages, false); 1862 + pagevec_init(&vm->free_pages); 1863 1863 } 1864 1864 1865 1865 static void i915_address_space_fini(struct i915_address_space *vm)
+2 -2
fs/afs/write.c
··· 308 308 _enter("{%x:%u},%lx-%lx", 309 309 vnode->fid.vid, vnode->fid.vnode, first, last); 310 310 311 - pagevec_init(&pv, 0); 311 + pagevec_init(&pv); 312 312 313 313 do { 314 314 _debug("kill %lx-%lx", first, last); ··· 602 602 603 603 ASSERT(wb != NULL); 604 604 605 - pagevec_init(&pv, 0); 605 + pagevec_init(&pv); 606 606 607 607 do { 608 608 _debug("done %lx-%lx", first, last);
+2 -2
fs/btrfs/extent_io.c
··· 3797 3797 int scanned = 0; 3798 3798 int tag; 3799 3799 3800 - pagevec_init(&pvec, 0); 3800 + pagevec_init(&pvec); 3801 3801 if (wbc->range_cyclic) { 3802 3802 index = mapping->writeback_index; /* Start from prev offset */ 3803 3803 end = -1; ··· 3936 3936 if (!igrab(inode)) 3937 3937 return 0; 3938 3938 3939 - pagevec_init(&pvec, 0); 3939 + pagevec_init(&pvec); 3940 3940 if (wbc->range_cyclic) { 3941 3941 index = mapping->writeback_index; /* Start from prev offset */ 3942 3942 end = -1;
+2 -2
fs/buffer.c
··· 1592 1592 struct buffer_head *head; 1593 1593 1594 1594 end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits); 1595 - pagevec_init(&pvec, 0); 1595 + pagevec_init(&pvec); 1596 1596 while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) { 1597 1597 count = pagevec_count(&pvec); 1598 1598 for (i = 0; i < count; i++) { ··· 3514 3514 if (length <= 0) 3515 3515 return -ENOENT; 3516 3516 3517 - pagevec_init(&pvec, 0); 3517 + pagevec_init(&pvec); 3518 3518 3519 3519 do { 3520 3520 unsigned nr_pages, i;
+2 -2
fs/cachefiles/rdwr.c
··· 710 710 /* calculate the shift required to use bmap */ 711 711 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 712 712 713 - pagevec_init(&pagevec, 0); 713 + pagevec_init(&pagevec); 714 714 715 715 op->op.flags &= FSCACHE_OP_KEEP_FLAGS; 716 716 op->op.flags |= FSCACHE_OP_ASYNC; ··· 844 844 845 845 ret = cachefiles_has_space(cache, 0, *nr_pages); 846 846 if (ret == 0) { 847 - pagevec_init(&pagevec, 0); 847 + pagevec_init(&pagevec); 848 848 849 849 list_for_each_entry(page, pages, lru) { 850 850 if (pagevec_add(&pagevec, page) == 0)
+2 -2
fs/ceph/addr.c
··· 680 680 struct pagevec pvec; 681 681 int i; 682 682 683 - pagevec_init(&pvec, 0); 683 + pagevec_init(&pvec); 684 684 for (i = 0; i < num; i++) { 685 685 if (pagevec_add(&pvec, pages[i]) == 0) 686 686 pagevec_release(&pvec); ··· 811 811 if (fsc->mount_options->wsize < wsize) 812 812 wsize = fsc->mount_options->wsize; 813 813 814 - pagevec_init(&pvec, 0); 814 + pagevec_init(&pvec); 815 815 816 816 start_index = wbc->range_cyclic ? mapping->writeback_index : 0; 817 817 index = start_index;
+1 -1
fs/dax.c
··· 794 794 795 795 tag_pages_for_writeback(mapping, start_index, end_index); 796 796 797 - pagevec_init(&pvec, 0); 797 + pagevec_init(&pvec); 798 798 while (!done) { 799 799 pvec.nr = find_get_entries_tag(mapping, start_index, 800 800 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
+3 -3
fs/ext4/inode.c
··· 1719 1719 ext4_es_remove_extent(inode, start, last - start + 1); 1720 1720 } 1721 1721 1722 - pagevec_init(&pvec, 0); 1722 + pagevec_init(&pvec); 1723 1723 while (index <= end) { 1724 1724 nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end); 1725 1725 if (nr_pages == 0) ··· 2345 2345 lblk = start << bpp_bits; 2346 2346 pblock = mpd->map.m_pblk; 2347 2347 2348 - pagevec_init(&pvec, 0); 2348 + pagevec_init(&pvec); 2349 2349 while (start <= end) { 2350 2350 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, 2351 2351 &start, end); ··· 2616 2616 else 2617 2617 tag = PAGECACHE_TAG_DIRTY; 2618 2618 2619 - pagevec_init(&pvec, 0); 2619 + pagevec_init(&pvec); 2620 2620 mpd->map.m_len = 0; 2621 2621 mpd->next_page = index; 2622 2622 while (index <= end) {
+1 -1
fs/f2fs/checkpoint.c
··· 314 314 }; 315 315 struct blk_plug plug; 316 316 317 - pagevec_init(&pvec, 0); 317 + pagevec_init(&pvec); 318 318 319 319 blk_start_plug(&plug); 320 320
+1 -1
fs/f2fs/data.c
··· 1635 1635 int range_whole = 0; 1636 1636 int tag; 1637 1637 1638 - pagevec_init(&pvec, 0); 1638 + pagevec_init(&pvec); 1639 1639 1640 1640 if (get_dirty_pages(mapping->host) <= 1641 1641 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
+4 -4
fs/f2fs/node.c
··· 1282 1282 struct page *last_page = NULL; 1283 1283 int nr_pages; 1284 1284 1285 - pagevec_init(&pvec, 0); 1285 + pagevec_init(&pvec); 1286 1286 index = 0; 1287 1287 1288 1288 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, ··· 1436 1436 return PTR_ERR_OR_ZERO(last_page); 1437 1437 } 1438 1438 retry: 1439 - pagevec_init(&pvec, 0); 1439 + pagevec_init(&pvec); 1440 1440 index = 0; 1441 1441 1442 1442 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, ··· 1547 1547 int ret = 0; 1548 1548 int nr_pages; 1549 1549 1550 - pagevec_init(&pvec, 0); 1550 + pagevec_init(&pvec); 1551 1551 1552 1552 next_step: 1553 1553 index = 0; ··· 1648 1648 int ret2, ret = 0; 1649 1649 int nr_pages; 1650 1650 1651 - pagevec_init(&pvec, 0); 1651 + pagevec_init(&pvec); 1652 1652 1653 1653 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, 1654 1654 PAGECACHE_TAG_WRITEBACK))) {
+1 -1
fs/fscache/page.c
··· 1175 1175 return; 1176 1176 } 1177 1177 1178 - pagevec_init(&pvec, 0); 1178 + pagevec_init(&pvec); 1179 1179 next = 0; 1180 1180 do { 1181 1181 if (!pagevec_lookup(&pvec, mapping, &next))
+1 -1
fs/gfs2/aops.c
··· 371 371 int range_whole = 0; 372 372 int tag; 373 373 374 - pagevec_init(&pvec, 0); 374 + pagevec_init(&pvec); 375 375 if (wbc->range_cyclic) { 376 376 writeback_index = mapping->writeback_index; /* prev offset */ 377 377 index = writeback_index;
+1 -1
fs/hugetlbfs/inode.c
··· 407 407 408 408 memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 409 409 pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 410 - pagevec_init(&pvec, 0); 410 + pagevec_init(&pvec); 411 411 next = start; 412 412 while (next < end) { 413 413 /*
+1 -1
fs/nilfs2/btree.c
··· 2156 2156 level++) 2157 2157 INIT_LIST_HEAD(&lists[level]); 2158 2158 2159 - pagevec_init(&pvec, 0); 2159 + pagevec_init(&pvec); 2160 2160 2161 2161 while (pagevec_lookup_tag(&pvec, btcache, &index, 2162 2162 PAGECACHE_TAG_DIRTY)) {
+4 -4
fs/nilfs2/page.c
··· 255 255 pgoff_t index = 0; 256 256 int err = 0; 257 257 258 - pagevec_init(&pvec, 0); 258 + pagevec_init(&pvec); 259 259 repeat: 260 260 if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY)) 261 261 return 0; ··· 309 309 pgoff_t index = 0; 310 310 int err; 311 311 312 - pagevec_init(&pvec, 0); 312 + pagevec_init(&pvec); 313 313 repeat: 314 314 n = pagevec_lookup(&pvec, smap, &index); 315 315 if (!n) ··· 373 373 unsigned int i; 374 374 pgoff_t index = 0; 375 375 376 - pagevec_init(&pvec, 0); 376 + pagevec_init(&pvec); 377 377 378 378 while (pagevec_lookup_tag(&pvec, mapping, &index, 379 379 PAGECACHE_TAG_DIRTY)) { ··· 518 518 index = start_blk >> (PAGE_SHIFT - inode->i_blkbits); 519 519 nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits); 520 520 521 - pagevec_init(&pvec, 0); 521 + pagevec_init(&pvec); 522 522 523 523 repeat: 524 524 pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
+2 -2
fs/nilfs2/segment.c
··· 708 708 index = start >> PAGE_SHIFT; 709 709 last = end >> PAGE_SHIFT; 710 710 } 711 - pagevec_init(&pvec, 0); 711 + pagevec_init(&pvec); 712 712 repeat: 713 713 if (unlikely(index > last) || 714 714 !pagevec_lookup_range_tag(&pvec, mapping, &index, last, ··· 753 753 unsigned int i; 754 754 pgoff_t index = 0; 755 755 756 - pagevec_init(&pvec, 0); 756 + pagevec_init(&pvec); 757 757 758 758 while (pagevec_lookup_tag(&pvec, mapping, &index, 759 759 PAGECACHE_TAG_DIRTY)) {
+1 -3
include/linux/pagevec.h
··· 17 17 18 18 struct pagevec { 19 19 unsigned long nr; 20 - bool cold; 21 20 bool drained; 22 21 struct page *pages[PAGEVEC_SIZE]; 23 22 }; ··· 50 51 return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); 51 52 } 52 53 53 - static inline void pagevec_init(struct pagevec *pvec, int cold) 54 + static inline void pagevec_init(struct pagevec *pvec) 54 55 { 55 56 pvec->nr = 0; 56 - pvec->cold = cold; 57 57 pvec->drained = false; 58 58 } 59 59
+1 -1
mm/filemap.c
··· 519 519 if (end_byte < start_byte) 520 520 return; 521 521 522 - pagevec_init(&pvec, 0); 522 + pagevec_init(&pvec); 523 523 while (index <= end) { 524 524 unsigned i; 525 525
+2 -2
mm/mlock.c
··· 289 289 struct pagevec pvec_putback; 290 290 int pgrescued = 0; 291 291 292 - pagevec_init(&pvec_putback, 0); 292 + pagevec_init(&pvec_putback); 293 293 294 294 /* Phase 1: page isolation */ 295 295 spin_lock_irq(zone_lru_lock(zone)); ··· 448 448 struct pagevec pvec; 449 449 struct zone *zone; 450 450 451 - pagevec_init(&pvec, 0); 451 + pagevec_init(&pvec); 452 452 /* 453 453 * Although FOLL_DUMP is intended for get_dump_page(), 454 454 * it just so happens that its special treatment of the
+1 -1
mm/page-writeback.c
··· 2168 2168 int range_whole = 0; 2169 2169 int tag; 2170 2170 2171 - pagevec_init(&pvec, 0); 2171 + pagevec_init(&pvec); 2172 2172 if (wbc->range_cyclic) { 2173 2173 writeback_index = mapping->writeback_index; /* prev offset */ 2174 2174 index = writeback_index;
+3 -3
mm/shmem.c
··· 747 747 pgoff_t indices[PAGEVEC_SIZE]; 748 748 pgoff_t index = 0; 749 749 750 - pagevec_init(&pvec, 0); 750 + pagevec_init(&pvec); 751 751 /* 752 752 * Minor point, but we might as well stop if someone else SHM_LOCKs it. 753 753 */ ··· 790 790 if (lend == -1) 791 791 end = -1; /* unsigned, so actually very big */ 792 792 793 - pagevec_init(&pvec, 0); 793 + pagevec_init(&pvec); 794 794 index = start; 795 795 while (index < end) { 796 796 pvec.nr = find_get_entries(mapping, index, ··· 2528 2528 bool done = false; 2529 2529 int i; 2530 2530 2531 - pagevec_init(&pvec, 0); 2531 + pagevec_init(&pvec); 2532 2532 pvec.nr = 1; /* start small: we may be there already */ 2533 2533 while (!done) { 2534 2534 pvec.nr = find_get_entries(mapping, index,
+2 -2
mm/swap.c
··· 210 210 } 211 211 if (pgdat) 212 212 spin_unlock_irqrestore(&pgdat->lru_lock, flags); 213 - release_pages(pvec->pages, pvec->nr, pvec->cold); 213 + release_pages(pvec->pages, pvec->nr, 0); 214 214 pagevec_reinit(pvec); 215 215 } 216 216 ··· 837 837 lru_add_drain(); 838 838 pvec->drained = true; 839 839 } 840 - release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); 840 + release_pages(pvec->pages, pagevec_count(pvec), 0); 841 841 pagevec_reinit(pvec); 842 842 } 843 843 EXPORT_SYMBOL(__pagevec_release);
+4 -4
mm/truncate.c
··· 330 330 else 331 331 end = (lend + 1) >> PAGE_SHIFT; 332 332 333 - pagevec_init(&pvec, 0); 333 + pagevec_init(&pvec); 334 334 index = start; 335 335 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, 336 336 min(end - index, (pgoff_t)PAGEVEC_SIZE), ··· 342 342 */ 343 343 struct pagevec locked_pvec; 344 344 345 - pagevec_init(&locked_pvec, 0); 345 + pagevec_init(&locked_pvec); 346 346 for (i = 0; i < pagevec_count(&pvec); i++) { 347 347 struct page *page = pvec.pages[i]; 348 348 ··· 553 553 unsigned long count = 0; 554 554 int i; 555 555 556 - pagevec_init(&pvec, 0); 556 + pagevec_init(&pvec); 557 557 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, 558 558 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, 559 559 indices)) { ··· 683 683 if (mapping->nrpages == 0 && mapping->nrexceptional == 0) 684 684 goto out; 685 685 686 - pagevec_init(&pvec, 0); 686 + pagevec_init(&pvec); 687 687 index = start; 688 688 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, 689 689 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,