Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove lru parameter from __pagevec_lru_add and remove parts of pagevec API

Now that the LRU to add a page to is decided at LRU-add time, remove the
misleading lru parameter from __pagevec_lru_add. A consequence of this
is that the pagevec_lru_add_file, pagevec_lru_add_anon and similar
helpers are misleading as the caller no longer has direct control over
what LRU the page is added to. Unused helpers are removed by this patch
and existing users of pagevec_lru_add_file() are converted to use
lru_cache_add_file() directly and use the per-cpu pagevecs instead of
creating their own pagevec.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alexey Lyahkov <alexey.lyashkov@gmail.com>
Cc: Andrew Perepechko <anserper@ya.ru>
Cc: Robin Dong <sanbai@taobao.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Bernd Schubert <bernd.schubert@fastmail.fm>
Cc: David Howells <dhowells@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Mel Gorman and committed by Linus Torvalds
a0b8cab3 059285a2

+14 -69
+7 -23
fs/cachefiles/rdwr.c
··· 12 12 #include <linux/mount.h> 13 13 #include <linux/slab.h> 14 14 #include <linux/file.h> 15 + #include <linux/swap.h> 15 16 #include "internal.h" 16 17 17 18 /* ··· 228 227 */ 229 228 static int cachefiles_read_backing_file_one(struct cachefiles_object *object, 230 229 struct fscache_retrieval *op, 231 - struct page *netpage, 232 - struct pagevec *pagevec) 230 + struct page *netpage) 233 231 { 234 232 struct cachefiles_one_read *monitor; 235 233 struct address_space *bmapping; ··· 236 236 int ret; 237 237 238 238 _enter(""); 239 - 240 - pagevec_reinit(pagevec); 241 239 242 240 _debug("read back %p{%lu,%d}", 243 241 netpage, netpage->index, page_count(netpage)); ··· 281 283 backpage = newpage; 282 284 newpage = NULL; 283 285 284 - page_cache_get(backpage); 285 - pagevec_add(pagevec, backpage); 286 - __pagevec_lru_add_file(pagevec); 286 + lru_cache_add_file(backpage); 287 287 288 288 read_backing_page: 289 289 ret = bmapping->a_ops->readpage(NULL, backpage); ··· 448 452 if (block) { 449 453 /* submit the apparently valid page to the backing fs to be 450 454 * read from disk */ 451 - ret = cachefiles_read_backing_file_one(object, op, page, 452 - &pagevec); 455 + ret = cachefiles_read_backing_file_one(object, op, page); 453 456 } else if (cachefiles_has_space(cache, 0, 1) == 0) { 454 457 /* there's space in the cache we can use */ 455 458 fscache_mark_page_cached(op, page); ··· 477 482 { 478 483 struct cachefiles_one_read *monitor = NULL; 479 484 struct address_space *bmapping = object->backer->d_inode->i_mapping; 480 - struct pagevec lru_pvec; 481 485 struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; 482 486 int ret = 0; 483 487 484 488 _enter(""); 485 - 486 - pagevec_init(&lru_pvec, 0); 487 489 488 490 list_for_each_entry_safe(netpage, _n, list, lru) { 489 491 list_del(&netpage->lru); ··· 526 534 backpage = newpage; 527 535 newpage = NULL; 528 536 529 - page_cache_get(backpage); 530 - if (!pagevec_add(&lru_pvec, backpage)) 531 - 
__pagevec_lru_add_file(&lru_pvec); 537 + lru_cache_add_file(backpage); 532 538 533 539 reread_backing_page: 534 540 ret = bmapping->a_ops->readpage(NULL, backpage); ··· 549 559 goto nomem; 550 560 } 551 561 552 - page_cache_get(netpage); 553 - if (!pagevec_add(&lru_pvec, netpage)) 554 - __pagevec_lru_add_file(&lru_pvec); 562 + lru_cache_add_file(netpage); 555 563 556 564 /* install a monitor */ 557 565 page_cache_get(netpage); ··· 631 643 632 644 fscache_mark_page_cached(op, netpage); 633 645 634 - page_cache_get(netpage); 635 - if (!pagevec_add(&lru_pvec, netpage)) 636 - __pagevec_lru_add_file(&lru_pvec); 646 + lru_cache_add_file(netpage); 637 647 638 648 /* the netpage is unlocked and marked up to date here */ 639 649 fscache_end_io(op, netpage, 0); ··· 647 661 648 662 out: 649 663 /* tidy up */ 650 - pagevec_lru_add_file(&lru_pvec); 651 - 652 664 if (newpage) 653 665 page_cache_release(newpage); 654 666 if (netpage)
+2 -5
fs/nfs/dir.c
··· 33 33 #include <linux/pagevec.h> 34 34 #include <linux/namei.h> 35 35 #include <linux/mount.h> 36 + #include <linux/swap.h> 36 37 #include <linux/sched.h> 37 38 #include <linux/kmemleak.h> 38 39 #include <linux/xattr.h> ··· 1759 1758 */ 1760 1759 int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 1761 1760 { 1762 - struct pagevec lru_pvec; 1763 1761 struct page *page; 1764 1762 char *kaddr; 1765 1763 struct iattr attr; ··· 1798 1798 * No big deal if we can't add this page to the page cache here. 1799 1799 * READLINK will get the missing page from the server if needed. 1800 1800 */ 1801 - pagevec_init(&lru_pvec, 0); 1802 - if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0, 1801 + if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0, 1803 1802 GFP_KERNEL)) { 1804 - pagevec_add(&lru_pvec, page); 1805 - pagevec_lru_add_file(&lru_pvec); 1806 1803 SetPageUptodate(page); 1807 1804 unlock_page(page); 1808 1805 } else
+1 -33
include/linux/pagevec.h
··· 21 21 }; 22 22 23 23 void __pagevec_release(struct pagevec *pvec); 24 - void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); 24 + void __pagevec_lru_add(struct pagevec *pvec); 25 25 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 26 26 pgoff_t start, unsigned nr_pages); 27 27 unsigned pagevec_lookup_tag(struct pagevec *pvec, ··· 62 62 { 63 63 if (pagevec_count(pvec)) 64 64 __pagevec_release(pvec); 65 - } 66 - 67 - static inline void __pagevec_lru_add_anon(struct pagevec *pvec) 68 - { 69 - __pagevec_lru_add(pvec, LRU_INACTIVE_ANON); 70 - } 71 - 72 - static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec) 73 - { 74 - __pagevec_lru_add(pvec, LRU_ACTIVE_ANON); 75 - } 76 - 77 - static inline void __pagevec_lru_add_file(struct pagevec *pvec) 78 - { 79 - __pagevec_lru_add(pvec, LRU_INACTIVE_FILE); 80 - } 81 - 82 - static inline void __pagevec_lru_add_active_file(struct pagevec *pvec) 83 - { 84 - __pagevec_lru_add(pvec, LRU_ACTIVE_FILE); 85 - } 86 - 87 - static inline void pagevec_lru_add_file(struct pagevec *pvec) 88 - { 89 - if (pagevec_count(pvec)) 90 - __pagevec_lru_add_file(pvec); 91 - } 92 - 93 - static inline void pagevec_lru_add_anon(struct pagevec *pvec) 94 - { 95 - if (pagevec_count(pvec)) 96 - __pagevec_lru_add_anon(pvec); 97 65 } 98 66 99 67 #endif /* _LINUX_PAGEVEC_H */
+4 -8
mm/swap.c
··· 505 505 506 506 page_cache_get(page); 507 507 if (!pagevec_space(pvec)) 508 - __pagevec_lru_add(pvec, lru); 508 + __pagevec_lru_add(pvec); 509 509 pagevec_add(pvec, page); 510 510 put_cpu_var(lru_add_pvec); 511 511 } ··· 628 628 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); 629 629 630 630 if (pagevec_count(pvec)) 631 - __pagevec_lru_add(pvec, NR_LRU_LISTS); 631 + __pagevec_lru_add(pvec); 632 632 633 633 pvec = &per_cpu(lru_rotate_pvecs, cpu); 634 634 if (pagevec_count(pvec)) { ··· 832 832 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, 833 833 void *arg) 834 834 { 835 - enum lru_list requested_lru = (enum lru_list)arg; 836 835 int file = page_is_file_cache(page); 837 836 int active = PageActive(page); 838 837 enum lru_list lru = page_lru(page); 839 838 840 - WARN_ON_ONCE(requested_lru < NR_LRU_LISTS && requested_lru != lru); 841 839 VM_BUG_ON(PageUnevictable(page)); 842 840 VM_BUG_ON(PageLRU(page)); 843 841 ··· 849 851 * Add the passed pages to the LRU, then drop the caller's refcount 850 852 * on them. Reinitialises the caller's pagevec. 851 853 */ 852 - void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) 854 + void __pagevec_lru_add(struct pagevec *pvec) 853 855 { 854 - VM_BUG_ON(is_unevictable_lru(lru)); 855 - 856 - pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru); 856 + pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL); 857 857 } 858 858 EXPORT_SYMBOL(__pagevec_lru_add); 859 859