mm: simplify code of swap.c

Clean up the code and remove duplication. The next patch will also use
the pagevec_lru_move_fn() helper introduced here.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Shaohua Li; committed by Linus Torvalds.
Commit d8505dee (parent c06b1fca)

Diffstat: +54 -47 (54 insertions, 47 deletions)
mm/swap.c
··· 178 178 } 179 179 EXPORT_SYMBOL(put_pages_list); 180 180 181 - /* 182 - * pagevec_move_tail() must be called with IRQ disabled. 183 - * Otherwise this may cause nasty races. 184 - */ 185 - static void pagevec_move_tail(struct pagevec *pvec) 181 + static void pagevec_lru_move_fn(struct pagevec *pvec, 182 + void (*move_fn)(struct page *page, void *arg), 183 + void *arg) 186 184 { 187 185 int i; 188 - int pgmoved = 0; 189 186 struct zone *zone = NULL; 187 + unsigned long flags = 0; 190 188 191 189 for (i = 0; i < pagevec_count(pvec); i++) { 192 190 struct page *page = pvec->pages[i]; ··· 192 194 193 195 if (pagezone != zone) { 194 196 if (zone) 195 - spin_unlock(&zone->lru_lock); 197 + spin_unlock_irqrestore(&zone->lru_lock, flags); 196 198 zone = pagezone; 197 - spin_lock(&zone->lru_lock); 199 + spin_lock_irqsave(&zone->lru_lock, flags); 198 200 } 199 - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 200 - int lru = page_lru_base_type(page); 201 - list_move_tail(&page->lru, &zone->lru[lru].list); 202 - pgmoved++; 203 - } 201 + 202 + (*move_fn)(page, arg); 204 203 } 205 204 if (zone) 206 - spin_unlock(&zone->lru_lock); 207 - __count_vm_events(PGROTATED, pgmoved); 208 - release_pages(pvec->pages, pvec->nr, pvec->cold); 205 + spin_unlock_irqrestore(&zone->lru_lock, flags); 206 + release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); 209 207 pagevec_reinit(pvec); 208 + } 209 + 210 + static void pagevec_move_tail_fn(struct page *page, void *arg) 211 + { 212 + int *pgmoved = arg; 213 + struct zone *zone = page_zone(page); 214 + 215 + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 216 + int lru = page_lru_base_type(page); 217 + list_move_tail(&page->lru, &zone->lru[lru].list); 218 + (*pgmoved)++; 219 + } 220 + } 221 + 222 + /* 223 + * pagevec_move_tail() must be called with IRQ disabled. 224 + * Otherwise this may cause nasty races. 
225 + */ 226 + static void pagevec_move_tail(struct pagevec *pvec) 227 + { 228 + int pgmoved = 0; 229 + 230 + pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); 231 + __count_vm_events(PGROTATED, pgmoved); 210 232 } 211 233 212 234 /* ··· 234 216 * reclaim. If it still appears to be reclaimable, move it to the tail of the 235 217 * inactive list. 236 218 */ 237 - void rotate_reclaimable_page(struct page *page) 219 + void rotate_reclaimable_page(struct page *page) 238 220 { 239 221 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && 240 222 !PageUnevictable(page) && PageLRU(page)) { ··· 534 516 } 535 517 } 536 518 519 + static void ____pagevec_lru_add_fn(struct page *page, void *arg) 520 + { 521 + enum lru_list lru = (enum lru_list)arg; 522 + struct zone *zone = page_zone(page); 523 + int file = is_file_lru(lru); 524 + int active = is_active_lru(lru); 525 + 526 + VM_BUG_ON(PageActive(page)); 527 + VM_BUG_ON(PageUnevictable(page)); 528 + VM_BUG_ON(PageLRU(page)); 529 + 530 + SetPageLRU(page); 531 + if (active) 532 + SetPageActive(page); 533 + update_page_reclaim_stat(zone, page, file, active); 534 + add_page_to_lru_list(zone, page, lru); 535 + } 536 + 537 537 /* 538 538 * Add the passed pages to the LRU, then drop the caller's refcount 539 539 * on them. Reinitialises the caller's pagevec. 
540 540 */ 541 541 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) 542 542 { 543 - int i; 544 - struct zone *zone = NULL; 545 - 546 543 VM_BUG_ON(is_unevictable_lru(lru)); 547 544 548 - for (i = 0; i < pagevec_count(pvec); i++) { 549 - struct page *page = pvec->pages[i]; 550 - struct zone *pagezone = page_zone(page); 551 - int file; 552 - int active; 553 - 554 - if (pagezone != zone) { 555 - if (zone) 556 - spin_unlock_irq(&zone->lru_lock); 557 - zone = pagezone; 558 - spin_lock_irq(&zone->lru_lock); 559 - } 560 - VM_BUG_ON(PageActive(page)); 561 - VM_BUG_ON(PageUnevictable(page)); 562 - VM_BUG_ON(PageLRU(page)); 563 - SetPageLRU(page); 564 - active = is_active_lru(lru); 565 - file = is_file_lru(lru); 566 - if (active) 567 - SetPageActive(page); 568 - update_page_reclaim_stat(zone, page, file, active); 569 - add_page_to_lru_list(zone, page, lru); 570 - } 571 - if (zone) 572 - spin_unlock_irq(&zone->lru_lock); 573 - release_pages(pvec->pages, pvec->nr, pvec->cold); 574 - pagevec_reinit(pvec); 545 + pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru); 575 546 } 576 547 577 548 EXPORT_SYMBOL(____pagevec_lru_add);