Revert "mm: batch activate_page() to reduce lock contention"

This reverts commit 744ed1442757767ffede5008bb13e0805085902e.

Chris Mason ended up chasing down some page allocation errors and pages
stuck waiting on the IO scheduler, and was able to narrow it down to two
commits: commit 744ed1442757 ("mm: batch activate_page() to reduce lock
contention") and d8505dee1a87 ("mm: simplify code of swap.c").

This reverts the first of them.

Reported-and-debugged-by: Chris Mason <chris.mason@oracle.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <jaxboe@fusionio.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

 mm/internal.h |  9 --
 mm/swap.c     | 94 +++++-----------------------
 mm/vmscan.c   |  6 +-
 3 files changed, 15 insertions(+), 94 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,15 +39,6 @@
 
 extern unsigned long highest_memmap_pfn;
 
-#ifdef CONFIG_SMP
-extern int putback_active_lru_page(struct zone *zone, struct page *page);
-#else
-static inline int putback_active_lru_page(struct zone *zone, struct page *page)
-{
-	return 0;
-}
-#endif
-
 /*
  * in mm/vmscan.c:
  */
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -271,94 +271,27 @@
 }
 
 /*
- * A page will go to active list either by activate_page or putback_lru_page.
- * In the activate_page case, the page hasn't active bit set. The page might
- * not in LRU list because it's isolated before it gets a chance to be moved to
- * active list. The window is small because pagevec just stores several pages.
- * For such case, we do nothing for such page.
- * In the putback_lru_page case, the page isn't in lru list but has active
- * bit set
+ * FIXME: speed this up?
  */
-static void __activate_page(struct page *page, void *arg)
-{
-	struct zone *zone = page_zone(page);
-	int file = page_is_file_cache(page);
-	int lru = page_lru_base_type(page);
-	bool putback = !PageLRU(page);
-
-	/* The page is isolated before it's moved to active list */
-	if (!PageLRU(page) && !PageActive(page))
-		return;
-	if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page))
-		return;
-
-	if (!putback)
-		del_page_from_lru_list(zone, page, lru);
-	else
-		SetPageLRU(page);
-
-	SetPageActive(page);
-	lru += LRU_ACTIVE;
-	add_page_to_lru_list(zone, page, lru);
-
-	if (putback)
-		return;
-	__count_vm_event(PGACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 1);
-}
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
-
-static void activate_page_drain(int cpu)
-{
-	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
-
-	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-}
-
-void activate_page(struct page *page)
-{
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
-
-		page_cache_get(page);
-		if (!pagevec_add(pvec, page))
-			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
-	}
-}
-
-/* Caller should hold zone->lru_lock */
-int putback_active_lru_page(struct zone *zone, struct page *page)
-{
-	struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
-
-	if (!pagevec_add(pvec, page)) {
-		spin_unlock_irq(&zone->lru_lock);
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		spin_lock_irq(&zone->lru_lock);
-	}
-	put_cpu_var(activate_page_pvecs);
-	return 1;
-}
-
-#else
-static inline void activate_page_drain(int cpu)
-{
-}
-
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page))
-		__activate_page(page, NULL);
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		int file = page_is_file_cache(page);
+		int lru = page_lru_base_type(page);
+		del_page_from_lru_list(zone, page, lru);
+
+		SetPageActive(page);
+		lru += LRU_ACTIVE;
+		add_page_to_lru_list(zone, page, lru);
+		__count_vm_event(PGACTIVATE);
+
+		update_page_reclaim_stat(zone, page, file, 1);
+	}
	spin_unlock_irq(&zone->lru_lock);
 }
-#endif
 
 /*
  * Mark a page as having seen activity.
@@ -457,7 +390,6 @@
 		pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
-	activate_page_drain(cpu);
 }
 
 void lru_add_drain(void)
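For context, the mechanism being reverted is a standard batching pattern: rather than taking zone->lru_lock once per activated page, pages are parked in a small per-CPU pagevec and the lock is taken once per batch, when the pagevec fills or is drained. Below is a minimal user-space sketch of that pattern, not kernel code: a pthread mutex stands in for zone->lru_lock, a plain counter stands in for the active list, and all names are invented for illustration (the kernel's pagevec held 14 entries in this era).

/*
 * Sketch of "buffer locally, lock once per batch".
 * Build with: cc -O2 -pthread batch.c
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_SIZE 14			/* mirrors PAGEVEC_SIZE */
#define NR_THREADS 4
#define ITEMS_PER_THREAD 100000

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static long active_count;		/* stand-in for the zone's active list */

struct batch {
	int nr;
	long items[BATCH_SIZE];
};

/* Flush the whole batch under a single lock acquisition. */
static void batch_drain(struct batch *b)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < b->nr; i++)
		active_count += b->items[i];	/* "move page to active list" */
	pthread_mutex_unlock(&list_lock);
	b->nr = 0;
}

/* Queue one item; only touch the shared lock when the batch is full. */
static void batch_add(struct batch *b, long item)
{
	b->items[b->nr++] = item;
	if (b->nr == BATCH_SIZE)
		batch_drain(b);
}

static void *worker(void *arg)
{
	struct batch b = { .nr = 0 };

	(void)arg;
	for (int i = 0; i < ITEMS_PER_THREAD; i++)
		batch_add(&b, 1);
	batch_drain(&b);		/* flush the partial tail batch */
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_THREADS];

	for (int i = 0; i < NR_THREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < NR_THREADS; i++)
		pthread_join(tid[i], NULL);
	printf("active_count = %ld\n", active_count);	/* expect 400000 */
	return 0;
}

The trade-off is the flip side of the saved lock traffic, and it is what the deleted __activate_page() comment had to explain: a queued page sits in the per-CPU buffer in an in-between state until some drain runs, and everything else that walks the LRU has to cope with that window.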
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1271,16 +1271,14 @@
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+		SetPageLRU(page);
 		lru = page_lru(page);
+		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
-			if (putback_active_lru_page(zone, page))
-				continue;
 		}
-		SetPageLRU(page);
-		add_page_to_lru_list(zone, page, lru);
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
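With the revert, the putback loop in mm/vmscan.c again makes every page, active or not, visible on the LRU while zone->lru_lock is held: SetPageLRU() is set before add_page_to_lru_list(), and active pages are no longer diverted into a per-CPU pagevec via putback_active_lru_page(). That removes the transient state the deleted mm/swap.c comment described, in which a page headed for the active list could briefly be on no LRU list at all.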