Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/memcg: apply add/del_page to lruvec

Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down to
its target functions.
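
For reference, the new calling conventions, with bodies elided, as they read
after this patch (taken from the include/linux/mm_inline.h and mm/swap.c hunks
below):

    static __always_inline void add_page_to_lru_list(struct page *page,
                                    struct lruvec *lruvec, enum lru_list lru);
    static __always_inline void del_page_from_lru_list(struct page *page,
                                    struct lruvec *lruvec, enum lru_list lru);

    static void pagevec_lru_move_fn(struct pagevec *pvec,
            void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
            void *arg);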

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place come mem_cgroup_page_lruvec() to decide the lruvec (previously
a side-effect of add) and mem_cgroup_update_lru_size() to maintain the
lru_size stats.
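
Both are declared in include/linux/memcontrol.h (first hunks of the diff
below), with static inline stubs that fall back to &zone->lruvec or do nothing
when the memory controller is not configured:

    struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
    void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);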

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
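
A minimal sketch of the resulting pattern, lifted from __page_cache_release()
in the mm/swap.c hunks below: the lruvec is evaluated right after taking
zone->lru_lock, immediately before the list operation that needs it:

    spin_lock_irqsave(&zone->lru_lock, flags);
    lruvec = mem_cgroup_page_lruvec(page, zone);
    VM_BUG_ON(!PageLRU(page));
    __ClearPageLRU(page);
    del_page_from_lru_list(page, lruvec, page_off_lru(page));
    spin_unlock_irqrestore(&zone->lru_lock, flags);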

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Hugh Dickins and committed by Linus Torvalds
fa9add64 75b00af7

+122 -181
+7 -25
include/linux/memcontrol.h
···
 					gfp_t gfp_mask);
 
 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
-				       enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
-					 enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
 
 /* For coalescing uncharge for reducing memcg' overhead*/
 extern void mem_cgroup_uncharge_start(void);
···
 int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
···
 	return &zone->lruvec;
 }
 
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
-						     struct page *page,
-						     enum lru_list lru)
-{
-	return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-							struct page *page,
-							enum lru_list from,
-							enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+						    struct zone *zone)
 {
 	return &zone->lruvec;
 }
···
 	return 0;
 }
 
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+			   int increment)
 {
-	return NULL;
 }
 
 static inline void
+10 -10
include/linux/mm_inline.h
···
 	return !PageSwapBacked(page);
 }
 
-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**
+2 -2
include/linux/swap.h
···
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
-			      struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+			      struct lruvec *lruvec);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
+4 -1
mm/compaction.c
···
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
+	struct lruvec *lruvec;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
···
 		if (cc->mode != COMPACT_SYNC)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
···
 		VM_BUG_ON(PageTransCompound(page));
 
 		/* Successfully isolated */
-		del_page_from_lru_list(zone, page, page_lru(page));
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
+5 -3
mm/huge_memory.c
···
 {
 	int i;
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
+
 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
···
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-
-		lru_add_page_tail(zone, page, page_tail);
+		lru_add_page_tail(page, page_tail, lruvec);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
 	ClearPageCompound(page);
+24 -77
mm/memcontrol.c
···
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
  * @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for the given @zone and
  * @mem. This can be the global zone lruvec, if the memory controller
···
  */
 
 /**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
  * @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
  */
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
-				       enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 {
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
···
 	memcg = pc->mem_cgroup;
 
 	/*
-	 * Surreptitiously switch any uncharged page to root:
+	 * Surreptitiously switch any uncharged offlist page to root:
 	 * an uncharged page off lru does nothing to secure
 	 * its former mem_cgroup from sudden removal.
 	 *
···
 	 * under page_cgroup lock: between them, they make all uses
 	 * of pc->mem_cgroup safe.
 	 */
-	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	/* compound_order() is stabilized through lru_lock */
-	mz->lru_size[lru] += 1 << compound_order(page);
 	return &mz->lruvec;
 }
 
 /**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
  *
- * This function accounts for @page being removed from @lru.
- *
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
  */
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+				int nr_pages)
 {
 	struct mem_cgroup_per_zone *mz;
-	struct mem_cgroup *memcg;
-	struct page_cgroup *pc;
+	unsigned long *lru_size;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	pc = lookup_page_cgroup(page);
-	memcg = pc->mem_cgroup;
-	VM_BUG_ON(!memcg);
-	mz = page_cgroup_zoneinfo(memcg, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
-	mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
-					 struct page *page,
-					 enum lru_list from,
-					 enum lru_list to)
-{
-	/* XXX: Optimize this, especially for @from == @to */
-	mem_cgroup_lru_del_list(page, from);
-	return mem_cgroup_lru_add_list(zone, page, to);
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	lru_size = mz->lru_size + lru;
+	*lru_size += nr_pages;
+	VM_BUG_ON((long)(*lru_size) < 0);
 }
 
 /*
···
 	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
 
 	return (active > inactive);
-}
-
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return NULL;
-
-	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
-		return NULL;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	return &mz->lruvec.reclaim_stat;
 }
 
 #define mem_cgroup_from_res_counter(counter, member)	\
···
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
+	struct lruvec *lruvec;
 	bool was_on_lru = false;
 	bool anon;
 
···
 		zone = page_zone(page);
 		spin_lock_irq(&zone->lru_lock);
 		if (PageLRU(page)) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_lru(page));
+			del_page_from_lru_list(page, lruvec, page_lru(page));
 			was_on_lru = true;
 		}
 	}
···
 
 	if (lrucare) {
 		if (was_on_lru) {
+			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
 			VM_BUG_ON(PageLRU(page));
 			SetPageLRU(page);
-			add_page_to_lru_list(zone, page, page_lru(page));
+			add_page_to_lru_list(page, lruvec, page_lru(page));
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
+43 -43
mm/swap.c
···
 static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
-		unsigned long flags;
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
+		unsigned long flags;
 
 		spin_lock_irqsave(&zone->lru_lock, flags);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
-		del_page_from_lru_list(zone, page, page_off_lru(page));
+		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 }
···
 EXPORT_SYMBOL(put_pages_list);
 
 static void pagevec_lru_move_fn(struct pagevec *pvec,
-	void (*move_fn)(struct page *page, void *arg),
-	void *arg)
+	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+	void *arg)
 {
 	int i;
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
···
 			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
 
-		(*move_fn)(page, arg);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+		(*move_fn)(page, lruvec, arg);
 	}
 	if (zone)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
···
 	pagevec_reinit(pvec);
 }
 
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	int *pgmoved = arg;
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		struct lruvec *lruvec;
-
-		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
-						   page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
···
 	}
 }
 
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
 				     int file, int rotated)
 {
-	struct zone_reclaim_stat *reclaim_stat;
-
-	reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
-	if (!reclaim_stat)
-		reclaim_stat = &zone->lruvec.reclaim_stat;
+	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
 	reclaim_stat->recent_scanned[file]++;
 	if (rotated)
 		reclaim_stat->recent_rotated[file]++;
 }
 
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+			    void *arg)
 {
-	struct zone *zone = page_zone(page);
-
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
-		del_page_from_lru_list(zone, page, lru);
 
+		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
 		lru += LRU_ACTIVE;
-		add_page_to_lru_list(zone, page, lru);
-		__count_vm_event(PGACTIVATE);
+		add_page_to_lru_list(page, lruvec, lru);
 
-		update_page_reclaim_stat(zone, page, file, 1);
+		__count_vm_event(PGACTIVATE);
+		update_page_reclaim_stat(lruvec, file, 1);
 	}
 }
···
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	__activate_page(page, NULL);
+	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
 #endif
···
 void add_page_to_unevictable_list(struct page *page)
 {
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
-	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 	spin_unlock_irq(&zone->lru_lock);
 }
···
  * be write it out by flusher threads as this is much more effective
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+			      void *arg)
 {
 	int lru, file;
 	bool active;
-	struct zone *zone = page_zone(page);
 
 	if (!PageLRU(page))
 		return;
···
 		return;
 
 	active = PageActive(page);
-
 	file = page_is_file_cache(page);
 	lru = page_lru_base_type(page);
-	del_page_from_lru_list(zone, page, lru + active);
+
+	del_page_from_lru_list(page, lruvec, lru + active);
 	ClearPageActive(page);
 	ClearPageReferenced(page);
-	add_page_to_lru_list(zone, page, lru);
+	add_page_to_lru_list(page, lruvec, lru);
 
 	if (PageWriteback(page) || PageDirty(page)) {
···
 		 */
 		SetPageReclaim(page);
 	} else {
-		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
 		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
 	if (active)
 		__count_vm_event(PGDEACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 0);
+	update_page_reclaim_stat(lruvec, file, 0);
 }
···
 	int i;
 	LIST_HEAD(pages_to_free);
 	struct zone *zone = NULL;
+	struct lruvec *lruvec;
 	unsigned long uninitialized_var(flags);
 
 	for (i = 0; i < nr; i++) {
···
 				zone = pagezone;
 				spin_lock_irqsave(&zone->lru_lock, flags);
 			}
+
+			lruvec = mem_cgroup_page_lruvec(page, zone);
 			VM_BUG_ON(!PageLRU(page));
 			__ClearPageLRU(page);
-			del_page_from_lru_list(zone, page, page_off_lru(page));
+			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
 		list_add(&page->lru, &pages_to_free);
···
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
-		       struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+		       struct lruvec *lruvec)
 {
 	int uninitialized_var(active);
 	enum lru_list lru;
···
 	VM_BUG_ON(!PageHead(page));
 	VM_BUG_ON(PageCompound(page_tail));
 	VM_BUG_ON(PageLRU(page_tail));
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+	VM_BUG_ON(NR_CPUS != 1 &&
+		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
 	SetPageLRU(page_tail);
···
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(zone, page_tail, lru);
+		add_page_to_lru_list(page_tail, lruvec, lru);
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(zone, page_tail, file, active);
+		update_page_reclaim_stat(lruvec, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+				 void *arg)
 {
 	enum lru_list lru = (enum lru_list)arg;
-	struct zone *zone = page_zone(page);
 	int file = is_file_lru(lru);
 	int active = is_active_lru(lru);
···
 	SetPageLRU(page);
 	if (active)
 		SetPageActive(page);
-	add_page_to_lru_list(zone, page, lru);
-	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(page, lruvec, lru);
+	update_page_reclaim_stat(lruvec, file, active);
 }
 
 /*
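
With this change a pagevec callback receives the lruvec already resolved under
zone->lru_lock. A hypothetical move_fn written against the new convention
would look roughly like this (illustrative only, modelled on
pagevec_move_tail_fn() above; the function name is not part of the patch):

    static void example_move_fn(struct page *page, struct lruvec *lruvec,
                                void *arg)
    {
            int *moved = arg;

            if (PageLRU(page) && !PageUnevictable(page)) {
                    enum lru_list lru = page_lru_base_type(page);

                    /* lruvec was looked up by pagevec_lru_move_fn() */
                    list_move_tail(&page->lru, &lruvec->lists[lru]);
                    (*moved)++;
            }
    }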
+27 -20
mm/vmscan.c
···
 
 	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 		struct page *page;
+		int nr_pages;
 
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
···
 
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
-			mem_cgroup_lru_del_list(page, lru);
+			nr_pages = hpage_nr_pages(page);
+			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 			list_move(&page->lru, dst);
-			nr_taken += hpage_nr_pages(page);
+			nr_taken += nr_pages;
 			break;
 
 		case -EBUSY:
···
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
+		struct lruvec *lruvec;
 
 		spin_lock_irq(&zone->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
-			ret = 0;
 			get_page(page);
 			ClearPageLRU(page);
-
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
+			ret = 0;
 		}
 		spin_unlock_irq(&zone->lru_lock);
 	}
···
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
+
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
+		add_page_to_lru_list(page, lruvec, lru);
+
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
 			int numpages = hpage_nr_pages(page);
···
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
···
  * But we had to alter page->flags anyway.
  */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
 				     struct list_head *list,
 				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
+	struct zone *zone = lruvec_zone(lruvec);
 	unsigned long pgmoved = 0;
 	struct page *page;
+	int nr_pages;
 
 	while (!list_empty(list)) {
-		struct lruvec *lruvec;
-
 		page = lru_to_page(list);
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 
-		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+		nr_pages = hpage_nr_pages(page);
+		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 		list_move(&page->lru, &lruvec->lists[lru]);
-		pgmoved += hpage_nr_pages(page);
+		pgmoved += nr_pages;
 
 		if (put_page_testzero(page)) {
 			__ClearPageLRU(page);
 			__ClearPageActive(page);
-			del_page_from_lru_list(zone, page, lru);
+			del_page_from_lru_list(page, lruvec, lru);
 
 			if (unlikely(PageCompound(page))) {
 				spin_unlock_irq(&zone->lru_lock);
···
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
-	move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
···
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
+		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;
···
 
 		VM_BUG_ON(PageActive(page));
 		ClearPageUnevictable(page);
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		lruvec = mem_cgroup_lru_move_lists(zone, page,
-						   LRU_UNEVICTABLE, lru);
-		list_move(&page->lru, &lruvec->lists[lru]);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+		del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+		add_page_to_lru_list(page, lruvec, lru);
 		pgrescued++;
 	}
 }