Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/page_alloc: integrate classzone_idx and high_zoneidx

classzone_idx is just a different name for high_zoneidx now. So, integrate
them and add a comment to struct alloc_context in order to reduce
future confusion about the meaning of this variable.

The accessor ac_classzone_idx() is also removed since it isn't needed
after integration.

In addition to integration, this patch also renames high_zoneidx to
highest_zoneidx since it conveys the intended meaning more precisely.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ye Xiaolong <xiaolong.ye@intel.com>
Link: http://lkml.kernel.org/r/1587095923-7515-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joonsoo Kim and committed by
Linus Torvalds
97a225e6 3334a45e

+175 -150
+5 -4
include/linux/compaction.h
··· 97 97 struct page **page); 98 98 extern void reset_isolation_suitable(pg_data_t *pgdat); 99 99 extern enum compact_result compaction_suitable(struct zone *zone, int order, 100 - unsigned int alloc_flags, int classzone_idx); 100 + unsigned int alloc_flags, int highest_zoneidx); 101 101 102 102 extern void defer_compaction(struct zone *zone, int order); 103 103 extern bool compaction_deferred(struct zone *zone, int order); ··· 182 182 183 183 extern int kcompactd_run(int nid); 184 184 extern void kcompactd_stop(int nid); 185 - extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); 185 + extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); 186 186 187 187 #else 188 188 static inline void reset_isolation_suitable(pg_data_t *pgdat) ··· 190 190 } 191 191 192 192 static inline enum compact_result compaction_suitable(struct zone *zone, int order, 193 - int alloc_flags, int classzone_idx) 193 + int alloc_flags, int highest_zoneidx) 194 194 { 195 195 return COMPACT_SKIPPED; 196 196 } ··· 232 232 { 233 233 } 234 234 235 - static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 235 + static inline void wakeup_kcompactd(pg_data_t *pgdat, 236 + int order, int highest_zoneidx) 236 237 { 237 238 } 238 239
+6 -6
include/linux/mmzone.h
··· 699 699 struct task_struct *kswapd; /* Protected by 700 700 mem_hotplug_begin/end() */ 701 701 int kswapd_order; 702 - enum zone_type kswapd_classzone_idx; 702 + enum zone_type kswapd_highest_zoneidx; 703 703 704 704 int kswapd_failures; /* Number of 'reclaimed == 0' runs */ 705 705 706 706 #ifdef CONFIG_COMPACTION 707 707 int kcompactd_max_order; 708 - enum zone_type kcompactd_classzone_idx; 708 + enum zone_type kcompactd_highest_zoneidx; 709 709 wait_queue_head_t kcompactd_wait; 710 710 struct task_struct *kcompactd; 711 711 #endif ··· 783 783 784 784 void build_all_zonelists(pg_data_t *pgdat); 785 785 void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, 786 - enum zone_type classzone_idx); 786 + enum zone_type highest_zoneidx); 787 787 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 788 - int classzone_idx, unsigned int alloc_flags, 788 + int highest_zoneidx, unsigned int alloc_flags, 789 789 long free_pages); 790 790 bool zone_watermark_ok(struct zone *z, unsigned int order, 791 - unsigned long mark, int classzone_idx, 791 + unsigned long mark, int highest_zoneidx, 792 792 unsigned int alloc_flags); 793 793 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 794 - unsigned long mark, int classzone_idx); 794 + unsigned long mark, int highest_zoneidx); 795 795 enum memmap_context { 796 796 MEMMAP_EARLY, 797 797 MEMMAP_HOTPLUG,
+13 -9
include/trace/events/compaction.h
··· 314 314 315 315 DECLARE_EVENT_CLASS(kcompactd_wake_template, 316 316 317 - TP_PROTO(int nid, int order, enum zone_type classzone_idx), 317 + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), 318 318 319 - TP_ARGS(nid, order, classzone_idx), 319 + TP_ARGS(nid, order, highest_zoneidx), 320 320 321 321 TP_STRUCT__entry( 322 322 __field(int, nid) 323 323 __field(int, order) 324 - __field(enum zone_type, classzone_idx) 324 + __field(enum zone_type, highest_zoneidx) 325 325 ), 326 326 327 327 TP_fast_assign( 328 328 __entry->nid = nid; 329 329 __entry->order = order; 330 - __entry->classzone_idx = classzone_idx; 330 + __entry->highest_zoneidx = highest_zoneidx; 331 331 ), 332 332 333 + /* 334 + * classzone_idx is previous name of the highest_zoneidx. 335 + * Reason not to change it is the ABI requirement of the tracepoint. 336 + */ 333 337 TP_printk("nid=%d order=%d classzone_idx=%-8s", 334 338 __entry->nid, 335 339 __entry->order, 336 - __print_symbolic(__entry->classzone_idx, ZONE_TYPE)) 340 + __print_symbolic(__entry->highest_zoneidx, ZONE_TYPE)) 337 341 ); 338 342 339 343 DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd, 340 344 341 - TP_PROTO(int nid, int order, enum zone_type classzone_idx), 345 + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), 342 346 343 - TP_ARGS(nid, order, classzone_idx) 347 + TP_ARGS(nid, order, highest_zoneidx) 344 348 ); 345 349 346 350 DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake, 347 351 348 - TP_PROTO(int nid, int order, enum zone_type classzone_idx), 352 + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), 349 353 350 - TP_ARGS(nid, order, classzone_idx) 354 + TP_ARGS(nid, order, highest_zoneidx) 351 355 ); 352 356 #endif 353 357
+9 -5
include/trace/events/vmscan.h
··· 265 265 ); 266 266 267 267 TRACE_EVENT(mm_vmscan_lru_isolate, 268 - TP_PROTO(int classzone_idx, 268 + TP_PROTO(int highest_zoneidx, 269 269 int order, 270 270 unsigned long nr_requested, 271 271 unsigned long nr_scanned, ··· 274 274 isolate_mode_t isolate_mode, 275 275 int lru), 276 276 277 - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), 277 + TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), 278 278 279 279 TP_STRUCT__entry( 280 - __field(int, classzone_idx) 280 + __field(int, highest_zoneidx) 281 281 __field(int, order) 282 282 __field(unsigned long, nr_requested) 283 283 __field(unsigned long, nr_scanned) ··· 288 288 ), 289 289 290 290 TP_fast_assign( 291 - __entry->classzone_idx = classzone_idx; 291 + __entry->highest_zoneidx = highest_zoneidx; 292 292 __entry->order = order; 293 293 __entry->nr_requested = nr_requested; 294 294 __entry->nr_scanned = nr_scanned; ··· 298 298 __entry->lru = lru; 299 299 ), 300 300 301 + /* 302 + * classzone is previous name of the highest_zoneidx. 303 + * Reason not to change it is the ABI requirement of the tracepoint. 304 + */ 301 305 TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", 302 306 __entry->isolate_mode, 303 - __entry->classzone_idx, 307 + __entry->highest_zoneidx, 304 308 __entry->order, 305 309 __entry->nr_requested, 306 310 __entry->nr_scanned,
+32 -32
mm/compaction.c
··· 1968 1968 */ 1969 1969 static enum compact_result __compaction_suitable(struct zone *zone, int order, 1970 1970 unsigned int alloc_flags, 1971 - int classzone_idx, 1971 + int highest_zoneidx, 1972 1972 unsigned long wmark_target) 1973 1973 { 1974 1974 unsigned long watermark; ··· 1981 1981 * If watermarks for high-order allocation are already met, there 1982 1982 * should be no need for compaction at all. 1983 1983 */ 1984 - if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1984 + if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 1985 1985 alloc_flags)) 1986 1986 return COMPACT_SUCCESS; 1987 1987 ··· 1991 1991 * watermark and alloc_flags have to match, or be more pessimistic than 1992 1992 * the check in __isolate_free_page(). We don't use the direct 1993 1993 * compactor's alloc_flags, as they are not relevant for freepage 1994 - * isolation. We however do use the direct compactor's classzone_idx to 1995 - * skip over zones where lowmem reserves would prevent allocation even 1996 - * if compaction succeeds. 1994 + * isolation. We however do use the direct compactor's highest_zoneidx 1995 + * to skip over zones where lowmem reserves would prevent allocation 1996 + * even if compaction succeeds. 1997 1997 * For costly orders, we require low watermark instead of min for 1998 1998 * compaction to proceed to increase its chances. 1999 1999 * ALLOC_CMA is used, as pages in CMA pageblocks are considered ··· 2002 2002 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
2003 2003 low_wmark_pages(zone) : min_wmark_pages(zone); 2004 2004 watermark += compact_gap(order); 2005 - if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 2005 + if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2006 2006 ALLOC_CMA, wmark_target)) 2007 2007 return COMPACT_SKIPPED; 2008 2008 ··· 2011 2011 2012 2012 enum compact_result compaction_suitable(struct zone *zone, int order, 2013 2013 unsigned int alloc_flags, 2014 - int classzone_idx) 2014 + int highest_zoneidx) 2015 2015 { 2016 2016 enum compact_result ret; 2017 2017 int fragindex; 2018 2018 2019 - ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, 2019 + ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, 2020 2020 zone_page_state(zone, NR_FREE_PAGES)); 2021 2021 /* 2022 2022 * fragmentation index determines if allocation failures are due to ··· 2057 2057 * Make sure at least one zone would pass __compaction_suitable if we continue 2058 2058 * retrying the reclaim. 2059 2059 */ 2060 - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 2061 - ac->nodemask) { 2060 + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2061 + ac->highest_zoneidx, ac->nodemask) { 2062 2062 unsigned long available; 2063 2063 enum compact_result compact_result; 2064 2064 ··· 2071 2071 available = zone_reclaimable_pages(zone) / order; 2072 2072 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 2073 2073 compact_result = __compaction_suitable(zone, order, alloc_flags, 2074 - ac_classzone_idx(ac), available); 2074 + ac->highest_zoneidx, available); 2075 2075 if (compact_result != COMPACT_SKIPPED) 2076 2076 return true; 2077 2077 } ··· 2102 2102 2103 2103 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); 2104 2104 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 2105 - cc->classzone_idx); 2105 + cc->highest_zoneidx); 2106 2106 /* Compaction is likely to fail */ 2107 2107 if (ret == COMPACT_SUCCESS || ret == 
COMPACT_SKIPPED) 2108 2108 return ret; ··· 2293 2293 2294 2294 static enum compact_result compact_zone_order(struct zone *zone, int order, 2295 2295 gfp_t gfp_mask, enum compact_priority prio, 2296 - unsigned int alloc_flags, int classzone_idx, 2296 + unsigned int alloc_flags, int highest_zoneidx, 2297 2297 struct page **capture) 2298 2298 { 2299 2299 enum compact_result ret; ··· 2305 2305 .mode = (prio == COMPACT_PRIO_ASYNC) ? 2306 2306 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2307 2307 .alloc_flags = alloc_flags, 2308 - .classzone_idx = classzone_idx, 2308 + .highest_zoneidx = highest_zoneidx, 2309 2309 .direct_compaction = true, 2310 2310 .whole_zone = (prio == MIN_COMPACT_PRIORITY), 2311 2311 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), ··· 2361 2361 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2362 2362 2363 2363 /* Compact each zone in the list */ 2364 - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 2365 - ac->nodemask) { 2364 + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2365 + ac->highest_zoneidx, ac->nodemask) { 2366 2366 enum compact_result status; 2367 2367 2368 2368 if (prio > MIN_COMPACT_PRIORITY ··· 2372 2372 } 2373 2373 2374 2374 status = compact_zone_order(zone, order, gfp_mask, prio, 2375 - alloc_flags, ac_classzone_idx(ac), capture); 2375 + alloc_flags, ac->highest_zoneidx, capture); 2376 2376 rc = max(status, rc); 2377 2377 2378 2378 /* The allocation should succeed, stop compacting */ ··· 2507 2507 { 2508 2508 int zoneid; 2509 2509 struct zone *zone; 2510 - enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 2510 + enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2511 2511 2512 - for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { 2512 + for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2513 2513 zone = &pgdat->node_zones[zoneid]; 2514 2514 2515 2515 if (!populated_zone(zone)) 2516 2516 continue; 2517 2517 2518 2518 if (compaction_suitable(zone, 
pgdat->kcompactd_max_order, 0, 2519 - classzone_idx) == COMPACT_CONTINUE) 2519 + highest_zoneidx) == COMPACT_CONTINUE) 2520 2520 return true; 2521 2521 } 2522 2522 ··· 2534 2534 struct compact_control cc = { 2535 2535 .order = pgdat->kcompactd_max_order, 2536 2536 .search_order = pgdat->kcompactd_max_order, 2537 - .classzone_idx = pgdat->kcompactd_classzone_idx, 2537 + .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2538 2538 .mode = MIGRATE_SYNC_LIGHT, 2539 2539 .ignore_skip_hint = false, 2540 2540 .gfp_mask = GFP_KERNEL, 2541 2541 }; 2542 2542 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2543 - cc.classzone_idx); 2543 + cc.highest_zoneidx); 2544 2544 count_compact_event(KCOMPACTD_WAKE); 2545 2545 2546 - for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { 2546 + for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2547 2547 int status; 2548 2548 2549 2549 zone = &pgdat->node_zones[zoneid]; ··· 2592 2592 2593 2593 /* 2594 2594 * Regardless of success, we are done until woken up next. 
But remember 2595 - * the requested order/classzone_idx in case it was higher/tighter than 2596 - * our current ones 2595 + * the requested order/highest_zoneidx in case it was higher/tighter 2596 + * than our current ones 2597 2597 */ 2598 2598 if (pgdat->kcompactd_max_order <= cc.order) 2599 2599 pgdat->kcompactd_max_order = 0; 2600 - if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 2601 - pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2600 + if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 2601 + pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2602 2602 } 2603 2603 2604 - void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 2604 + void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2605 2605 { 2606 2606 if (!order) 2607 2607 return; ··· 2609 2609 if (pgdat->kcompactd_max_order < order) 2610 2610 pgdat->kcompactd_max_order = order; 2611 2611 2612 - if (pgdat->kcompactd_classzone_idx > classzone_idx) 2613 - pgdat->kcompactd_classzone_idx = classzone_idx; 2612 + if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 2613 + pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2614 2614 2615 2615 /* 2616 2616 * Pairs with implicit barrier in wait_event_freezable() ··· 2623 2623 return; 2624 2624 2625 2625 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2626 - classzone_idx); 2626 + highest_zoneidx); 2627 2627 wake_up_interruptible(&pgdat->kcompactd_wait); 2628 2628 } 2629 2629 ··· 2644 2644 set_freezable(); 2645 2645 2646 2646 pgdat->kcompactd_max_order = 0; 2647 - pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2647 + pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2648 2648 2649 2649 while (!kthread_should_stop()) { 2650 2650 unsigned long pflags;
+15 -6
mm/internal.h
··· 127 127 * between functions involved in allocations, including the alloc_pages* 128 128 * family of functions. 129 129 * 130 - * nodemask, migratetype and high_zoneidx are initialized only once in 130 + * nodemask, migratetype and highest_zoneidx are initialized only once in 131 131 * __alloc_pages_nodemask() and then never change. 132 132 * 133 - * zonelist, preferred_zone and classzone_idx are set first in 133 + * zonelist, preferred_zone and highest_zoneidx are set first in 134 134 * __alloc_pages_nodemask() for the fast path, and might be later changed 135 135 * in __alloc_pages_slowpath(). All other functions pass the whole strucure 136 136 * by a const pointer. ··· 140 140 nodemask_t *nodemask; 141 141 struct zoneref *preferred_zoneref; 142 142 int migratetype; 143 - enum zone_type high_zoneidx; 143 + 144 + /* 145 + * highest_zoneidx represents highest usable zone index of 146 + * the allocation request. Due to the nature of the zone, 147 + * memory on lower zone than the highest_zoneidx will be 148 + * protected by lowmem_reserve[highest_zoneidx]. 149 + * 150 + * highest_zoneidx is also used by reclaim/compaction to limit 151 + * the target zone since higher zone than this index cannot be 152 + * usable for this allocation request. 
153 + */ 154 + enum zone_type highest_zoneidx; 144 155 bool spread_dirty_pages; 145 156 }; 146 - 147 - #define ac_classzone_idx(ac) (ac->high_zoneidx) 148 157 149 158 /* 150 159 * Locate the struct page for both the matching buddy in our ··· 233 224 int order; /* order a direct compactor needs */ 234 225 int migratetype; /* migratetype of direct compactor */ 235 226 const unsigned int alloc_flags; /* alloc flags of a direct compactor */ 236 - const int classzone_idx; /* zone index of a direct compactor */ 227 + const int highest_zoneidx; /* zone index of a direct compactor */ 237 228 enum migrate_mode mode; /* Async or sync migration mode */ 238 229 bool ignore_skip_hint; /* Scan blocks even if marked skip */ 239 230 bool no_set_skip_hint; /* Don't mark blocks for skipping */
+3 -3
mm/memory_hotplug.c
··· 879 879 } else { 880 880 int cpu; 881 881 /* 882 - * Reset the nr_zones, order and classzone_idx before reuse. 883 - * Note that kswapd will init kswapd_classzone_idx properly 882 + * Reset the nr_zones, order and highest_zoneidx before reuse. 883 + * Note that kswapd will init kswapd_highest_zoneidx properly 884 884 * when it starts in the near future. 885 885 */ 886 886 pgdat->nr_zones = 0; 887 887 pgdat->kswapd_order = 0; 888 - pgdat->kswapd_classzone_idx = 0; 888 + pgdat->kswapd_highest_zoneidx = 0; 889 889 for_each_online_cpu(cpu) { 890 890 struct per_cpu_nodestat *p; 891 891
+2 -2
mm/oom_kill.c
··· 254 254 { 255 255 struct zone *zone; 256 256 struct zoneref *z; 257 - enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); 257 + enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask); 258 258 bool cpuset_limited = false; 259 259 int nid; 260 260 ··· 294 294 295 295 /* Check this allocation failure is caused by cpuset's wall function */ 296 296 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, 297 - high_zoneidx, oc->nodemask) 297 + highest_zoneidx, oc->nodemask) 298 298 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) 299 299 cpuset_limited = true; 300 300
+31 -29
mm/page_alloc.c
··· 2593 2593 int order; 2594 2594 bool ret; 2595 2595 2596 - for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2596 + for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2597 2597 ac->nodemask) { 2598 2598 /* 2599 2599 * Preserve at least one pageblock unless memory pressure ··· 3462 3462 * to check in the allocation paths if no pages are free. 3463 3463 */ 3464 3464 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3465 - int classzone_idx, unsigned int alloc_flags, 3465 + int highest_zoneidx, unsigned int alloc_flags, 3466 3466 long free_pages) 3467 3467 { 3468 3468 long min = mark; ··· 3507 3507 * are not met, then a high-order request also cannot go ahead 3508 3508 * even if a suitable page happened to be free. 3509 3509 */ 3510 - if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 3510 + if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3511 3511 return false; 3512 3512 3513 3513 /* If this is an order-0 request then the watermark is fine */ ··· 3540 3540 } 3541 3541 3542 3542 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3543 - int classzone_idx, unsigned int alloc_flags) 3543 + int highest_zoneidx, unsigned int alloc_flags) 3544 3544 { 3545 - return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 3545 + return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3546 3546 zone_page_state(z, NR_FREE_PAGES)); 3547 3547 } 3548 3548 3549 3549 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3550 - unsigned long mark, int classzone_idx, unsigned int alloc_flags) 3550 + unsigned long mark, int highest_zoneidx, 3551 + unsigned int alloc_flags) 3551 3552 { 3552 3553 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3553 3554 long cma_pages = 0; ··· 3566 3565 * the caller is !atomic then it'll uselessly search the free 3567 3566 * list. That corner case is then slower but it is harmless. 
3568 3567 */ 3569 - if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) 3568 + if (!order && (free_pages - cma_pages) > 3569 + mark + z->lowmem_reserve[highest_zoneidx]) 3570 3570 return true; 3571 3571 3572 - return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 3572 + return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3573 3573 free_pages); 3574 3574 } 3575 3575 3576 3576 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3577 - unsigned long mark, int classzone_idx) 3577 + unsigned long mark, int highest_zoneidx) 3578 3578 { 3579 3579 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3580 3580 3581 3581 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3582 3582 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3583 3583 3584 - return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 3584 + return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3585 3585 free_pages); 3586 3586 } 3587 3587 ··· 3659 3657 */ 3660 3658 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3661 3659 z = ac->preferred_zoneref; 3662 - for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3663 - ac->nodemask) { 3660 + for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, 3661 + ac->highest_zoneidx, ac->nodemask) { 3664 3662 struct page *page; 3665 3663 unsigned long mark; 3666 3664 ··· 3715 3713 3716 3714 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3717 3715 if (!zone_watermark_fast(zone, order, mark, 3718 - ac_classzone_idx(ac), alloc_flags)) { 3716 + ac->highest_zoneidx, alloc_flags)) { 3719 3717 int ret; 3720 3718 3721 3719 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT ··· 3748 3746 default: 3749 3747 /* did we reclaim enough */ 3750 3748 if (zone_watermark_ok(zone, order, mark, 3751 - ac_classzone_idx(ac), alloc_flags)) 3749 + ac->highest_zoneidx, alloc_flags)) 3752 3750 goto try_this_zone; 3753 3751 3754 3752 continue; ··· 3907 3905 if (gfp_mask & 
__GFP_RETRY_MAYFAIL) 3908 3906 goto out; 3909 3907 /* The OOM killer does not needlessly kill tasks for lowmem */ 3910 - if (ac->high_zoneidx < ZONE_NORMAL) 3908 + if (ac->highest_zoneidx < ZONE_NORMAL) 3911 3909 goto out; 3912 3910 if (pm_suspended_storage()) 3913 3911 goto out; ··· 4110 4108 * Let's give them a good hope and keep retrying while the order-0 4111 4109 * watermarks are OK. 4112 4110 */ 4113 - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 4114 - ac->nodemask) { 4111 + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4112 + ac->highest_zoneidx, ac->nodemask) { 4115 4113 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4116 - ac_classzone_idx(ac), alloc_flags)) 4114 + ac->highest_zoneidx, alloc_flags)) 4117 4115 return true; 4118 4116 } 4119 4117 return false; ··· 4237 4235 struct zoneref *z; 4238 4236 struct zone *zone; 4239 4237 pg_data_t *last_pgdat = NULL; 4240 - enum zone_type high_zoneidx = ac->high_zoneidx; 4238 + enum zone_type highest_zoneidx = ac->highest_zoneidx; 4241 4239 4242 - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx, 4240 + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4243 4241 ac->nodemask) { 4244 4242 if (last_pgdat != zone->zone_pgdat) 4245 - wakeup_kswapd(zone, gfp_mask, order, high_zoneidx); 4243 + wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4246 4244 last_pgdat = zone->zone_pgdat; 4247 4245 } 4248 4246 } ··· 4377 4375 * request even if all reclaimable pages are considered then we are 4378 4376 * screwed and have to go OOM. 4379 4377 */ 4380 - for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 4381 - ac->nodemask) { 4378 + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4379 + ac->highest_zoneidx, ac->nodemask) { 4382 4380 unsigned long available; 4383 4381 unsigned long reclaimable; 4384 4382 unsigned long min_wmark = min_wmark_pages(zone); ··· 4392 4390 * reclaimable pages? 
4393 4391 */ 4394 4392 wmark = __zone_watermark_ok(zone, order, min_wmark, 4395 - ac_classzone_idx(ac), alloc_flags, available); 4393 + ac->highest_zoneidx, alloc_flags, available); 4396 4394 trace_reclaim_retry_zone(z, order, reclaimable, 4397 4395 available, min_wmark, *no_progress_loops, wmark); 4398 4396 if (wmark) { ··· 4511 4509 * could end up iterating over non-eligible zones endlessly. 4512 4510 */ 4513 4511 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4514 - ac->high_zoneidx, ac->nodemask); 4512 + ac->highest_zoneidx, ac->nodemask); 4515 4513 if (!ac->preferred_zoneref->zone) 4516 4514 goto nopage; 4517 4515 ··· 4598 4596 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4599 4597 ac->nodemask = NULL; 4600 4598 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4601 - ac->high_zoneidx, ac->nodemask); 4599 + ac->highest_zoneidx, ac->nodemask); 4602 4600 } 4603 4601 4604 4602 /* Attempt with potentially adjusted zonelist and alloc_flags */ ··· 4732 4730 struct alloc_context *ac, gfp_t *alloc_mask, 4733 4731 unsigned int *alloc_flags) 4734 4732 { 4735 - ac->high_zoneidx = gfp_zone(gfp_mask); 4733 + ac->highest_zoneidx = gfp_zone(gfp_mask); 4736 4734 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4737 4735 ac->nodemask = nodemask; 4738 4736 ac->migratetype = gfpflags_to_migratetype(gfp_mask); ··· 4771 4769 * may get reset for allocations that ignore memory policies. 4772 4770 */ 4773 4771 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4774 - ac->high_zoneidx, ac->nodemask); 4772 + ac->highest_zoneidx, ac->nodemask); 4775 4773 } 4776 4774 4777 4775 /* ··· 6869 6867 unsigned long end_pfn = 0; 6870 6868 6871 6869 /* pg_data_t should be reset to zero when it's allocated */ 6872 - WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); 6870 + WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 6873 6871 6874 6872 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 6875 6873
+2 -2
mm/slab.c
··· 3106 3106 struct zonelist *zonelist; 3107 3107 struct zoneref *z; 3108 3108 struct zone *zone; 3109 - enum zone_type high_zoneidx = gfp_zone(flags); 3109 + enum zone_type highest_zoneidx = gfp_zone(flags); 3110 3110 void *obj = NULL; 3111 3111 struct page *page; 3112 3112 int nid; ··· 3124 3124 * Look through allowed nodes for objects available 3125 3125 * from existing per node queues. 3126 3126 */ 3127 - for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3127 + for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 3128 3128 nid = zone_to_nid(zone); 3129 3129 3130 3130 if (cpuset_zone_allowed(zone, flags) &&
+2 -2
mm/slub.c
··· 1938 1938 struct zonelist *zonelist; 1939 1939 struct zoneref *z; 1940 1940 struct zone *zone; 1941 - enum zone_type high_zoneidx = gfp_zone(flags); 1941 + enum zone_type highest_zoneidx = gfp_zone(flags); 1942 1942 void *object; 1943 1943 unsigned int cpuset_mems_cookie; 1944 1944 ··· 1967 1967 do { 1968 1968 cpuset_mems_cookie = read_mems_allowed_begin(); 1969 1969 zonelist = node_zonelist(mempolicy_slab_node(), flags); 1970 - for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1970 + for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 1971 1971 struct kmem_cache_node *n; 1972 1972 1973 1973 n = get_node(s, zone_to_nid(zone));
+55 -50
mm/vmscan.c
··· 3131 3131 3132 3132 /* kswapd must be awake if processes are being throttled */ 3133 3133 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { 3134 - if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL) 3135 - WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL); 3134 + if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) 3135 + WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); 3136 3136 3137 3137 wake_up_interruptible(&pgdat->kswapd_wait); 3138 3138 } ··· 3385 3385 } while (memcg); 3386 3386 } 3387 3387 3388 - static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx) 3388 + static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) 3389 3389 { 3390 3390 int i; 3391 3391 struct zone *zone; ··· 3397 3397 * start prematurely when there is no boosting and a lower 3398 3398 * zone is balanced. 3399 3399 */ 3400 - for (i = classzone_idx; i >= 0; i--) { 3400 + for (i = highest_zoneidx; i >= 0; i--) { 3401 3401 zone = pgdat->node_zones + i; 3402 3402 if (!managed_zone(zone)) 3403 3403 continue; ··· 3411 3411 3412 3412 /* 3413 3413 * Returns true if there is an eligible zone balanced for the request order 3414 - * and classzone_idx 3414 + * and highest_zoneidx 3415 3415 */ 3416 - static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) 3416 + static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) 3417 3417 { 3418 3418 int i; 3419 3419 unsigned long mark = -1; ··· 3423 3423 * Check watermarks bottom-up as lower zones are more likely to 3424 3424 * meet watermarks. 
3425 3425 */ 3426 - for (i = 0; i <= classzone_idx; i++) { 3426 + for (i = 0; i <= highest_zoneidx; i++) { 3427 3427 zone = pgdat->node_zones + i; 3428 3428 3429 3429 if (!managed_zone(zone)) 3430 3430 continue; 3431 3431 3432 3432 mark = high_wmark_pages(zone); 3433 - if (zone_watermark_ok_safe(zone, order, mark, classzone_idx)) 3433 + if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) 3434 3434 return true; 3435 3435 } 3436 3436 3437 3437 /* 3438 - * If a node has no populated zone within classzone_idx, it does not 3438 + * If a node has no populated zone within highest_zoneidx, it does not 3439 3439 * need balancing by definition. This can happen if a zone-restricted 3440 3440 * allocation tries to wake a remote kswapd. 3441 3441 */ ··· 3461 3461 * 3462 3462 * Returns true if kswapd is ready to sleep 3463 3463 */ 3464 - static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx) 3464 + static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, 3465 + int highest_zoneidx) 3465 3466 { 3466 3467 /* 3467 3468 * The throttled processes are normally woken up in balance_pgdat() as ··· 3484 3483 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) 3485 3484 return true; 3486 3485 3487 - if (pgdat_balanced(pgdat, order, classzone_idx)) { 3486 + if (pgdat_balanced(pgdat, order, highest_zoneidx)) { 3488 3487 clear_pgdat_congested(pgdat); 3489 3488 return true; 3490 3489 } ··· 3548 3547 * or lower is eligible for reclaim until at least one usable zone is 3549 3548 * balanced. 3550 3549 */ 3551 - static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) 3550 + static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) 3552 3551 { 3553 3552 int i; 3554 3553 unsigned long nr_soft_reclaimed; ··· 3576 3575 * stall or direct reclaim until kswapd is finished. 
3577 3576 */ 3578 3577 nr_boost_reclaim = 0; 3579 - for (i = 0; i <= classzone_idx; i++) { 3578 + for (i = 0; i <= highest_zoneidx; i++) { 3580 3579 zone = pgdat->node_zones + i; 3581 3580 if (!managed_zone(zone)) 3582 3581 continue; ··· 3594 3593 bool balanced; 3595 3594 bool ret; 3596 3595 3597 - sc.reclaim_idx = classzone_idx; 3596 + sc.reclaim_idx = highest_zoneidx; 3598 3597 3599 3598 /* 3600 3599 * If the number of buffer_heads exceeds the maximum allowed ··· 3624 3623 * on the grounds that the normal reclaim should be enough to 3625 3624 * re-evaluate if boosting is required when kswapd next wakes. 3626 3625 */ 3627 - balanced = pgdat_balanced(pgdat, sc.order, classzone_idx); 3626 + balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); 3628 3627 if (!balanced && nr_boost_reclaim) { 3629 3628 nr_boost_reclaim = 0; 3630 3629 goto restart; ··· 3724 3723 if (boosted) { 3725 3724 unsigned long flags; 3726 3725 3727 - for (i = 0; i <= classzone_idx; i++) { 3726 + for (i = 0; i <= highest_zoneidx; i++) { 3728 3727 if (!zone_boosts[i]) 3729 3728 continue; 3730 3729 ··· 3739 3738 * As there is now likely space, wakeup kcompact to defragment 3740 3739 * pageblocks. 3741 3740 */ 3742 - wakeup_kcompactd(pgdat, pageblock_order, classzone_idx); 3741 + wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); 3743 3742 } 3744 3743 3745 3744 snapshot_refaults(NULL, pgdat); ··· 3757 3756 } 3758 3757 3759 3758 /* 3760 - * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be 3761 - * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not 3762 - * a valid index then either kswapd runs for first time or kswapd couldn't sleep 3763 - * after previous reclaim attempt (node is still unbalanced). In that case 3764 - * return the zone index of the previous kswapd reclaim cycle. 3759 + * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to 3760 + * be reclaimed by kswapd from the waker. 
If the value is MAX_NR_ZONES which is 3761 + * not a valid index then either kswapd runs for first time or kswapd couldn't 3762 + * sleep after previous reclaim attempt (node is still unbalanced). In that 3763 + * case return the zone index of the previous kswapd reclaim cycle. 3765 3764 */ 3766 - static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, 3767 - enum zone_type prev_classzone_idx) 3765 + static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, 3766 + enum zone_type prev_highest_zoneidx) 3768 3767 { 3769 - enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); 3768 + enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 3770 3769 3771 - return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx; 3770 + return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; 3772 3771 } 3773 3772 3774 3773 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 3775 - unsigned int classzone_idx) 3774 + unsigned int highest_zoneidx) 3776 3775 { 3777 3776 long remaining = 0; 3778 3777 DEFINE_WAIT(wait); ··· 3789 3788 * eligible zone balanced that it's also unlikely that compaction will 3790 3789 * succeed. 3791 3790 */ 3792 - if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3791 + if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 3793 3792 /* 3794 3793 * Compaction records what page blocks it recently failed to 3795 3794 * isolate pages from and skips them in the future scanning. ··· 3802 3801 * We have freed the memory, now we should compact it to make 3803 3802 * allocation of the requested order possible. 
3804 3803 */ 3805 - wakeup_kcompactd(pgdat, alloc_order, classzone_idx); 3804 + wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); 3806 3805 3807 3806 remaining = schedule_timeout(HZ/10); 3808 3807 3809 3808 /* 3810 - * If woken prematurely then reset kswapd_classzone_idx and 3809 + * If woken prematurely then reset kswapd_highest_zoneidx and 3811 3810 * order. The values will either be from a wakeup request or 3812 3811 * the previous request that slept prematurely. 3813 3812 */ 3814 3813 if (remaining) { 3815 - WRITE_ONCE(pgdat->kswapd_classzone_idx, 3816 - kswapd_classzone_idx(pgdat, classzone_idx)); 3814 + WRITE_ONCE(pgdat->kswapd_highest_zoneidx, 3815 + kswapd_highest_zoneidx(pgdat, 3816 + highest_zoneidx)); 3817 3817 3818 3818 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) 3819 3819 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); ··· 3829 3827 * go fully to sleep until explicitly woken up. 3830 3828 */ 3831 3829 if (!remaining && 3832 - prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) { 3830 + prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { 3833 3831 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 3834 3832 3835 3833 /* ··· 3871 3869 static int kswapd(void *p) 3872 3870 { 3873 3871 unsigned int alloc_order, reclaim_order; 3874 - unsigned int classzone_idx = MAX_NR_ZONES - 1; 3872 + unsigned int highest_zoneidx = MAX_NR_ZONES - 1; 3875 3873 pg_data_t *pgdat = (pg_data_t*)p; 3876 3874 struct task_struct *tsk = current; 3877 3875 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); ··· 3895 3893 set_freezable(); 3896 3894 3897 3895 WRITE_ONCE(pgdat->kswapd_order, 0); 3898 - WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); 3896 + WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 3899 3897 for ( ; ; ) { 3900 3898 bool ret; 3901 3899 3902 3900 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 3903 - classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 3901 + highest_zoneidx = 
kswapd_highest_zoneidx(pgdat, 3902 + highest_zoneidx); 3904 3903 3905 3904 kswapd_try_sleep: 3906 3905 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, 3907 - classzone_idx); 3906 + highest_zoneidx); 3908 3907 3909 - /* Read the new order and classzone_idx */ 3908 + /* Read the new order and highest_zoneidx */ 3910 3909 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); 3911 - classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); 3910 + highest_zoneidx = kswapd_highest_zoneidx(pgdat, 3911 + highest_zoneidx); 3912 3912 WRITE_ONCE(pgdat->kswapd_order, 0); 3913 - WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); 3913 + WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); 3914 3914 3915 3915 ret = try_to_freeze(); 3916 3916 if (kthread_should_stop()) ··· 3933 3929 * but kcompactd is woken to compact for the original 3934 3930 * request (alloc_order). 3935 3931 */ 3936 - trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx, 3932 + trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, 3937 3933 alloc_order); 3938 - reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); 3934 + reclaim_order = balance_pgdat(pgdat, alloc_order, 3935 + highest_zoneidx); 3939 3936 if (reclaim_order < alloc_order) 3940 3937 goto kswapd_try_sleep; 3941 3938 } ··· 3954 3949 * needed. 
3955 3950 */ 3956 3951 void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, 3957 - enum zone_type classzone_idx) 3952 + enum zone_type highest_zoneidx) 3958 3953 { 3959 3954 pg_data_t *pgdat; 3960 3955 enum zone_type curr_idx; ··· 3966 3961 return; 3967 3962 3968 3963 pgdat = zone->zone_pgdat; 3969 - curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); 3964 + curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); 3970 3965 3971 - if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx) 3972 - WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx); 3966 + if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) 3967 + WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); 3973 3968 3974 3969 if (READ_ONCE(pgdat->kswapd_order) < order) 3975 3970 WRITE_ONCE(pgdat->kswapd_order, order); ··· 3979 3974 3980 3975 /* Hopeless node, leave it to direct reclaim if possible */ 3981 3976 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || 3982 - (pgdat_balanced(pgdat, order, classzone_idx) && 3983 - !pgdat_watermark_boosted(pgdat, classzone_idx))) { 3977 + (pgdat_balanced(pgdat, order, highest_zoneidx) && 3978 + !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { 3984 3979 /* 3985 3980 * There may be plenty of free memory available, but it's too 3986 3981 * fragmented for high-order allocations. Wake up kcompactd ··· 3989 3984 * ratelimit its work. 3990 3985 */ 3991 3986 if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) 3992 - wakeup_kcompactd(pgdat, order, classzone_idx); 3987 + wakeup_kcompactd(pgdat, order, highest_zoneidx); 3993 3988 return; 3994 3989 } 3995 3990 3996 - trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order, 3991 + trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, 3997 3992 gfp_flags); 3998 3993 wake_up_interruptible(&pgdat->kswapd_wait); 3999 3994 }