Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: use two zonelists that are filtered by GFP mask

Currently a node has two sets of zonelists, one for each zone type in the
system and a second set for GFP_THISNODE allocations. Based on the zones
allowed by a gfp mask, one of these zonelists is selected. All of these
zonelists consume memory and occupy cache lines.

This patch replaces the multiple zonelists per-node with two zonelists. The
first contains all populated zones in the system, ordered by distance, for
fallback allocations when the target/preferred node has no free pages. The
second contains all populated zones in the node suitable for GFP_THISNODE
allocations.

An iterator macro is introduced called for_each_zone_zonelist() that iterates
through each zone allowed by the GFP flags in the selected zonelist.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Mel Gorman and committed by
Linus Torvalds
54a6eb5c 18ea7e71

+168 -154
+7 -4
arch/parisc/mm/init.c
··· 603 603 #ifdef CONFIG_DISCONTIGMEM 604 604 { 605 605 struct zonelist *zl; 606 - int i, j, k; 606 + int i, j; 607 607 608 608 for (i = 0; i < npmem_ranges; i++) { 609 + zl = node_zonelist(i); 609 610 for (j = 0; j < MAX_NR_ZONES; j++) { 610 - zl = NODE_DATA(i)->node_zonelists + j; 611 + struct zone **z; 612 + struct zone *zone; 611 613 612 614 printk("Zone list for zone %d on node %d: ", j, i); 613 - for (k = 0; zl->zones[k] != NULL; k++) 614 - printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name); 615 + for_each_zone_zonelist(zone, z, zl, j) 616 + printk("[%d/%s] ", zone_to_nid(zone), 617 + zone->name); 615 618 printk("\n"); 616 619 } 617 620 }
+6 -4
fs/buffer.c
··· 360 360 */ 361 361 static void free_more_memory(void) 362 362 { 363 - struct zonelist *zonelist; 363 + struct zone **zones; 364 364 int nid; 365 365 366 366 wakeup_pdflush(1024); 367 367 yield(); 368 368 369 369 for_each_online_node(nid) { 370 - zonelist = node_zonelist(nid, GFP_NOFS); 371 - if (zonelist->zones[0]) 372 - try_to_free_pages(zonelist, 0, GFP_NOFS); 370 + zones = first_zones_zonelist(node_zonelist(nid, GFP_NOFS), 371 + gfp_zone(GFP_NOFS)); 372 + if (*zones) 373 + try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, 374 + GFP_NOFS); 373 375 } 374 376 } 375 377
+11 -2
include/linux/gfp.h
··· 151 151 * virtual kernel addresses to the allocated page(s). 152 152 */ 153 153 154 + static inline int gfp_zonelist(gfp_t flags) 155 + { 156 + if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE)) 157 + return 1; 158 + 159 + return 0; 160 + } 161 + 154 162 /* 155 163 * We get the zone list from the current node and the gfp_mask. 156 164 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. 157 - * There are many zonelists per node, two for each active zone. 165 + * There are two zonelists per node, one for all zones with memory and 166 + * one containing just zones from the node the zonelist belongs to. 158 167 * 159 168 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets 160 169 * optimized to &contig_page_data at compile-time. 161 170 */ 162 171 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) 163 172 { 164 - return NODE_DATA(nid)->node_zonelists + gfp_zone(flags); 173 + return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); 165 174 } 166 175 167 176 #ifndef HAVE_ARCH_FREE_PAGE
+43 -22
include/linux/mmzone.h
··· 393 393 * The NUMA zonelists are doubled becausse we need zonelists that restrict the 394 394 * allocations to a single node for GFP_THISNODE. 395 395 * 396 - * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback 397 - * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE) 396 + * [0] : Zonelist with fallback 397 + * [1] : No fallback (GFP_THISNODE) 398 398 */ 399 - #define MAX_ZONELISTS (2 * MAX_NR_ZONES) 399 + #define MAX_ZONELISTS 2 400 400 401 401 402 402 /* ··· 464 464 unsigned long last_full_zap; /* when last zap'd (jiffies) */ 465 465 }; 466 466 #else 467 - #define MAX_ZONELISTS MAX_NR_ZONES 467 + #define MAX_ZONELISTS 1 468 468 struct zonelist_cache; 469 469 #endif 470 470 ··· 485 485 struct zonelist_cache zlcache; // optional ... 486 486 #endif 487 487 }; 488 - 489 - #ifdef CONFIG_NUMA 490 - /* 491 - * Only custom zonelists like MPOL_BIND need to be filtered as part of 492 - * policies. As described in the comment for struct zonelist_cache, these 493 - * zonelists will not have a zlcache so zlcache_ptr will not be set. Use 494 - * that to determine if the zonelists needs to be filtered or not. 
495 - */ 496 - static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) 497 - { 498 - return !zonelist->zlcache_ptr; 499 - } 500 - #else 501 - static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) 502 - { 503 - return 0; 504 - } 505 - #endif /* CONFIG_NUMA */ 506 488 507 489 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 508 490 struct node_active_region { ··· 712 730 for (zone = (first_online_pgdat())->node_zones; \ 713 731 zone; \ 714 732 zone = next_zone(zone)) 733 + 734 + /* Returns the first zone at or below highest_zoneidx in a zonelist */ 735 + static inline struct zone **first_zones_zonelist(struct zonelist *zonelist, 736 + enum zone_type highest_zoneidx) 737 + { 738 + struct zone **z; 739 + 740 + /* Find the first suitable zone to use for the allocation */ 741 + z = zonelist->zones; 742 + while (*z && zone_idx(*z) > highest_zoneidx) 743 + z++; 744 + 745 + return z; 746 + } 747 + 748 + /* Returns the next zone at or below highest_zoneidx in a zonelist */ 749 + static inline struct zone **next_zones_zonelist(struct zone **z, 750 + enum zone_type highest_zoneidx) 751 + { 752 + /* Find the next suitable zone to use for the allocation */ 753 + while (*z && zone_idx(*z) > highest_zoneidx) 754 + z++; 755 + 756 + return z; 757 + } 758 + 759 + /** 760 + * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index 761 + * @zone - The current zone in the iterator 762 + * @z - The current pointer within zonelist->zones being iterated 763 + * @zlist - The zonelist being iterated 764 + * @highidx - The zone index of the highest zone to return 765 + * 766 + * This iterator iterates though all zones at or below a given zone index. 
767 + */ 768 + #define for_each_zone_zonelist(zone, z, zlist, highidx) \ 769 + for (z = first_zones_zonelist(zlist, highidx), zone = *z++; \ 770 + zone; \ 771 + z = next_zones_zonelist(z, highidx), zone = *z++) 715 772 716 773 #ifdef CONFIG_SPARSEMEM 717 774 #include <asm/sparsemem.h>
+4 -4
mm/hugetlb.c
··· 97 97 struct mempolicy *mpol; 98 98 struct zonelist *zonelist = huge_zonelist(vma, address, 99 99 htlb_alloc_mask, &mpol); 100 - struct zone **z; 100 + struct zone *zone, **z; 101 101 102 - for (z = zonelist->zones; *z; z++) { 103 - nid = zone_to_nid(*z); 104 - if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) && 102 + for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1) { 103 + nid = zone_to_nid(zone); 104 + if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && 105 105 !list_empty(&hugepage_freelists[nid])) { 106 106 page = list_entry(hugepage_freelists[nid].next, 107 107 struct page, lru);
+5 -3
mm/oom_kill.c
··· 175 175 gfp_t gfp_mask) 176 176 { 177 177 #ifdef CONFIG_NUMA 178 + struct zone *zone; 178 179 struct zone **z; 180 + enum zone_type high_zoneidx = gfp_zone(gfp_mask); 179 181 nodemask_t nodes = node_states[N_HIGH_MEMORY]; 180 182 181 - for (z = zonelist->zones; *z; z++) 182 - if (cpuset_zone_allowed_softwall(*z, gfp_mask)) 183 - node_clear(zone_to_nid(*z), nodes); 183 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 184 + if (cpuset_zone_allowed_softwall(zone, gfp_mask)) 185 + node_clear(zone_to_nid(zone), nodes); 184 186 else 185 187 return CONSTRAINT_CPUSET; 186 188
+73 -97
mm/page_alloc.c
··· 1378 1378 */ 1379 1379 static struct page * 1380 1380 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 1381 - struct zonelist *zonelist, int alloc_flags) 1381 + struct zonelist *zonelist, int high_zoneidx, int alloc_flags) 1382 1382 { 1383 1383 struct zone **z; 1384 1384 struct page *page = NULL; 1385 - int classzone_idx = zone_idx(zonelist->zones[0]); 1385 + int classzone_idx; 1386 1386 struct zone *zone, *preferred_zone; 1387 1387 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1388 1388 int zlc_active = 0; /* set if using zonelist_cache */ 1389 1389 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1390 - enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */ 1390 + 1391 + z = first_zones_zonelist(zonelist, high_zoneidx); 1392 + classzone_idx = zone_idx(*z); 1393 + preferred_zone = *z; 1391 1394 1392 1395 zonelist_scan: 1393 1396 /* 1394 1397 * Scan zonelist, looking for a zone with enough free. 1395 1398 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1396 1399 */ 1397 - z = zonelist->zones; 1398 - preferred_zone = *z; 1399 - 1400 - do { 1401 - /* 1402 - * In NUMA, this could be a policy zonelist which contains 1403 - * zones that may not be allowed by the current gfp_mask. 
1404 - * Check the zone is allowed by the current flags 1405 - */ 1406 - if (unlikely(alloc_should_filter_zonelist(zonelist))) { 1407 - if (highest_zoneidx == -1) 1408 - highest_zoneidx = gfp_zone(gfp_mask); 1409 - if (zone_idx(*z) > highest_zoneidx) 1410 - continue; 1411 - } 1412 - 1400 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1413 1401 if (NUMA_BUILD && zlc_active && 1414 1402 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1415 1403 continue; 1416 - zone = *z; 1417 1404 if ((alloc_flags & ALLOC_CPUSET) && 1418 1405 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1419 1406 goto try_next_zone; ··· 1434 1447 zlc_active = 1; 1435 1448 did_zlc_setup = 1; 1436 1449 } 1437 - } while (*(++z) != NULL); 1450 + } 1438 1451 1439 1452 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1440 1453 /* Disable zlc cache for second zonelist scan */ ··· 1452 1465 struct zonelist *zonelist) 1453 1466 { 1454 1467 const gfp_t wait = gfp_mask & __GFP_WAIT; 1468 + enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1455 1469 struct zone **z; 1456 1470 struct page *page; 1457 1471 struct reclaim_state reclaim_state; ··· 1478 1490 } 1479 1491 1480 1492 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1481 - zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1493 + zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1482 1494 if (page) 1483 1495 goto got_pg; 1484 1496 ··· 1522 1534 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1523 1535 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
1524 1536 */ 1525 - page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1537 + page = get_page_from_freelist(gfp_mask, order, zonelist, 1538 + high_zoneidx, alloc_flags); 1526 1539 if (page) 1527 1540 goto got_pg; 1528 1541 ··· 1536 1547 nofail_alloc: 1537 1548 /* go through the zonelist yet again, ignoring mins */ 1538 1549 page = get_page_from_freelist(gfp_mask, order, 1539 - zonelist, ALLOC_NO_WATERMARKS); 1550 + zonelist, high_zoneidx, ALLOC_NO_WATERMARKS); 1540 1551 if (page) 1541 1552 goto got_pg; 1542 1553 if (gfp_mask & __GFP_NOFAIL) { ··· 1571 1582 1572 1583 if (likely(did_some_progress)) { 1573 1584 page = get_page_from_freelist(gfp_mask, order, 1574 - zonelist, alloc_flags); 1585 + zonelist, high_zoneidx, alloc_flags); 1575 1586 if (page) 1576 1587 goto got_pg; 1577 1588 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { ··· 1587 1598 * under heavy pressure. 1588 1599 */ 1589 1600 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1590 - zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1601 + zonelist, high_zoneidx, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1591 1602 if (page) { 1592 1603 clear_zonelist_oom(zonelist); 1593 1604 goto got_pg; ··· 1702 1713 1703 1714 static unsigned int nr_free_zone_pages(int offset) 1704 1715 { 1716 + struct zone **z; 1717 + struct zone *zone; 1718 + 1705 1719 /* Just pick one node, since fallback list is circular */ 1706 1720 unsigned int sum = 0; 1707 1721 1708 1722 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 1709 - struct zone **zonep = zonelist->zones; 1710 - struct zone *zone; 1711 1723 1712 - for (zone = *zonep++; zone; zone = *zonep++) { 1724 + for_each_zone_zonelist(zone, z, zonelist, offset) { 1713 1725 unsigned long size = zone->present_pages; 1714 1726 unsigned long high = zone->pages_high; 1715 1727 if (size > high) ··· 2068 2078 */ 2069 2079 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 2070 2080 { 2071 - enum zone_type i; 2072 
2081 int j; 2073 2082 struct zonelist *zonelist; 2074 2083 2075 - for (i = 0; i < MAX_NR_ZONES; i++) { 2076 - zonelist = pgdat->node_zonelists + i; 2077 - for (j = 0; zonelist->zones[j] != NULL; j++) 2078 - ; 2079 - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2080 - zonelist->zones[j] = NULL; 2081 - } 2084 + zonelist = &pgdat->node_zonelists[0]; 2085 + for (j = 0; zonelist->zones[j] != NULL; j++) 2086 + ; 2087 + j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2088 + MAX_NR_ZONES - 1); 2089 + zonelist->zones[j] = NULL; 2082 2090 } 2083 2091 2084 2092 /* ··· 2084 2096 */ 2085 2097 static void build_thisnode_zonelists(pg_data_t *pgdat) 2086 2098 { 2087 - enum zone_type i; 2088 2099 int j; 2089 2100 struct zonelist *zonelist; 2090 2101 2091 - for (i = 0; i < MAX_NR_ZONES; i++) { 2092 - zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i; 2093 - j = build_zonelists_node(pgdat, zonelist, 0, i); 2094 - zonelist->zones[j] = NULL; 2095 - } 2102 + zonelist = &pgdat->node_zonelists[1]; 2103 + j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2104 + zonelist->zones[j] = NULL; 2096 2105 } 2097 2106 2098 2107 /* ··· 2102 2117 2103 2118 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 2104 2119 { 2105 - enum zone_type i; 2106 2120 int pos, j, node; 2107 2121 int zone_type; /* needs to be signed */ 2108 2122 struct zone *z; 2109 2123 struct zonelist *zonelist; 2110 2124 2111 - for (i = 0; i < MAX_NR_ZONES; i++) { 2112 - zonelist = pgdat->node_zonelists + i; 2113 - pos = 0; 2114 - for (zone_type = i; zone_type >= 0; zone_type--) { 2115 - for (j = 0; j < nr_nodes; j++) { 2116 - node = node_order[j]; 2117 - z = &NODE_DATA(node)->node_zones[zone_type]; 2118 - if (populated_zone(z)) { 2119 - zonelist->zones[pos++] = z; 2120 - check_highest_zone(zone_type); 2121 - } 2125 + zonelist = &pgdat->node_zonelists[0]; 2126 + pos = 0; 2127 + for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 2128 + for (j = 0; j < 
nr_nodes; j++) { 2129 + node = node_order[j]; 2130 + z = &NODE_DATA(node)->node_zones[zone_type]; 2131 + if (populated_zone(z)) { 2132 + zonelist->zones[pos++] = z; 2133 + check_highest_zone(zone_type); 2122 2134 } 2123 2135 } 2124 - zonelist->zones[pos] = NULL; 2125 2136 } 2137 + zonelist->zones[pos] = NULL; 2126 2138 } 2127 2139 2128 2140 static int default_zonelist_order(void) ··· 2246 2264 /* Construct the zonelist performance cache - see further mmzone.h */ 2247 2265 static void build_zonelist_cache(pg_data_t *pgdat) 2248 2266 { 2249 - int i; 2267 + struct zonelist *zonelist; 2268 + struct zonelist_cache *zlc; 2269 + struct zone **z; 2250 2270 2251 - for (i = 0; i < MAX_NR_ZONES; i++) { 2252 - struct zonelist *zonelist; 2253 - struct zonelist_cache *zlc; 2254 - struct zone **z; 2255 - 2256 - zonelist = pgdat->node_zonelists + i; 2257 - zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2258 - bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2259 - for (z = zonelist->zones; *z; z++) 2260 - zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 2261 - } 2271 + zonelist = &pgdat->node_zonelists[0]; 2272 + zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 2273 + bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 2274 + for (z = zonelist->zones; *z; z++) 2275 + zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 2262 2276 } 2263 2277 2264 2278 ··· 2268 2290 static void build_zonelists(pg_data_t *pgdat) 2269 2291 { 2270 2292 int node, local_node; 2271 - enum zone_type i,j; 2293 + enum zone_type j; 2294 + struct zonelist *zonelist; 2272 2295 2273 2296 local_node = pgdat->node_id; 2274 - for (i = 0; i < MAX_NR_ZONES; i++) { 2275 - struct zonelist *zonelist; 2276 2297 2277 - zonelist = pgdat->node_zonelists + i; 2298 + zonelist = &pgdat->node_zonelists[0]; 2299 + j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1); 2278 2300 2279 - j = build_zonelists_node(pgdat, zonelist, 0, i); 2280 - /* 2281 - * Now we build the zonelist so that it contains the zones 
2282 - * of all the other nodes. 2283 - * We don't want to pressure a particular node, so when 2284 - * building the zones for node N, we make sure that the 2285 - * zones coming right after the local ones are those from 2286 - * node N+1 (modulo N) 2287 - */ 2288 - for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2289 - if (!node_online(node)) 2290 - continue; 2291 - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2292 - } 2293 - for (node = 0; node < local_node; node++) { 2294 - if (!node_online(node)) 2295 - continue; 2296 - j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 2297 - } 2298 - 2299 - zonelist->zones[j] = NULL; 2301 + /* 2302 + * Now we build the zonelist so that it contains the zones 2303 + * of all the other nodes. 2304 + * We don't want to pressure a particular node, so when 2305 + * building the zones for node N, we make sure that the 2306 + * zones coming right after the local ones are those from 2307 + * node N+1 (modulo N) 2308 + */ 2309 + for (node = local_node + 1; node < MAX_NUMNODES; node++) { 2310 + if (!node_online(node)) 2311 + continue; 2312 + j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2313 + MAX_NR_ZONES - 1); 2300 2314 } 2315 + for (node = 0; node < local_node; node++) { 2316 + if (!node_online(node)) 2317 + continue; 2318 + j = build_zonelists_node(NODE_DATA(node), zonelist, j, 2319 + MAX_NR_ZONES - 1); 2320 + } 2321 + 2322 + zonelist->zones[j] = NULL; 2301 2323 } 2302 2324 2303 2325 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 2304 2326 static void build_zonelist_cache(pg_data_t *pgdat) 2305 2327 { 2306 - int i; 2307 - 2308 - for (i = 0; i < MAX_NR_ZONES; i++) 2309 - pgdat->node_zonelists[i].zlcache_ptr = NULL; 2328 + pgdat->node_zonelists[0].zlcache_ptr = NULL; 2329 + pgdat->node_zonelists[1].zlcache_ptr = NULL; 2310 2330 } 2311 2331 2312 2332 #endif /* CONFIG_NUMA */
+5 -3
mm/slab.c
··· 3243 3243 struct zonelist *zonelist; 3244 3244 gfp_t local_flags; 3245 3245 struct zone **z; 3246 + struct zone *zone; 3247 + enum zone_type high_zoneidx = gfp_zone(flags); 3246 3248 void *obj = NULL; 3247 3249 int nid; 3248 3250 ··· 3259 3257 * Look through allowed nodes for objects available 3260 3258 * from existing per node queues. 3261 3259 */ 3262 - for (z = zonelist->zones; *z && !obj; z++) { 3263 - nid = zone_to_nid(*z); 3260 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3261 + nid = zone_to_nid(zone); 3264 3262 3265 - if (cpuset_zone_allowed_hardwall(*z, flags) && 3263 + if (cpuset_zone_allowed_hardwall(zone, flags) && 3266 3264 cache->nodelists[nid] && 3267 3265 cache->nodelists[nid]->free_objects) 3268 3266 obj = ____cache_alloc_node(cache,
+5 -3
mm/slub.c
··· 1285 1285 #ifdef CONFIG_NUMA 1286 1286 struct zonelist *zonelist; 1287 1287 struct zone **z; 1288 + struct zone *zone; 1289 + enum zone_type high_zoneidx = gfp_zone(flags); 1288 1290 struct page *page; 1289 1291 1290 1292 /* ··· 1312 1310 return NULL; 1313 1311 1314 1312 zonelist = node_zonelist(slab_node(current->mempolicy), flags); 1315 - for (z = zonelist->zones; *z; z++) { 1313 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1316 1314 struct kmem_cache_node *n; 1317 1315 1318 - n = get_node(s, zone_to_nid(*z)); 1316 + n = get_node(s, zone_to_nid(zone)); 1319 1317 1320 - if (n && cpuset_zone_allowed_hardwall(*z, flags) && 1318 + if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1321 1319 n->nr_partial > MIN_PARTIAL) { 1322 1320 page = get_partial_node(n); 1323 1321 if (page)
+9 -12
mm/vmscan.c
··· 1249 1249 static unsigned long shrink_zones(int priority, struct zonelist *zonelist, 1250 1250 struct scan_control *sc) 1251 1251 { 1252 + enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1252 1253 unsigned long nr_reclaimed = 0; 1253 - struct zone **zones = zonelist->zones; 1254 - int i; 1255 - 1254 + struct zone **z; 1255 + struct zone *zone; 1256 1256 1257 1257 sc->all_unreclaimable = 1; 1258 - for (i = 0; zones[i] != NULL; i++) { 1259 - struct zone *zone = zones[i]; 1260 - 1258 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1261 1259 if (!populated_zone(zone)) 1262 1260 continue; 1263 1261 /* ··· 1309 1311 unsigned long nr_reclaimed = 0; 1310 1312 struct reclaim_state *reclaim_state = current->reclaim_state; 1311 1313 unsigned long lru_pages = 0; 1312 - struct zone **zones = zonelist->zones; 1313 - int i; 1314 + struct zone **z; 1315 + struct zone *zone; 1316 + enum zone_type high_zoneidx = gfp_zone(gfp_mask); 1314 1317 1315 1318 if (scan_global_lru(sc)) 1316 1319 count_vm_event(ALLOCSTALL); ··· 1319 1320 * mem_cgroup will not do shrink_slab. 1320 1321 */ 1321 1322 if (scan_global_lru(sc)) { 1322 - for (i = 0; zones[i] != NULL; i++) { 1323 - struct zone *zone = zones[i]; 1323 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1324 1324 1325 1325 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1326 1326 continue; ··· 1383 1385 priority = 0; 1384 1386 1385 1387 if (scan_global_lru(sc)) { 1386 - for (i = 0; zones[i] != NULL; i++) { 1387 - struct zone *zone = zones[i]; 1388 + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1388 1389 1389 1390 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1390 1391 continue;