
mm: improve code consistency with zonelist_* helper functions

Replace direct access to zoneref->zone, zoneref->zone_idx, or
zone_to_nid(zoneref->zone) with the corresponding zonelist_* helper
functions for consistency.

No functional change.
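For reference, the zonelist_* accessors this patch switches to are already provided by include/linux/mmzone.h, roughly as sketched below (paraphrased from the kernel headers; not part of this change):

/* struct zoneref accessors, approximately as found in include/linux/mmzone.h */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
        return zone_to_nid(zoneref->zone);
}

Because each helper is a trivial inline wrapper around the same member access, substituting them is purely a consistency/readability change.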

Link: https://lkml.kernel.org/r/20240729091717.464-1-shivankg@amd.com
Co-developed-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Wei Yang, committed by Andrew Morton
29943248 9325b8b5

+18 -18 (total)

include/linux/mmzone.h  +2 -2
@@ -1688,7 +1688,7 @@
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;					\
+	for (zone = zonelist_zone(z);				\
 		zone;						\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
@@ -1724,7 +1724,7 @@
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
 
include/trace/events/oom.h  +2 -2
@@ -55,8 +55,8 @@
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
-		__entry->zone_idx = zoneref->zone_idx;
+		__entry->node = zonelist_node_idx(zoneref);
+		__entry->zone_idx = zonelist_zone_idx(zoneref);
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
 		__entry->available = available;
mm/mempolicy.c  +2 -2
@@ -1951,7 +1951,7 @@
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 					 &policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zonelist_node_idx(z) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2809,7 +2809,7 @@
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zonelist_node_idx(z);
 		break;
 
 	default:
mm/mmzone.c  +1 -1
@@ -66,7 +66,7 @@
 		z++;
 	else
 		while (zonelist_zone_idx(z) > highest_zoneidx ||
-				(z->zone && !zref_in_nodemask(z, nodes)))
+				(zonelist_zone(z) && !zref_in_nodemask(z, nodes)))
 			z++;
 
 	return z;
mm/page_alloc.c  +11 -11
@@ -3350,7 +3350,7 @@
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-		    zone != ac->preferred_zoneref->zone) {
+		    zone != zonelist_zone(ac->preferred_zoneref)) {
 			int local_nid;
 
 			/*
@@ -3358,7 +3358,7 @@
 			 * fragmenting fallbacks. Locality is more important
 			 * than fragmentation avoidance.
 			 */
-			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
+			local_nid = zonelist_node_idx(ac->preferred_zoneref);
 			if (zone_to_nid(zone) != local_nid) {
 				alloc_flags &= ~ALLOC_NOFRAGMENT;
 				goto retry;
@@ -3411,7 +3411,7 @@
 				goto try_this_zone;
 
 			if (!node_reclaim_enabled() ||
-			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
+			    !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
 				continue;
 
 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
@@ -3433,7 +3433,7 @@
 		}
 
 try_this_zone:
-		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
+		page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
@@ -4202,7 +4202,7 @@
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4214,7 +4214,7 @@
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4571,8 +4571,8 @@
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
 			goto failed;
 		}
 
@@ -4631,7 +4631,7 @@
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4689,7 +4689,7 @@
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5294,7 +5294,7 @@
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				 gfp_zone(GFP_KERNEL),
 				 NULL);
-	return zone_to_nid(z->zone);
+	return zonelist_node_idx(z);
 }
 #endif
 