Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: reduce noise in show_mem for lowmem allocations

While discussing early DMA pool pre-allocation failure with Christoph [1]
I have realized that the allocation failure warning is rather noisy for
constrained allocations like GFP_DMA{32}. Those zones are often not
populated on all nodes, as their memory ranges are constrained.

This is an attempt to reduce the ballast that doesn't provide any relevant
information for the investigation of those allocation failures. Please note that
I have only compile tested it (in my default config setup) and I am
throwing it mostly to see what people think about it.

[1] http://lkml.kernel.org/r/20220817060647.1032426-1-hch@lst.de

[mhocko@suse.com: update]
Link: https://lkml.kernel.org/r/Yw29bmJTIkKogTiW@dhcp22.suse.cz
[mhocko@suse.com: fix build]
[akpm@linux-foundation.org: fix it for mapletree]
[akpm@linux-foundation.org: update it for Michal's update]
[mhocko@suse.com: fix arch/powerpc/xmon/xmon.c]
Link: https://lkml.kernel.org/r/Ywh3C4dKB9B93jIy@dhcp22.suse.cz
[akpm@linux-foundation.org: fix arch/sparc/kernel/setup_32.c]
Link: https://lkml.kernel.org/r/YwScVmVofIZkopkF@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Michal Hocko and committed by Andrew Morton
(commit 974f4367, parent 7014887a)

+33 -7
+11 -2
include/linux/mm.h
··· 1838 1838 */ 1839 1839 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ 1840 1840 1841 - extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); 1841 + extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); 1842 + static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask) 1843 + { 1844 + __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1); 1845 + } 1842 1846 1843 1847 #ifdef CONFIG_MMU 1844 1848 extern bool can_do_mlock(void); ··· 2582 2578 extern int __meminit init_per_zone_wmark_min(void); 2583 2579 extern void mem_init(void); 2584 2580 extern void __init mmap_init(void); 2585 - extern void show_mem(unsigned int flags, nodemask_t *nodemask); 2581 + 2582 + extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx); 2583 + static inline void show_mem(unsigned int flags, nodemask_t *nodemask) 2584 + { 2585 + __show_mem(flags, nodemask, MAX_NR_ZONES - 1); 2586 + } 2586 2587 extern long si_mem_available(void); 2587 2588 extern void si_meminfo(struct sysinfo * val); 2588 2589 extern void si_meminfo_node(struct sysinfo *val, int nid);
+2 -2
lib/show_mem.c
··· 8 8 #include <linux/mm.h> 9 9 #include <linux/cma.h> 10 10 11 - void show_mem(unsigned int filter, nodemask_t *nodemask) 11 + void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 12 12 { 13 13 pg_data_t *pgdat; 14 14 unsigned long total = 0, reserved = 0, highmem = 0; 15 15 16 16 printk("Mem-Info:\n"); 17 - show_free_areas(filter, nodemask); 17 + __show_free_areas(filter, nodemask, max_zone_idx); 18 18 19 19 for_each_online_pgdat(pgdat) { 20 20 int zoneid;
+1 -1
mm/oom_kill.c
··· 461 461 if (is_memcg_oom(oc)) 462 462 mem_cgroup_print_oom_meminfo(oc->memcg); 463 463 else { 464 - show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); 464 + __show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask)); 465 465 if (should_dump_unreclaim_slab()) 466 466 dump_unreclaimable_slab(); 467 467 }
+19 -2
mm/page_alloc.c
··· 4322 4322 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4323 4323 filter &= ~SHOW_MEM_FILTER_NODES; 4324 4324 4325 - show_mem(filter, nodemask); 4325 + __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 4326 4326 } 4327 4327 4328 4328 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) ··· 6050 6050 printk(KERN_CONT "(%s) ", tmp); 6051 6051 } 6052 6052 6053 + static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) 6054 + { 6055 + int zone_idx; 6056 + for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) 6057 + if (zone_managed_pages(pgdat->node_zones + zone_idx)) 6058 + return true; 6059 + return false; 6060 + } 6061 + 6053 6062 /* 6054 6063 * Show free area list (used inside shift_scroll-lock stuff) 6055 6064 * We also calculate the percentage fragmentation. We do this by counting the ··· 6068 6059 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 6069 6060 * cpuset. 6070 6061 */ 6071 - void show_free_areas(unsigned int filter, nodemask_t *nodemask) 6062 + void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 6072 6063 { 6073 6064 unsigned long free_pcp = 0; 6074 6065 int cpu, nid; ··· 6076 6067 pg_data_t *pgdat; 6077 6068 6078 6069 for_each_populated_zone(zone) { 6070 + if (zone_idx(zone) > max_zone_idx) 6071 + continue; 6079 6072 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6080 6073 continue; 6081 6074 ··· 6114 6103 6115 6104 for_each_online_pgdat(pgdat) { 6116 6105 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 6106 + continue; 6107 + if (!node_has_managed_zones(pgdat, max_zone_idx)) 6117 6108 continue; 6118 6109 6119 6110 printk("Node %d" ··· 6173 6160 for_each_populated_zone(zone) { 6174 6161 int i; 6175 6162 6163 + if (zone_idx(zone) > max_zone_idx) 6164 + continue; 6176 6165 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6177 6166 continue; 6178 6167 ··· 6236 6221 unsigned long nr[MAX_ORDER], flags, total = 0; 
6237 6222 unsigned char types[MAX_ORDER]; 6238 6223 6224 + if (zone_idx(zone) > max_zone_idx) 6225 + continue; 6239 6226 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6240 6227 continue; 6241 6228 show_node(zone);