Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: page_alloc: collect mem statistic into show_mem.c

Let's move show_mem.c from lib to mm, as it belongs to the memory subsystem;
also split some memory-statistics-related functions out of page_alloc.c into
show_mem.c, and clean up some unneeded includes.

There is no functional change.

Link: https://lkml.kernel.org/r/20230516063821.121844-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Iurii Zaikin <yzaikin@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Kefeng Wang and committed by
Andrew Morton
e9aae170 904d5857

+431 -441
+1 -1
lib/Makefile
··· 30 30 lib-y := ctype.o string.o vsprintf.o cmdline.o \ 31 31 rbtree.o radix-tree.o timerqueue.o xarray.o \ 32 32 maple_tree.o idr.o extable.o irq_regs.o argv_split.o \ 33 - flex_proportions.o ratelimit.o show_mem.o \ 33 + flex_proportions.o ratelimit.o \ 34 34 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 35 35 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ 36 36 nmi_backtrace.o win_minmax.o memcat_p.o \
-37
lib/show_mem.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Generic show_mem() implementation 4 - * 5 - * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> 6 - */ 7 - 8 - #include <linux/mm.h> 9 - #include <linux/cma.h> 10 - 11 - void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 12 - { 13 - unsigned long total = 0, reserved = 0, highmem = 0; 14 - struct zone *zone; 15 - 16 - printk("Mem-Info:\n"); 17 - __show_free_areas(filter, nodemask, max_zone_idx); 18 - 19 - for_each_populated_zone(zone) { 20 - 21 - total += zone->present_pages; 22 - reserved += zone->present_pages - zone_managed_pages(zone); 23 - 24 - if (is_highmem(zone)) 25 - highmem += zone->present_pages; 26 - } 27 - 28 - printk("%lu pages RAM\n", total); 29 - printk("%lu pages HighMem/MovableOnly\n", highmem); 30 - printk("%lu pages reserved\n", reserved); 31 - #ifdef CONFIG_CMA 32 - printk("%lu pages cma reserved\n", totalcma_pages); 33 - #endif 34 - #ifdef CONFIG_MEMORY_FAILURE 35 - printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); 36 - #endif 37 - }
+1 -1
mm/Makefile
··· 51 51 readahead.o swap.o truncate.o vmscan.o shmem.o \ 52 52 util.o mmzone.o vmstat.o backing-dev.o \ 53 53 mm_init.o percpu.o slab_common.o \ 54 - compaction.o \ 54 + compaction.o show_mem.o\ 55 55 interval_tree.o list_lru.o workingset.o \ 56 56 debug.o gup.o mmap_lock.o $(mmu-y) 57 57
-402
mm/page_alloc.c
··· 18 18 #include <linux/stddef.h> 19 19 #include <linux/mm.h> 20 20 #include <linux/highmem.h> 21 - #include <linux/swap.h> 22 - #include <linux/swapops.h> 23 21 #include <linux/interrupt.h> 24 - #include <linux/pagemap.h> 25 22 #include <linux/jiffies.h> 26 23 #include <linux/compiler.h> 27 24 #include <linux/kernel.h> ··· 27 30 #include <linux/module.h> 28 31 #include <linux/suspend.h> 29 32 #include <linux/pagevec.h> 30 - #include <linux/blkdev.h> 31 - #include <linux/slab.h> 32 33 #include <linux/ratelimit.h> 33 34 #include <linux/oom.h> 34 35 #include <linux/topology.h> ··· 35 40 #include <linux/cpuset.h> 36 41 #include <linux/memory_hotplug.h> 37 42 #include <linux/nodemask.h> 38 - #include <linux/vmalloc.h> 39 43 #include <linux/vmstat.h> 40 - #include <linux/mempolicy.h> 41 - #include <linux/memremap.h> 42 - #include <linux/stop_machine.h> 43 - #include <linux/random.h> 44 44 #include <linux/sort.h> 45 45 #include <linux/pfn.h> 46 - #include <linux/backing-dev.h> 47 46 #include <linux/fault-inject.h> 48 - #include <linux/page-isolation.h> 49 - #include <linux/debugobjects.h> 50 - #include <linux/kmemleak.h> 51 47 #include <linux/compaction.h> 52 48 #include <trace/events/kmem.h> 53 49 #include <trace/events/oom.h> ··· 46 60 #include <linux/mm_inline.h> 47 61 #include <linux/mmu_notifier.h> 48 62 #include <linux/migrate.h> 49 - #include <linux/hugetlb.h> 50 - #include <linux/sched/rt.h> 51 63 #include <linux/sched/mm.h> 52 64 #include <linux/page_owner.h> 53 65 #include <linux/page_table_check.h> 54 - #include <linux/kthread.h> 55 66 #include <linux/memcontrol.h> 56 67 #include <linux/ftrace.h> 57 68 #include <linux/lockdep.h> ··· 56 73 #include <linux/psi.h> 57 74 #include <linux/khugepaged.h> 58 75 #include <linux/delayacct.h> 59 - #include <asm/sections.h> 60 - #include <asm/tlbflush.h> 61 76 #include <asm/div64.h> 62 77 #include "internal.h" 63 78 #include "shuffle.h" 64 79 #include "page_reporting.h" 65 - #include "swap.h" 66 80 67 81 /* Free Page 
Internal flags: for internal, non-pcp variants of free_pages(). */ 68 82 typedef int __bitwise fpi_t; ··· 205 225 #endif /* NUMA */ 206 226 }; 207 227 EXPORT_SYMBOL(node_states); 208 - 209 - atomic_long_t _totalram_pages __read_mostly; 210 - EXPORT_SYMBOL(_totalram_pages); 211 - unsigned long totalreserve_pages __read_mostly; 212 - unsigned long totalcma_pages __read_mostly; 213 228 214 229 int percpu_pagelist_high_fraction; 215 230 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; ··· 5076 5101 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5077 5102 } 5078 5103 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5079 - 5080 - static inline void show_node(struct zone *zone) 5081 - { 5082 - if (IS_ENABLED(CONFIG_NUMA)) 5083 - printk("Node %d ", zone_to_nid(zone)); 5084 - } 5085 - 5086 - long si_mem_available(void) 5087 - { 5088 - long available; 5089 - unsigned long pagecache; 5090 - unsigned long wmark_low = 0; 5091 - unsigned long pages[NR_LRU_LISTS]; 5092 - unsigned long reclaimable; 5093 - struct zone *zone; 5094 - int lru; 5095 - 5096 - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5097 - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5098 - 5099 - for_each_zone(zone) 5100 - wmark_low += low_wmark_pages(zone); 5101 - 5102 - /* 5103 - * Estimate the amount of memory available for userspace allocations, 5104 - * without causing swapping or OOM. 5105 - */ 5106 - available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5107 - 5108 - /* 5109 - * Not all the page cache can be freed, otherwise the system will 5110 - * start swapping or thrashing. Assume at least half of the page 5111 - * cache, or the low watermark worth of cache, needs to stay. 5112 - */ 5113 - pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5114 - pagecache -= min(pagecache / 2, wmark_low); 5115 - available += pagecache; 5116 - 5117 - /* 5118 - * Part of the reclaimable slab and other kernel memory consists of 5119 - * items that are in use, and cannot be freed. 
Cap this estimate at the 5120 - * low watermark. 5121 - */ 5122 - reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5123 - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5124 - available += reclaimable - min(reclaimable / 2, wmark_low); 5125 - 5126 - if (available < 0) 5127 - available = 0; 5128 - return available; 5129 - } 5130 - EXPORT_SYMBOL_GPL(si_mem_available); 5131 - 5132 - void si_meminfo(struct sysinfo *val) 5133 - { 5134 - val->totalram = totalram_pages(); 5135 - val->sharedram = global_node_page_state(NR_SHMEM); 5136 - val->freeram = global_zone_page_state(NR_FREE_PAGES); 5137 - val->bufferram = nr_blockdev_pages(); 5138 - val->totalhigh = totalhigh_pages(); 5139 - val->freehigh = nr_free_highpages(); 5140 - val->mem_unit = PAGE_SIZE; 5141 - } 5142 - 5143 - EXPORT_SYMBOL(si_meminfo); 5144 - 5145 - #ifdef CONFIG_NUMA 5146 - void si_meminfo_node(struct sysinfo *val, int nid) 5147 - { 5148 - int zone_type; /* needs to be signed */ 5149 - unsigned long managed_pages = 0; 5150 - unsigned long managed_highpages = 0; 5151 - unsigned long free_highpages = 0; 5152 - pg_data_t *pgdat = NODE_DATA(nid); 5153 - 5154 - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5155 - managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5156 - val->totalram = managed_pages; 5157 - val->sharedram = node_page_state(pgdat, NR_SHMEM); 5158 - val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5159 - #ifdef CONFIG_HIGHMEM 5160 - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5161 - struct zone *zone = &pgdat->node_zones[zone_type]; 5162 - 5163 - if (is_highmem(zone)) { 5164 - managed_highpages += zone_managed_pages(zone); 5165 - free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5166 - } 5167 - } 5168 - val->totalhigh = managed_highpages; 5169 - val->freehigh = free_highpages; 5170 - #else 5171 - val->totalhigh = managed_highpages; 5172 - val->freehigh = free_highpages; 5173 - #endif 5174 - val->mem_unit = 
PAGE_SIZE; 5175 - } 5176 - #endif 5177 - 5178 - /* 5179 - * Determine whether the node should be displayed or not, depending on whether 5180 - * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 5181 - */ 5182 - static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5183 - { 5184 - if (!(flags & SHOW_MEM_FILTER_NODES)) 5185 - return false; 5186 - 5187 - /* 5188 - * no node mask - aka implicit memory numa policy. Do not bother with 5189 - * the synchronization - read_mems_allowed_begin - because we do not 5190 - * have to be precise here. 5191 - */ 5192 - if (!nodemask) 5193 - nodemask = &cpuset_current_mems_allowed; 5194 - 5195 - return !node_isset(nid, *nodemask); 5196 - } 5197 - 5198 - static void show_migration_types(unsigned char type) 5199 - { 5200 - static const char types[MIGRATE_TYPES] = { 5201 - [MIGRATE_UNMOVABLE] = 'U', 5202 - [MIGRATE_MOVABLE] = 'M', 5203 - [MIGRATE_RECLAIMABLE] = 'E', 5204 - [MIGRATE_HIGHATOMIC] = 'H', 5205 - #ifdef CONFIG_CMA 5206 - [MIGRATE_CMA] = 'C', 5207 - #endif 5208 - #ifdef CONFIG_MEMORY_ISOLATION 5209 - [MIGRATE_ISOLATE] = 'I', 5210 - #endif 5211 - }; 5212 - char tmp[MIGRATE_TYPES + 1]; 5213 - char *p = tmp; 5214 - int i; 5215 - 5216 - for (i = 0; i < MIGRATE_TYPES; i++) { 5217 - if (type & (1 << i)) 5218 - *p++ = types[i]; 5219 - } 5220 - 5221 - *p = '\0'; 5222 - printk(KERN_CONT "(%s) ", tmp); 5223 - } 5224 - 5225 - static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) 5226 - { 5227 - int zone_idx; 5228 - for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) 5229 - if (zone_managed_pages(pgdat->node_zones + zone_idx)) 5230 - return true; 5231 - return false; 5232 - } 5233 - 5234 - /* 5235 - * Show free area list (used inside shift_scroll-lock stuff) 5236 - * We also calculate the percentage fragmentation. We do this by counting the 5237 - * memory on each free list with the exception of the first item on the list. 
5238 - * 5239 - * Bits in @filter: 5240 - * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5241 - * cpuset. 5242 - */ 5243 - void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 5244 - { 5245 - unsigned long free_pcp = 0; 5246 - int cpu, nid; 5247 - struct zone *zone; 5248 - pg_data_t *pgdat; 5249 - 5250 - for_each_populated_zone(zone) { 5251 - if (zone_idx(zone) > max_zone_idx) 5252 - continue; 5253 - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5254 - continue; 5255 - 5256 - for_each_online_cpu(cpu) 5257 - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5258 - } 5259 - 5260 - printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5261 - " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5262 - " unevictable:%lu dirty:%lu writeback:%lu\n" 5263 - " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5264 - " mapped:%lu shmem:%lu pagetables:%lu\n" 5265 - " sec_pagetables:%lu bounce:%lu\n" 5266 - " kernel_misc_reclaimable:%lu\n" 5267 - " free:%lu free_pcp:%lu free_cma:%lu\n", 5268 - global_node_page_state(NR_ACTIVE_ANON), 5269 - global_node_page_state(NR_INACTIVE_ANON), 5270 - global_node_page_state(NR_ISOLATED_ANON), 5271 - global_node_page_state(NR_ACTIVE_FILE), 5272 - global_node_page_state(NR_INACTIVE_FILE), 5273 - global_node_page_state(NR_ISOLATED_FILE), 5274 - global_node_page_state(NR_UNEVICTABLE), 5275 - global_node_page_state(NR_FILE_DIRTY), 5276 - global_node_page_state(NR_WRITEBACK), 5277 - global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5278 - global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5279 - global_node_page_state(NR_FILE_MAPPED), 5280 - global_node_page_state(NR_SHMEM), 5281 - global_node_page_state(NR_PAGETABLE), 5282 - global_node_page_state(NR_SECONDARY_PAGETABLE), 5283 - global_zone_page_state(NR_BOUNCE), 5284 - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 5285 - global_zone_page_state(NR_FREE_PAGES), 5286 - free_pcp, 5287 - 
global_zone_page_state(NR_FREE_CMA_PAGES)); 5288 - 5289 - for_each_online_pgdat(pgdat) { 5290 - if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5291 - continue; 5292 - if (!node_has_managed_zones(pgdat, max_zone_idx)) 5293 - continue; 5294 - 5295 - printk("Node %d" 5296 - " active_anon:%lukB" 5297 - " inactive_anon:%lukB" 5298 - " active_file:%lukB" 5299 - " inactive_file:%lukB" 5300 - " unevictable:%lukB" 5301 - " isolated(anon):%lukB" 5302 - " isolated(file):%lukB" 5303 - " mapped:%lukB" 5304 - " dirty:%lukB" 5305 - " writeback:%lukB" 5306 - " shmem:%lukB" 5307 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5308 - " shmem_thp: %lukB" 5309 - " shmem_pmdmapped: %lukB" 5310 - " anon_thp: %lukB" 5311 - #endif 5312 - " writeback_tmp:%lukB" 5313 - " kernel_stack:%lukB" 5314 - #ifdef CONFIG_SHADOW_CALL_STACK 5315 - " shadow_call_stack:%lukB" 5316 - #endif 5317 - " pagetables:%lukB" 5318 - " sec_pagetables:%lukB" 5319 - " all_unreclaimable? %s" 5320 - "\n", 5321 - pgdat->node_id, 5322 - K(node_page_state(pgdat, NR_ACTIVE_ANON)), 5323 - K(node_page_state(pgdat, NR_INACTIVE_ANON)), 5324 - K(node_page_state(pgdat, NR_ACTIVE_FILE)), 5325 - K(node_page_state(pgdat, NR_INACTIVE_FILE)), 5326 - K(node_page_state(pgdat, NR_UNEVICTABLE)), 5327 - K(node_page_state(pgdat, NR_ISOLATED_ANON)), 5328 - K(node_page_state(pgdat, NR_ISOLATED_FILE)), 5329 - K(node_page_state(pgdat, NR_FILE_MAPPED)), 5330 - K(node_page_state(pgdat, NR_FILE_DIRTY)), 5331 - K(node_page_state(pgdat, NR_WRITEBACK)), 5332 - K(node_page_state(pgdat, NR_SHMEM)), 5333 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5334 - K(node_page_state(pgdat, NR_SHMEM_THPS)), 5335 - K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 5336 - K(node_page_state(pgdat, NR_ANON_THPS)), 5337 - #endif 5338 - K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 5339 - node_page_state(pgdat, NR_KERNEL_STACK_KB), 5340 - #ifdef CONFIG_SHADOW_CALL_STACK 5341 - node_page_state(pgdat, NR_KERNEL_SCS_KB), 5342 - #endif 5343 - K(node_page_state(pgdat, NR_PAGETABLE)), 
5344 - K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), 5345 - pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 5346 - "yes" : "no"); 5347 - } 5348 - 5349 - for_each_populated_zone(zone) { 5350 - int i; 5351 - 5352 - if (zone_idx(zone) > max_zone_idx) 5353 - continue; 5354 - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5355 - continue; 5356 - 5357 - free_pcp = 0; 5358 - for_each_online_cpu(cpu) 5359 - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5360 - 5361 - show_node(zone); 5362 - printk(KERN_CONT 5363 - "%s" 5364 - " free:%lukB" 5365 - " boost:%lukB" 5366 - " min:%lukB" 5367 - " low:%lukB" 5368 - " high:%lukB" 5369 - " reserved_highatomic:%luKB" 5370 - " active_anon:%lukB" 5371 - " inactive_anon:%lukB" 5372 - " active_file:%lukB" 5373 - " inactive_file:%lukB" 5374 - " unevictable:%lukB" 5375 - " writepending:%lukB" 5376 - " present:%lukB" 5377 - " managed:%lukB" 5378 - " mlocked:%lukB" 5379 - " bounce:%lukB" 5380 - " free_pcp:%lukB" 5381 - " local_pcp:%ukB" 5382 - " free_cma:%lukB" 5383 - "\n", 5384 - zone->name, 5385 - K(zone_page_state(zone, NR_FREE_PAGES)), 5386 - K(zone->watermark_boost), 5387 - K(min_wmark_pages(zone)), 5388 - K(low_wmark_pages(zone)), 5389 - K(high_wmark_pages(zone)), 5390 - K(zone->nr_reserved_highatomic), 5391 - K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 5392 - K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 5393 - K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 5394 - K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 5395 - K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 5396 - K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 5397 - K(zone->present_pages), 5398 - K(zone_managed_pages(zone)), 5399 - K(zone_page_state(zone, NR_MLOCK)), 5400 - K(zone_page_state(zone, NR_BOUNCE)), 5401 - K(free_pcp), 5402 - K(this_cpu_read(zone->per_cpu_pageset->count)), 5403 - K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 5404 - printk("lowmem_reserve[]:"); 5405 - for (i = 0; i < MAX_NR_ZONES; i++) 5406 - printk(KERN_CONT 
" %ld", zone->lowmem_reserve[i]); 5407 - printk(KERN_CONT "\n"); 5408 - } 5409 - 5410 - for_each_populated_zone(zone) { 5411 - unsigned int order; 5412 - unsigned long nr[MAX_ORDER + 1], flags, total = 0; 5413 - unsigned char types[MAX_ORDER + 1]; 5414 - 5415 - if (zone_idx(zone) > max_zone_idx) 5416 - continue; 5417 - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5418 - continue; 5419 - show_node(zone); 5420 - printk(KERN_CONT "%s: ", zone->name); 5421 - 5422 - spin_lock_irqsave(&zone->lock, flags); 5423 - for (order = 0; order <= MAX_ORDER; order++) { 5424 - struct free_area *area = &zone->free_area[order]; 5425 - int type; 5426 - 5427 - nr[order] = area->nr_free; 5428 - total += nr[order] << order; 5429 - 5430 - types[order] = 0; 5431 - for (type = 0; type < MIGRATE_TYPES; type++) { 5432 - if (!free_area_empty(area, type)) 5433 - types[order] |= 1 << type; 5434 - } 5435 - } 5436 - spin_unlock_irqrestore(&zone->lock, flags); 5437 - for (order = 0; order <= MAX_ORDER; order++) { 5438 - printk(KERN_CONT "%lu*%lukB ", 5439 - nr[order], K(1UL) << order); 5440 - if (nr[order]) 5441 - show_migration_types(types[order]); 5442 - } 5443 - printk(KERN_CONT "= %lukB\n", K(total)); 5444 - } 5445 - 5446 - for_each_online_node(nid) { 5447 - if (show_mem_node_skip(filter, nid, nodemask)) 5448 - continue; 5449 - hugetlb_show_meminfo_node(nid); 5450 - } 5451 - 5452 - printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 5453 - 5454 - show_swap_cache_info(); 5455 - } 5456 5104 5457 5105 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5458 5106 {
+429
mm/show_mem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Generic show_mem() implementation 4 + * 5 + * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> 6 + */ 7 + 8 + #include <linux/blkdev.h> 9 + #include <linux/cma.h> 10 + #include <linux/cpuset.h> 11 + #include <linux/highmem.h> 12 + #include <linux/hugetlb.h> 13 + #include <linux/mm.h> 14 + #include <linux/mmzone.h> 15 + #include <linux/swap.h> 16 + #include <linux/vmstat.h> 17 + 18 + #include "internal.h" 19 + #include "swap.h" 20 + 21 + atomic_long_t _totalram_pages __read_mostly; 22 + EXPORT_SYMBOL(_totalram_pages); 23 + unsigned long totalreserve_pages __read_mostly; 24 + unsigned long totalcma_pages __read_mostly; 25 + 26 + static inline void show_node(struct zone *zone) 27 + { 28 + if (IS_ENABLED(CONFIG_NUMA)) 29 + printk("Node %d ", zone_to_nid(zone)); 30 + } 31 + 32 + long si_mem_available(void) 33 + { 34 + long available; 35 + unsigned long pagecache; 36 + unsigned long wmark_low = 0; 37 + unsigned long pages[NR_LRU_LISTS]; 38 + unsigned long reclaimable; 39 + struct zone *zone; 40 + int lru; 41 + 42 + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 43 + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 44 + 45 + for_each_zone(zone) 46 + wmark_low += low_wmark_pages(zone); 47 + 48 + /* 49 + * Estimate the amount of memory available for userspace allocations, 50 + * without causing swapping or OOM. 51 + */ 52 + available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 53 + 54 + /* 55 + * Not all the page cache can be freed, otherwise the system will 56 + * start swapping or thrashing. Assume at least half of the page 57 + * cache, or the low watermark worth of cache, needs to stay. 58 + */ 59 + pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 60 + pagecache -= min(pagecache / 2, wmark_low); 61 + available += pagecache; 62 + 63 + /* 64 + * Part of the reclaimable slab and other kernel memory consists of 65 + * items that are in use, and cannot be freed. 
Cap this estimate at the 66 + * low watermark. 67 + */ 68 + reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 69 + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 70 + available += reclaimable - min(reclaimable / 2, wmark_low); 71 + 72 + if (available < 0) 73 + available = 0; 74 + return available; 75 + } 76 + EXPORT_SYMBOL_GPL(si_mem_available); 77 + 78 + void si_meminfo(struct sysinfo *val) 79 + { 80 + val->totalram = totalram_pages(); 81 + val->sharedram = global_node_page_state(NR_SHMEM); 82 + val->freeram = global_zone_page_state(NR_FREE_PAGES); 83 + val->bufferram = nr_blockdev_pages(); 84 + val->totalhigh = totalhigh_pages(); 85 + val->freehigh = nr_free_highpages(); 86 + val->mem_unit = PAGE_SIZE; 87 + } 88 + 89 + EXPORT_SYMBOL(si_meminfo); 90 + 91 + #ifdef CONFIG_NUMA 92 + void si_meminfo_node(struct sysinfo *val, int nid) 93 + { 94 + int zone_type; /* needs to be signed */ 95 + unsigned long managed_pages = 0; 96 + unsigned long managed_highpages = 0; 97 + unsigned long free_highpages = 0; 98 + pg_data_t *pgdat = NODE_DATA(nid); 99 + 100 + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 101 + managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 102 + val->totalram = managed_pages; 103 + val->sharedram = node_page_state(pgdat, NR_SHMEM); 104 + val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 105 + #ifdef CONFIG_HIGHMEM 106 + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 107 + struct zone *zone = &pgdat->node_zones[zone_type]; 108 + 109 + if (is_highmem(zone)) { 110 + managed_highpages += zone_managed_pages(zone); 111 + free_highpages += zone_page_state(zone, NR_FREE_PAGES); 112 + } 113 + } 114 + val->totalhigh = managed_highpages; 115 + val->freehigh = free_highpages; 116 + #else 117 + val->totalhigh = managed_highpages; 118 + val->freehigh = free_highpages; 119 + #endif 120 + val->mem_unit = PAGE_SIZE; 121 + } 122 + #endif 123 + 124 + /* 125 + * Determine whether the node should be 
displayed or not, depending on whether 126 + * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 127 + */ 128 + static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 129 + { 130 + if (!(flags & SHOW_MEM_FILTER_NODES)) 131 + return false; 132 + 133 + /* 134 + * no node mask - aka implicit memory numa policy. Do not bother with 135 + * the synchronization - read_mems_allowed_begin - because we do not 136 + * have to be precise here. 137 + */ 138 + if (!nodemask) 139 + nodemask = &cpuset_current_mems_allowed; 140 + 141 + return !node_isset(nid, *nodemask); 142 + } 143 + 144 + static void show_migration_types(unsigned char type) 145 + { 146 + static const char types[MIGRATE_TYPES] = { 147 + [MIGRATE_UNMOVABLE] = 'U', 148 + [MIGRATE_MOVABLE] = 'M', 149 + [MIGRATE_RECLAIMABLE] = 'E', 150 + [MIGRATE_HIGHATOMIC] = 'H', 151 + #ifdef CONFIG_CMA 152 + [MIGRATE_CMA] = 'C', 153 + #endif 154 + #ifdef CONFIG_MEMORY_ISOLATION 155 + [MIGRATE_ISOLATE] = 'I', 156 + #endif 157 + }; 158 + char tmp[MIGRATE_TYPES + 1]; 159 + char *p = tmp; 160 + int i; 161 + 162 + for (i = 0; i < MIGRATE_TYPES; i++) { 163 + if (type & (1 << i)) 164 + *p++ = types[i]; 165 + } 166 + 167 + *p = '\0'; 168 + printk(KERN_CONT "(%s) ", tmp); 169 + } 170 + 171 + static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) 172 + { 173 + int zone_idx; 174 + for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) 175 + if (zone_managed_pages(pgdat->node_zones + zone_idx)) 176 + return true; 177 + return false; 178 + } 179 + 180 + /* 181 + * Show free area list (used inside shift_scroll-lock stuff) 182 + * We also calculate the percentage fragmentation. We do this by counting the 183 + * memory on each free list with the exception of the first item on the list. 184 + * 185 + * Bits in @filter: 186 + * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 187 + * cpuset. 
188 + */ 189 + void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 190 + { 191 + unsigned long free_pcp = 0; 192 + int cpu, nid; 193 + struct zone *zone; 194 + pg_data_t *pgdat; 195 + 196 + for_each_populated_zone(zone) { 197 + if (zone_idx(zone) > max_zone_idx) 198 + continue; 199 + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 200 + continue; 201 + 202 + for_each_online_cpu(cpu) 203 + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 204 + } 205 + 206 + printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 207 + " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 208 + " unevictable:%lu dirty:%lu writeback:%lu\n" 209 + " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 210 + " mapped:%lu shmem:%lu pagetables:%lu\n" 211 + " sec_pagetables:%lu bounce:%lu\n" 212 + " kernel_misc_reclaimable:%lu\n" 213 + " free:%lu free_pcp:%lu free_cma:%lu\n", 214 + global_node_page_state(NR_ACTIVE_ANON), 215 + global_node_page_state(NR_INACTIVE_ANON), 216 + global_node_page_state(NR_ISOLATED_ANON), 217 + global_node_page_state(NR_ACTIVE_FILE), 218 + global_node_page_state(NR_INACTIVE_FILE), 219 + global_node_page_state(NR_ISOLATED_FILE), 220 + global_node_page_state(NR_UNEVICTABLE), 221 + global_node_page_state(NR_FILE_DIRTY), 222 + global_node_page_state(NR_WRITEBACK), 223 + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 224 + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 225 + global_node_page_state(NR_FILE_MAPPED), 226 + global_node_page_state(NR_SHMEM), 227 + global_node_page_state(NR_PAGETABLE), 228 + global_node_page_state(NR_SECONDARY_PAGETABLE), 229 + global_zone_page_state(NR_BOUNCE), 230 + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 231 + global_zone_page_state(NR_FREE_PAGES), 232 + free_pcp, 233 + global_zone_page_state(NR_FREE_CMA_PAGES)); 234 + 235 + for_each_online_pgdat(pgdat) { 236 + if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 237 + continue; 238 + if 
(!node_has_managed_zones(pgdat, max_zone_idx)) 239 + continue; 240 + 241 + printk("Node %d" 242 + " active_anon:%lukB" 243 + " inactive_anon:%lukB" 244 + " active_file:%lukB" 245 + " inactive_file:%lukB" 246 + " unevictable:%lukB" 247 + " isolated(anon):%lukB" 248 + " isolated(file):%lukB" 249 + " mapped:%lukB" 250 + " dirty:%lukB" 251 + " writeback:%lukB" 252 + " shmem:%lukB" 253 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 254 + " shmem_thp: %lukB" 255 + " shmem_pmdmapped: %lukB" 256 + " anon_thp: %lukB" 257 + #endif 258 + " writeback_tmp:%lukB" 259 + " kernel_stack:%lukB" 260 + #ifdef CONFIG_SHADOW_CALL_STACK 261 + " shadow_call_stack:%lukB" 262 + #endif 263 + " pagetables:%lukB" 264 + " sec_pagetables:%lukB" 265 + " all_unreclaimable? %s" 266 + "\n", 267 + pgdat->node_id, 268 + K(node_page_state(pgdat, NR_ACTIVE_ANON)), 269 + K(node_page_state(pgdat, NR_INACTIVE_ANON)), 270 + K(node_page_state(pgdat, NR_ACTIVE_FILE)), 271 + K(node_page_state(pgdat, NR_INACTIVE_FILE)), 272 + K(node_page_state(pgdat, NR_UNEVICTABLE)), 273 + K(node_page_state(pgdat, NR_ISOLATED_ANON)), 274 + K(node_page_state(pgdat, NR_ISOLATED_FILE)), 275 + K(node_page_state(pgdat, NR_FILE_MAPPED)), 276 + K(node_page_state(pgdat, NR_FILE_DIRTY)), 277 + K(node_page_state(pgdat, NR_WRITEBACK)), 278 + K(node_page_state(pgdat, NR_SHMEM)), 279 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 280 + K(node_page_state(pgdat, NR_SHMEM_THPS)), 281 + K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 282 + K(node_page_state(pgdat, NR_ANON_THPS)), 283 + #endif 284 + K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 285 + node_page_state(pgdat, NR_KERNEL_STACK_KB), 286 + #ifdef CONFIG_SHADOW_CALL_STACK 287 + node_page_state(pgdat, NR_KERNEL_SCS_KB), 288 + #endif 289 + K(node_page_state(pgdat, NR_PAGETABLE)), 290 + K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), 291 + pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
292 + "yes" : "no"); 293 + } 294 + 295 + for_each_populated_zone(zone) { 296 + int i; 297 + 298 + if (zone_idx(zone) > max_zone_idx) 299 + continue; 300 + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 301 + continue; 302 + 303 + free_pcp = 0; 304 + for_each_online_cpu(cpu) 305 + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 306 + 307 + show_node(zone); 308 + printk(KERN_CONT 309 + "%s" 310 + " free:%lukB" 311 + " boost:%lukB" 312 + " min:%lukB" 313 + " low:%lukB" 314 + " high:%lukB" 315 + " reserved_highatomic:%luKB" 316 + " active_anon:%lukB" 317 + " inactive_anon:%lukB" 318 + " active_file:%lukB" 319 + " inactive_file:%lukB" 320 + " unevictable:%lukB" 321 + " writepending:%lukB" 322 + " present:%lukB" 323 + " managed:%lukB" 324 + " mlocked:%lukB" 325 + " bounce:%lukB" 326 + " free_pcp:%lukB" 327 + " local_pcp:%ukB" 328 + " free_cma:%lukB" 329 + "\n", 330 + zone->name, 331 + K(zone_page_state(zone, NR_FREE_PAGES)), 332 + K(zone->watermark_boost), 333 + K(min_wmark_pages(zone)), 334 + K(low_wmark_pages(zone)), 335 + K(high_wmark_pages(zone)), 336 + K(zone->nr_reserved_highatomic), 337 + K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 338 + K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 339 + K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 340 + K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 341 + K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 342 + K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 343 + K(zone->present_pages), 344 + K(zone_managed_pages(zone)), 345 + K(zone_page_state(zone, NR_MLOCK)), 346 + K(zone_page_state(zone, NR_BOUNCE)), 347 + K(free_pcp), 348 + K(this_cpu_read(zone->per_cpu_pageset->count)), 349 + K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 350 + printk("lowmem_reserve[]:"); 351 + for (i = 0; i < MAX_NR_ZONES; i++) 352 + printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 353 + printk(KERN_CONT "\n"); 354 + } 355 + 356 + for_each_populated_zone(zone) { 357 + unsigned int order; 358 + unsigned long nr[MAX_ORDER 
+ 1], flags, total = 0; 359 + unsigned char types[MAX_ORDER + 1]; 360 + 361 + if (zone_idx(zone) > max_zone_idx) 362 + continue; 363 + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 364 + continue; 365 + show_node(zone); 366 + printk(KERN_CONT "%s: ", zone->name); 367 + 368 + spin_lock_irqsave(&zone->lock, flags); 369 + for (order = 0; order <= MAX_ORDER; order++) { 370 + struct free_area *area = &zone->free_area[order]; 371 + int type; 372 + 373 + nr[order] = area->nr_free; 374 + total += nr[order] << order; 375 + 376 + types[order] = 0; 377 + for (type = 0; type < MIGRATE_TYPES; type++) { 378 + if (!free_area_empty(area, type)) 379 + types[order] |= 1 << type; 380 + } 381 + } 382 + spin_unlock_irqrestore(&zone->lock, flags); 383 + for (order = 0; order <= MAX_ORDER; order++) { 384 + printk(KERN_CONT "%lu*%lukB ", 385 + nr[order], K(1UL) << order); 386 + if (nr[order]) 387 + show_migration_types(types[order]); 388 + } 389 + printk(KERN_CONT "= %lukB\n", K(total)); 390 + } 391 + 392 + for_each_online_node(nid) { 393 + if (show_mem_node_skip(filter, nid, nodemask)) 394 + continue; 395 + hugetlb_show_meminfo_node(nid); 396 + } 397 + 398 + printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 399 + 400 + show_swap_cache_info(); 401 + } 402 + 403 + void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 404 + { 405 + unsigned long total = 0, reserved = 0, highmem = 0; 406 + struct zone *zone; 407 + 408 + printk("Mem-Info:\n"); 409 + __show_free_areas(filter, nodemask, max_zone_idx); 410 + 411 + for_each_populated_zone(zone) { 412 + 413 + total += zone->present_pages; 414 + reserved += zone->present_pages - zone_managed_pages(zone); 415 + 416 + if (is_highmem(zone)) 417 + highmem += zone->present_pages; 418 + } 419 + 420 + printk("%lu pages RAM\n", total); 421 + printk("%lu pages HighMem/MovableOnly\n", highmem); 422 + printk("%lu pages reserved\n", reserved); 423 + #ifdef CONFIG_CMA 424 + printk("%lu pages cma 
reserved\n", totalcma_pages); 425 + #endif 426 + #ifdef CONFIG_MEMORY_FAILURE 427 + printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); 428 + #endif 429 + }