Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: move most file-based accounting to the node

There are now a number of accounting oddities such as mapped file pages
being accounted for on the node while the total number of file pages is
accounted on the zone. This can be coped with to some extent but it's
confusing so this patch moves the relevant file-based accounting. Due to
throttling logic in the page allocator for reliable OOM detection, it is
still necessary to track dirty and writeback pages on a per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mel Gorman and committed by Linus Torvalds
11fb9989 4b9d0fab

+155 -162
+1 -1
arch/s390/appldata/appldata_mem.c
··· 102 102 mem_data->totalhigh = P2K(val.totalhigh); 103 103 mem_data->freehigh = P2K(val.freehigh); 104 104 mem_data->bufferram = P2K(val.bufferram); 105 - mem_data->cached = P2K(global_page_state(NR_FILE_PAGES) 105 + mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES) 106 106 - val.bufferram); 107 107 108 108 si_swapinfo(&val);
+4 -4
arch/tile/mm/pgtable.c
··· 49 49 global_node_page_state(NR_ACTIVE_FILE)), 50 50 (global_node_page_state(NR_INACTIVE_ANON) + 51 51 global_node_page_state(NR_INACTIVE_FILE)), 52 - global_page_state(NR_FILE_DIRTY), 53 - global_page_state(NR_WRITEBACK), 54 - global_page_state(NR_UNSTABLE_NFS), 52 + global_node_page_state(NR_FILE_DIRTY), 53 + global_node_page_state(NR_WRITEBACK), 54 + global_node_page_state(NR_UNSTABLE_NFS), 55 55 global_page_state(NR_FREE_PAGES), 56 56 (global_page_state(NR_SLAB_RECLAIMABLE) + 57 57 global_page_state(NR_SLAB_UNRECLAIMABLE)), 58 58 global_node_page_state(NR_FILE_MAPPED), 59 59 global_page_state(NR_PAGETABLE), 60 60 global_page_state(NR_BOUNCE), 61 - global_page_state(NR_FILE_PAGES), 61 + global_node_page_state(NR_FILE_PAGES), 62 62 get_nr_swap_pages()); 63 63 64 64 for_each_zone(zone) {
+8 -8
drivers/base/node.c
··· 118 118 "Node %d ShmemPmdMapped: %8lu kB\n" 119 119 #endif 120 120 , 121 - nid, K(sum_zone_node_page_state(nid, NR_FILE_DIRTY)), 122 - nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK)), 123 - nid, K(sum_zone_node_page_state(nid, NR_FILE_PAGES)), 121 + nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), 122 + nid, K(node_page_state(pgdat, NR_WRITEBACK)), 123 + nid, K(node_page_state(pgdat, NR_FILE_PAGES)), 124 124 nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), 125 125 nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), 126 126 nid, K(i.sharedram), 127 127 nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) * 128 128 THREAD_SIZE / 1024, 129 129 nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), 130 - nid, K(sum_zone_node_page_state(nid, NR_UNSTABLE_NFS)), 130 + nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 131 131 nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), 132 - nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK_TEMP)), 132 + nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 133 133 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) + 134 134 sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 135 135 nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)), 136 136 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 137 137 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), 138 - nid, K(sum_zone_node_page_state(nid, NR_ANON_THPS) * 138 + nid, K(node_page_state(pgdat, NR_ANON_THPS) * 139 139 HPAGE_PMD_NR), 140 - nid, K(sum_zone_node_page_state(nid, NR_SHMEM_THPS) * 140 + nid, K(node_page_state(pgdat, NR_SHMEM_THPS) * 141 141 HPAGE_PMD_NR), 142 - nid, K(sum_zone_node_page_state(nid, NR_SHMEM_PMDMAPPED) * 142 + nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * 143 143 HPAGE_PMD_NR)); 144 144 #else 145 145 nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+2 -2
drivers/staging/android/lowmemorykiller.c
··· 91 91 short selected_oom_score_adj; 92 92 int array_size = ARRAY_SIZE(lowmem_adj); 93 93 int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; 94 - int other_file = global_page_state(NR_FILE_PAGES) - 95 - global_page_state(NR_SHMEM) - 94 + int other_file = global_node_page_state(NR_FILE_PAGES) - 95 + global_node_page_state(NR_SHMEM) - 96 96 total_swapcache_pages(); 97 97 98 98 if (lowmem_adj_size < array_size)
+4 -2
drivers/staging/lustre/lustre/osc/osc_cache.c
··· 1864 1864 LASSERT(page_count >= 0); 1865 1865 1866 1866 for (i = 0; i < page_count; i++) 1867 - dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS); 1867 + dec_node_page_state(desc->bd_iov[i].kiov_page, 1868 + NR_UNSTABLE_NFS); 1868 1869 1869 1870 atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr); 1870 1871 LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0); ··· 1899 1898 LASSERT(page_count >= 0); 1900 1899 1901 1900 for (i = 0; i < page_count; i++) 1902 - inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS); 1901 + inc_node_page_state(desc->bd_iov[i].kiov_page, 1902 + NR_UNSTABLE_NFS); 1903 1903 1904 1904 LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0); 1905 1905 atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+2 -2
fs/fs-writeback.c
··· 1807 1807 */ 1808 1808 static unsigned long get_nr_dirty_pages(void) 1809 1809 { 1810 - return global_page_state(NR_FILE_DIRTY) + 1811 - global_page_state(NR_UNSTABLE_NFS) + 1810 + return global_node_page_state(NR_FILE_DIRTY) + 1811 + global_node_page_state(NR_UNSTABLE_NFS) + 1812 1812 get_nr_dirty_inodes(); 1813 1813 } 1814 1814
+4 -4
fs/fuse/file.c
··· 1452 1452 list_del(&req->writepages_entry); 1453 1453 for (i = 0; i < req->num_pages; i++) { 1454 1454 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1455 - dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP); 1455 + dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP); 1456 1456 wb_writeout_inc(&bdi->wb); 1457 1457 } 1458 1458 wake_up(&fi->page_waitq); ··· 1642 1642 req->inode = inode; 1643 1643 1644 1644 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); 1645 - inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); 1645 + inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); 1646 1646 1647 1647 spin_lock(&fc->lock); 1648 1648 list_add(&req->writepages_entry, &fi->writepages); ··· 1756 1756 spin_unlock(&fc->lock); 1757 1757 1758 1758 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1759 - dec_zone_page_state(page, NR_WRITEBACK_TEMP); 1759 + dec_node_page_state(page, NR_WRITEBACK_TEMP); 1760 1760 wb_writeout_inc(&bdi->wb); 1761 1761 fuse_writepage_free(fc, new_req); 1762 1762 fuse_request_free(new_req); ··· 1855 1855 req->page_descs[req->num_pages].length = PAGE_SIZE; 1856 1856 1857 1857 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); 1858 - inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); 1858 + inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); 1859 1859 1860 1860 err = 0; 1861 1861 if (is_writeback && fuse_writepage_in_flight(req, page)) {
+1 -1
fs/nfs/internal.h
··· 623 623 if (!cinfo->dreq) { 624 624 struct inode *inode = page_file_mapping(page)->host; 625 625 626 - inc_zone_page_state(page, NR_UNSTABLE_NFS); 626 + inc_node_page_state(page, NR_UNSTABLE_NFS); 627 627 inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); 628 628 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 629 629 }
+1 -1
fs/nfs/write.c
··· 898 898 static void 899 899 nfs_clear_page_commit(struct page *page) 900 900 { 901 - dec_zone_page_state(page, NR_UNSTABLE_NFS); 901 + dec_node_page_state(page, NR_UNSTABLE_NFS); 902 902 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb, 903 903 WB_RECLAIMABLE); 904 904 }
+8 -8
fs/proc/meminfo.c
··· 40 40 si_swapinfo(&i); 41 41 committed = percpu_counter_read_positive(&vm_committed_as); 42 42 43 - cached = global_page_state(NR_FILE_PAGES) - 43 + cached = global_node_page_state(NR_FILE_PAGES) - 44 44 total_swapcache_pages() - i.bufferram; 45 45 if (cached < 0) 46 46 cached = 0; ··· 138 138 #endif 139 139 K(i.totalswap), 140 140 K(i.freeswap), 141 - K(global_page_state(NR_FILE_DIRTY)), 142 - K(global_page_state(NR_WRITEBACK)), 141 + K(global_node_page_state(NR_FILE_DIRTY)), 142 + K(global_node_page_state(NR_WRITEBACK)), 143 143 K(global_node_page_state(NR_ANON_MAPPED)), 144 144 K(global_node_page_state(NR_FILE_MAPPED)), 145 145 K(i.sharedram), ··· 152 152 #ifdef CONFIG_QUICKLIST 153 153 K(quicklist_total_size()), 154 154 #endif 155 - K(global_page_state(NR_UNSTABLE_NFS)), 155 + K(global_node_page_state(NR_UNSTABLE_NFS)), 156 156 K(global_page_state(NR_BOUNCE)), 157 - K(global_page_state(NR_WRITEBACK_TEMP)), 157 + K(global_node_page_state(NR_WRITEBACK_TEMP)), 158 158 K(vm_commit_limit()), 159 159 K(committed), 160 160 (unsigned long)VMALLOC_TOTAL >> 10, ··· 164 164 , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) 165 165 #endif 166 166 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 167 - , K(global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR) 168 - , K(global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR) 169 - , K(global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR) 167 + , K(global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR) 168 + , K(global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR) 169 + , K(global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR) 170 170 #endif 171 171 #ifdef CONFIG_CMA 172 172 , K(totalcma_pages)
+10 -9
include/linux/mmzone.h
··· 114 114 NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ 115 115 NR_ZONE_LRU_ANON = NR_ZONE_LRU_BASE, 116 116 NR_ZONE_LRU_FILE, 117 + NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ 117 118 NR_MLOCK, /* mlock()ed pages found and moved off LRU */ 118 - NR_FILE_PAGES, 119 - NR_FILE_DIRTY, 120 - NR_WRITEBACK, 121 119 NR_SLAB_RECLAIMABLE, 122 120 NR_SLAB_UNRECLAIMABLE, 123 121 NR_PAGETABLE, /* used for pagetables */ 124 122 NR_KERNEL_STACK, 125 123 /* Second 128 byte cacheline */ 126 - NR_UNSTABLE_NFS, /* NFS unstable pages */ 127 124 NR_BOUNCE, 128 125 NR_VMSCAN_WRITE, 129 126 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ 130 - NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ 131 - NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ 132 127 NR_DIRTIED, /* page dirtyings since bootup */ 133 128 NR_WRITTEN, /* page writings since bootup */ 134 129 #if IS_ENABLED(CONFIG_ZSMALLOC) ··· 137 142 NUMA_LOCAL, /* allocation from local node */ 138 143 NUMA_OTHER, /* allocation from other node */ 139 144 #endif 140 - NR_ANON_THPS, 141 - NR_SHMEM_THPS, 142 - NR_SHMEM_PMDMAPPED, 143 145 NR_FREE_CMA_PAGES, 144 146 NR_VM_ZONE_STAT_ITEMS }; 145 147 ··· 156 164 NR_ANON_MAPPED, /* Mapped anonymous pages */ 157 165 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 158 166 only modified from process context */ 167 + NR_FILE_PAGES, 168 + NR_FILE_DIRTY, 169 + NR_WRITEBACK, 170 + NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ 171 + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ 172 + NR_SHMEM_THPS, 173 + NR_SHMEM_PMDMAPPED, 174 + NR_ANON_THPS, 175 + NR_UNSTABLE_NFS, /* NFS unstable pages */ 159 176 NR_VM_NODE_STAT_ITEMS 160 177 }; 161 178
+3 -3
include/trace/events/writeback.h
··· 412 412 ), 413 413 414 414 TP_fast_assign( 415 - __entry->nr_dirty = global_page_state(NR_FILE_DIRTY); 416 - __entry->nr_writeback = global_page_state(NR_WRITEBACK); 417 - __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS); 415 + __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY); 416 + __entry->nr_writeback = global_node_page_state(NR_WRITEBACK); 417 + __entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS); 418 418 __entry->nr_dirtied = global_page_state(NR_DIRTIED); 419 419 __entry->nr_written = global_page_state(NR_WRITTEN); 420 420 __entry->background_thresh = background_thresh;
+6 -6
mm/filemap.c
··· 218 218 219 219 /* hugetlb pages do not participate in page cache accounting. */ 220 220 if (!PageHuge(page)) 221 - __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr); 221 + __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); 222 222 if (PageSwapBacked(page)) { 223 - __mod_zone_page_state(page_zone(page), NR_SHMEM, -nr); 223 + __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); 224 224 if (PageTransHuge(page)) 225 - __dec_zone_page_state(page, NR_SHMEM_THPS); 225 + __dec_node_page_state(page, NR_SHMEM_THPS); 226 226 } else { 227 227 VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page); 228 228 } ··· 568 568 * hugetlb pages do not participate in page cache accounting. 569 569 */ 570 570 if (!PageHuge(new)) 571 - __inc_zone_page_state(new, NR_FILE_PAGES); 571 + __inc_node_page_state(new, NR_FILE_PAGES); 572 572 if (PageSwapBacked(new)) 573 - __inc_zone_page_state(new, NR_SHMEM); 573 + __inc_node_page_state(new, NR_SHMEM); 574 574 spin_unlock_irqrestore(&mapping->tree_lock, flags); 575 575 mem_cgroup_migrate(old, new); 576 576 radix_tree_preload_end(); ··· 677 677 678 678 /* hugetlb pages do not participate in page cache accounting. */ 679 679 if (!huge) 680 - __inc_zone_page_state(page, NR_FILE_PAGES); 680 + __inc_node_page_state(page, NR_FILE_PAGES); 681 681 spin_unlock_irq(&mapping->tree_lock); 682 682 if (!huge) 683 683 mem_cgroup_commit_charge(page, memcg, false, false);
+2 -2
mm/huge_memory.c
··· 1586 1586 1587 1587 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { 1588 1588 /* Last compound_mapcount is gone. */ 1589 - __dec_zone_page_state(page, NR_ANON_THPS); 1589 + __dec_node_page_state(page, NR_ANON_THPS); 1590 1590 if (TestClearPageDoubleMap(page)) { 1591 1591 /* No need in mapcount reference anymore */ 1592 1592 for (i = 0; i < HPAGE_PMD_NR; i++) ··· 2061 2061 list_del(page_deferred_list(head)); 2062 2062 } 2063 2063 if (mapping) 2064 - __dec_zone_page_state(page, NR_SHMEM_THPS); 2064 + __dec_node_page_state(page, NR_SHMEM_THPS); 2065 2065 spin_unlock(&pgdata->split_queue_lock); 2066 2066 __split_huge_page(page, list, flags); 2067 2067 ret = 0;
+3 -3
mm/khugepaged.c
··· 1483 1483 } 1484 1484 1485 1485 local_irq_save(flags); 1486 - __inc_zone_page_state(new_page, NR_SHMEM_THPS); 1486 + __inc_node_page_state(new_page, NR_SHMEM_THPS); 1487 1487 if (nr_none) { 1488 - __mod_zone_page_state(zone, NR_FILE_PAGES, nr_none); 1489 - __mod_zone_page_state(zone, NR_SHMEM, nr_none); 1488 + __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none); 1489 + __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none); 1490 1490 } 1491 1491 local_irq_restore(flags); 1492 1492
+8 -6
mm/migrate.c
··· 505 505 * are mapped to swap space. 506 506 */ 507 507 if (newzone != oldzone) { 508 - __dec_zone_state(oldzone, NR_FILE_PAGES); 509 - __inc_zone_state(newzone, NR_FILE_PAGES); 508 + __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES); 509 + __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES); 510 510 if (PageSwapBacked(page) && !PageSwapCache(page)) { 511 - __dec_zone_state(oldzone, NR_SHMEM); 512 - __inc_zone_state(newzone, NR_SHMEM); 511 + __dec_node_state(oldzone->zone_pgdat, NR_SHMEM); 512 + __inc_node_state(newzone->zone_pgdat, NR_SHMEM); 513 513 } 514 514 if (dirty && mapping_cap_account_dirty(mapping)) { 515 - __dec_zone_state(oldzone, NR_FILE_DIRTY); 516 - __inc_zone_state(newzone, NR_FILE_DIRTY); 515 + __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY); 516 + __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING); 517 + __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY); 518 + __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING); 517 519 } 518 520 } 519 521 local_irq_enable();
+22 -25
mm/page-writeback.c
··· 498 498 */ 499 499 bool node_dirty_ok(struct pglist_data *pgdat) 500 500 { 501 - int z; 502 501 unsigned long limit = node_dirty_limit(pgdat); 503 502 unsigned long nr_pages = 0; 504 503 505 - for (z = 0; z < MAX_NR_ZONES; z++) { 506 - struct zone *zone = pgdat->node_zones + z; 507 - 508 - if (!populated_zone(zone)) 509 - continue; 510 - 511 - nr_pages += zone_page_state(zone, NR_FILE_DIRTY); 512 - nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS); 513 - nr_pages += zone_page_state(zone, NR_WRITEBACK); 514 - } 504 + nr_pages += node_page_state(pgdat, NR_FILE_DIRTY); 505 + nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS); 506 + nr_pages += node_page_state(pgdat, NR_WRITEBACK); 515 507 516 508 return nr_pages <= limit; 517 509 } ··· 1593 1601 * written to the server's write cache, but has not yet 1594 1602 * been flushed to permanent storage. 1595 1603 */ 1596 - nr_reclaimable = global_page_state(NR_FILE_DIRTY) + 1597 - global_page_state(NR_UNSTABLE_NFS); 1604 + nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) + 1605 + global_node_page_state(NR_UNSTABLE_NFS); 1598 1606 gdtc->avail = global_dirtyable_memory(); 1599 - gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); 1607 + gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK); 1600 1608 1601 1609 domain_dirty_limits(gdtc); 1602 1610 ··· 1933 1941 * as we're trying to decide whether to put more under writeback. 1934 1942 */ 1935 1943 gdtc->avail = global_dirtyable_memory(); 1936 - gdtc->dirty = global_page_state(NR_FILE_DIRTY) + 1937 - global_page_state(NR_UNSTABLE_NFS); 1944 + gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) + 1945 + global_node_page_state(NR_UNSTABLE_NFS); 1938 1946 domain_dirty_limits(gdtc); 1939 1947 1940 1948 if (gdtc->dirty > gdtc->bg_thresh) ··· 1978 1986 */ 1979 1987 dirty_thresh += dirty_thresh / 10; /* wheeee... 
*/ 1980 1988 1981 - if (global_page_state(NR_UNSTABLE_NFS) + 1982 - global_page_state(NR_WRITEBACK) <= dirty_thresh) 1989 + if (global_node_page_state(NR_UNSTABLE_NFS) + 1990 + global_node_page_state(NR_WRITEBACK) <= dirty_thresh) 1983 1991 break; 1984 1992 congestion_wait(BLK_RW_ASYNC, HZ/10); 1985 1993 ··· 2007 2015 void laptop_mode_timer_fn(unsigned long data) 2008 2016 { 2009 2017 struct request_queue *q = (struct request_queue *)data; 2010 - int nr_pages = global_page_state(NR_FILE_DIRTY) + 2011 - global_page_state(NR_UNSTABLE_NFS); 2018 + int nr_pages = global_node_page_state(NR_FILE_DIRTY) + 2019 + global_node_page_state(NR_UNSTABLE_NFS); 2012 2020 struct bdi_writeback *wb; 2013 2021 2014 2022 /* ··· 2459 2467 wb = inode_to_wb(inode); 2460 2468 2461 2469 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2462 - __inc_zone_page_state(page, NR_FILE_DIRTY); 2470 + __inc_node_page_state(page, NR_FILE_DIRTY); 2471 + __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2463 2472 __inc_zone_page_state(page, NR_DIRTIED); 2464 2473 __inc_wb_stat(wb, WB_RECLAIMABLE); 2465 2474 __inc_wb_stat(wb, WB_DIRTIED); ··· 2481 2488 { 2482 2489 if (mapping_cap_account_dirty(mapping)) { 2483 2490 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2484 - dec_zone_page_state(page, NR_FILE_DIRTY); 2491 + dec_node_page_state(page, NR_FILE_DIRTY); 2492 + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2485 2493 dec_wb_stat(wb, WB_RECLAIMABLE); 2486 2494 task_io_account_cancelled_write(PAGE_SIZE); 2487 2495 } ··· 2738 2744 wb = unlocked_inode_to_wb_begin(inode, &locked); 2739 2745 if (TestClearPageDirty(page)) { 2740 2746 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); 2741 - dec_zone_page_state(page, NR_FILE_DIRTY); 2747 + dec_node_page_state(page, NR_FILE_DIRTY); 2748 + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2742 2749 dec_wb_stat(wb, WB_RECLAIMABLE); 2743 2750 ret = 1; 2744 2751 } ··· 2785 2790 } 2786 2791 if (ret) { 2787 2792 mem_cgroup_dec_page_stat(page, 
MEM_CGROUP_STAT_WRITEBACK); 2788 - dec_zone_page_state(page, NR_WRITEBACK); 2793 + dec_node_page_state(page, NR_WRITEBACK); 2794 + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2789 2795 inc_zone_page_state(page, NR_WRITTEN); 2790 2796 } 2791 2797 unlock_page_memcg(page); ··· 2840 2844 } 2841 2845 if (!ret) { 2842 2846 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); 2843 - inc_zone_page_state(page, NR_WRITEBACK); 2847 + inc_node_page_state(page, NR_WRITEBACK); 2848 + inc_zone_page_state(page, NR_ZONE_WRITE_PENDING); 2844 2849 } 2845 2850 unlock_page_memcg(page); 2846 2851 return ret;
+32 -42
mm/page_alloc.c
··· 3492 3492 * prevent from pre mature OOM 3493 3493 */ 3494 3494 if (!did_some_progress) { 3495 - unsigned long writeback; 3496 - unsigned long dirty; 3495 + unsigned long write_pending; 3497 3496 3498 - writeback = zone_page_state_snapshot(zone, 3499 - NR_WRITEBACK); 3500 - dirty = zone_page_state_snapshot(zone, NR_FILE_DIRTY); 3497 + write_pending = zone_page_state_snapshot(zone, 3498 + NR_ZONE_WRITE_PENDING); 3501 3499 3502 - if (2*(writeback + dirty) > reclaimable) { 3500 + if (2 * write_pending > reclaimable) { 3503 3501 congestion_wait(BLK_RW_ASYNC, HZ/10); 3504 3502 return true; 3505 3503 } ··· 4173 4175 void si_meminfo(struct sysinfo *val) 4174 4176 { 4175 4177 val->totalram = totalram_pages; 4176 - val->sharedram = global_page_state(NR_SHMEM); 4178 + val->sharedram = global_node_page_state(NR_SHMEM); 4177 4179 val->freeram = global_page_state(NR_FREE_PAGES); 4178 4180 val->bufferram = nr_blockdev_pages(); 4179 4181 val->totalhigh = totalhigh_pages; ··· 4195 4197 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 4196 4198 managed_pages += pgdat->node_zones[zone_type].managed_pages; 4197 4199 val->totalram = managed_pages; 4198 - val->sharedram = sum_zone_node_page_state(nid, NR_SHMEM); 4200 + val->sharedram = node_page_state(pgdat, NR_SHMEM); 4199 4201 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 4200 4202 #ifdef CONFIG_HIGHMEM 4201 4203 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { ··· 4294 4296 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4295 4297 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4296 4298 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4297 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4298 - " anon_thp: %lu shmem_thp: %lu shmem_pmdmapped: %lu\n" 4299 - #endif 4300 4299 " free:%lu free_pcp:%lu free_cma:%lu\n", 4301 4300 global_node_page_state(NR_ACTIVE_ANON), 4302 4301 global_node_page_state(NR_INACTIVE_ANON), ··· 4302 4307 global_node_page_state(NR_INACTIVE_FILE), 4303 4308 
global_node_page_state(NR_ISOLATED_FILE), 4304 4309 global_node_page_state(NR_UNEVICTABLE), 4305 - global_page_state(NR_FILE_DIRTY), 4306 - global_page_state(NR_WRITEBACK), 4307 - global_page_state(NR_UNSTABLE_NFS), 4310 + global_node_page_state(NR_FILE_DIRTY), 4311 + global_node_page_state(NR_WRITEBACK), 4312 + global_node_page_state(NR_UNSTABLE_NFS), 4308 4313 global_page_state(NR_SLAB_RECLAIMABLE), 4309 4314 global_page_state(NR_SLAB_UNRECLAIMABLE), 4310 4315 global_node_page_state(NR_FILE_MAPPED), 4311 - global_page_state(NR_SHMEM), 4316 + global_node_page_state(NR_SHMEM), 4312 4317 global_page_state(NR_PAGETABLE), 4313 4318 global_page_state(NR_BOUNCE), 4314 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4315 - global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR, 4316 - global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR, 4317 - global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR, 4318 - #endif 4319 4319 global_page_state(NR_FREE_PAGES), 4320 4320 free_pcp, 4321 4321 global_page_state(NR_FREE_CMA_PAGES)); ··· 4325 4335 " isolated(anon):%lukB" 4326 4336 " isolated(file):%lukB" 4327 4337 " mapped:%lukB" 4338 + " dirty:%lukB" 4339 + " writeback:%lukB" 4340 + " shmem:%lukB" 4341 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4342 + " shmem_thp: %lukB" 4343 + " shmem_pmdmapped: %lukB" 4344 + " anon_thp: %lukB" 4345 + #endif 4346 + " writeback_tmp:%lukB" 4347 + " unstable:%lukB" 4328 4348 " all_unreclaimable? 
%s" 4329 4349 "\n", 4330 4350 pgdat->node_id, ··· 4346 4346 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 4347 4347 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 4348 4348 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4349 + K(node_page_state(pgdat, NR_FILE_DIRTY)), 4350 + K(node_page_state(pgdat, NR_WRITEBACK)), 4351 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4352 + K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4353 + K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4354 + * HPAGE_PMD_NR), 4355 + K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4356 + #endif 4357 + K(node_page_state(pgdat, NR_SHMEM)), 4358 + K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4359 + K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4349 4360 !pgdat_reclaimable(pgdat) ? "yes" : "no"); 4350 4361 } 4351 4362 ··· 4379 4368 " present:%lukB" 4380 4369 " managed:%lukB" 4381 4370 " mlocked:%lukB" 4382 - " dirty:%lukB" 4383 - " writeback:%lukB" 4384 - " shmem:%lukB" 4385 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4386 - " shmem_thp: %lukB" 4387 - " shmem_pmdmapped: %lukB" 4388 - " anon_thp: %lukB" 4389 - #endif 4390 4371 " slab_reclaimable:%lukB" 4391 4372 " slab_unreclaimable:%lukB" 4392 4373 " kernel_stack:%lukB" 4393 4374 " pagetables:%lukB" 4394 - " unstable:%lukB" 4395 4375 " bounce:%lukB" 4396 4376 " free_pcp:%lukB" 4397 4377 " local_pcp:%ukB" 4398 4378 " free_cma:%lukB" 4399 - " writeback_tmp:%lukB" 4400 4379 " node_pages_scanned:%lu" 4401 4380 "\n", 4402 4381 zone->name, ··· 4397 4396 K(zone->present_pages), 4398 4397 K(zone->managed_pages), 4399 4398 K(zone_page_state(zone, NR_MLOCK)), 4400 - K(zone_page_state(zone, NR_FILE_DIRTY)), 4401 - K(zone_page_state(zone, NR_WRITEBACK)), 4402 - K(zone_page_state(zone, NR_SHMEM)), 4403 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4404 - K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4405 - K(zone_page_state(zone, NR_SHMEM_PMDMAPPED) 4406 - * HPAGE_PMD_NR), 4407 - K(zone_page_state(zone, NR_ANON_THPS) * HPAGE_PMD_NR), 4408 - #endif 4409 4399 
K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 4410 4400 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 4411 4401 zone_page_state(zone, NR_KERNEL_STACK) * 4412 4402 THREAD_SIZE / 1024, 4413 4403 K(zone_page_state(zone, NR_PAGETABLE)), 4414 - K(zone_page_state(zone, NR_UNSTABLE_NFS)), 4415 4404 K(zone_page_state(zone, NR_BOUNCE)), 4416 4405 K(free_pcp), 4417 4406 K(this_cpu_read(zone->pageset->pcp.count)), 4418 4407 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 4419 - K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 4420 4408 K(node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED))); 4421 4409 printk("lowmem_reserve[]:"); 4422 4410 for (i = 0; i < MAX_NR_ZONES; i++) ··· 4448 4458 4449 4459 hugetlb_show_meminfo(); 4450 4460 4451 - printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 4461 + printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 4452 4462 4453 4463 show_swap_cache_info(); 4454 4464 }
+5 -5
mm/rmap.c
··· 1213 1213 * disabled. 1214 1214 */ 1215 1215 if (compound) 1216 - __inc_zone_page_state(page, NR_ANON_THPS); 1216 + __inc_node_page_state(page, NR_ANON_THPS); 1217 1217 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); 1218 1218 } 1219 1219 if (unlikely(PageKsm(page))) ··· 1251 1251 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 1252 1252 /* increment count (starts at -1) */ 1253 1253 atomic_set(compound_mapcount_ptr(page), 0); 1254 - __inc_zone_page_state(page, NR_ANON_THPS); 1254 + __inc_node_page_state(page, NR_ANON_THPS); 1255 1255 } else { 1256 1256 /* Anon THP always mapped first with PMD */ 1257 1257 VM_BUG_ON_PAGE(PageTransCompound(page), page); ··· 1282 1282 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) 1283 1283 goto out; 1284 1284 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 1285 - __inc_zone_page_state(page, NR_SHMEM_PMDMAPPED); 1285 + __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); 1286 1286 } else { 1287 1287 if (PageTransCompound(page)) { 1288 1288 VM_BUG_ON_PAGE(!PageLocked(page), page); ··· 1322 1322 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) 1323 1323 goto out; 1324 1324 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 1325 - __dec_zone_page_state(page, NR_SHMEM_PMDMAPPED); 1325 + __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); 1326 1326 } else { 1327 1327 if (!atomic_add_negative(-1, &page->_mapcount)) 1328 1328 goto out; ··· 1356 1356 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1357 1357 return; 1358 1358 1359 - __dec_zone_page_state(page, NR_ANON_THPS); 1359 + __dec_node_page_state(page, NR_ANON_THPS); 1360 1360 1361 1361 if (TestClearPageDoubleMap(page)) { 1362 1362 /*
+7 -7
mm/shmem.c
··· 575 575 if (!error) { 576 576 mapping->nrpages += nr; 577 577 if (PageTransHuge(page)) 578 - __inc_zone_page_state(page, NR_SHMEM_THPS); 579 - __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, nr); 580 - __mod_zone_page_state(page_zone(page), NR_SHMEM, nr); 578 + __inc_node_page_state(page, NR_SHMEM_THPS); 579 + __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); 580 + __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr); 581 581 spin_unlock_irq(&mapping->tree_lock); 582 582 } else { 583 583 page->mapping = NULL; ··· 601 601 error = shmem_radix_tree_replace(mapping, page->index, page, radswap); 602 602 page->mapping = NULL; 603 603 mapping->nrpages--; 604 - __dec_zone_page_state(page, NR_FILE_PAGES); 605 - __dec_zone_page_state(page, NR_SHMEM); 604 + __dec_node_page_state(page, NR_FILE_PAGES); 605 + __dec_node_page_state(page, NR_SHMEM); 606 606 spin_unlock_irq(&mapping->tree_lock); 607 607 put_page(page); 608 608 BUG_ON(error); ··· 1493 1493 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1494 1494 newpage); 1495 1495 if (!error) { 1496 - __inc_zone_page_state(newpage, NR_FILE_PAGES); 1497 - __dec_zone_page_state(oldpage, NR_FILE_PAGES); 1496 + __inc_node_page_state(newpage, NR_FILE_PAGES); 1497 + __dec_node_page_state(oldpage, NR_FILE_PAGES); 1498 1498 } 1499 1499 spin_unlock_irq(&swap_mapping->tree_lock); 1500 1500
+2 -2
mm/swap_state.c
··· 95 95 entry.val, page); 96 96 if (likely(!error)) { 97 97 address_space->nrpages++; 98 - __inc_zone_page_state(page, NR_FILE_PAGES); 98 + __inc_node_page_state(page, NR_FILE_PAGES); 99 99 INC_CACHE_INFO(add_total); 100 100 } 101 101 spin_unlock_irq(&address_space->tree_lock); ··· 147 147 set_page_private(page, 0); 148 148 ClearPageSwapCache(page); 149 149 address_space->nrpages--; 150 - __dec_zone_page_state(page, NR_FILE_PAGES); 150 + __dec_node_page_state(page, NR_FILE_PAGES); 151 151 INC_CACHE_INFO(del_total); 152 152 } 153 153
+2 -2
mm/util.c
··· 528 528 529 529 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { 530 530 free = global_page_state(NR_FREE_PAGES); 531 - free += global_page_state(NR_FILE_PAGES); 531 + free += global_node_page_state(NR_FILE_PAGES); 532 532 533 533 /* 534 534 * shmem pages shouldn't be counted as free in this ··· 536 536 * that won't affect the overall amount of available 537 537 * memory in the system. 538 538 */ 539 - free -= global_page_state(NR_SHMEM); 539 + free -= global_node_page_state(NR_SHMEM); 540 540 541 541 free += get_nr_swap_pages(); 542 542
+8 -8
mm/vmscan.c
··· 3587 3587 */ 3588 3588 int sysctl_min_slab_ratio = 5; 3589 3589 3590 - static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 3590 + static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) 3591 3591 { 3592 - unsigned long file_mapped = node_page_state(zone->zone_pgdat, NR_FILE_MAPPED); 3593 - unsigned long file_lru = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) + 3594 - node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE); 3592 + unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); 3593 + unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + 3594 + node_page_state(pgdat, NR_ACTIVE_FILE); 3595 3595 3596 3596 /* 3597 3597 * It's possible for there to be more file mapped pages than ··· 3610 3610 /* 3611 3611 * If RECLAIM_UNMAP is set, then all file pages are considered 3612 3612 * potentially reclaimable. Otherwise, we have to worry about 3613 - * pages like swapcache and zone_unmapped_file_pages() provides 3613 + * pages like swapcache and node_unmapped_file_pages() provides 3614 3614 * a better estimate 3615 3615 */ 3616 3616 if (zone_reclaim_mode & RECLAIM_UNMAP) 3617 - nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 3617 + nr_pagecache_reclaimable = node_page_state(zone->zone_pgdat, NR_FILE_PAGES); 3618 3618 else 3619 - nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 3619 + nr_pagecache_reclaimable = node_unmapped_file_pages(zone->zone_pgdat); 3620 3620 3621 3621 /* If we can't clean pages, remove dirty pages from consideration */ 3622 3622 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 3623 - delta += zone_page_state(zone, NR_FILE_DIRTY); 3623 + delta += node_page_state(zone->zone_pgdat, NR_FILE_DIRTY); 3624 3624 3625 3625 /* Watch for any possible underflows due to delta */ 3626 3626 if (unlikely(delta > nr_pagecache_reclaimable))
+10 -9
mm/vmstat.c
··· 924 924 "nr_alloc_batch", 925 925 "nr_zone_anon_lru", 926 926 "nr_zone_file_lru", 927 + "nr_zone_write_pending", 927 928 "nr_mlock", 928 - "nr_file_pages", 929 - "nr_dirty", 930 - "nr_writeback", 931 929 "nr_slab_reclaimable", 932 930 "nr_slab_unreclaimable", 933 931 "nr_page_table_pages", 934 932 "nr_kernel_stack", 935 - "nr_unstable", 936 933 "nr_bounce", 937 934 "nr_vmscan_write", 938 935 "nr_vmscan_immediate_reclaim", 939 - "nr_writeback_temp", 940 - "nr_shmem", 941 936 "nr_dirtied", 942 937 "nr_written", 943 938 #if IS_ENABLED(CONFIG_ZSMALLOC) ··· 946 951 "numa_local", 947 952 "numa_other", 948 953 #endif 949 - "nr_anon_transparent_hugepages", 950 - "nr_shmem_hugepages", 951 - "nr_shmem_pmdmapped", 952 954 "nr_free_cma", 953 955 954 956 /* Node-based counters */ ··· 962 970 "workingset_nodereclaim", 963 971 "nr_anon_pages", 964 972 "nr_mapped", 973 + "nr_file_pages", 974 + "nr_dirty", 975 + "nr_writeback", 976 + "nr_writeback_temp", 977 + "nr_shmem", 978 + "nr_shmem_hugepages", 979 + "nr_shmem_pmdmapped", 980 + "nr_anon_transparent_hugepages", 981 + "nr_unstable", 965 982 966 983 /* enum writeback_stat_item counters */ 967 984 "nr_dirty_threshold",