Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: convert totalram_pages and totalhigh_pages variables to atomic

totalram_pages and totalhigh_pages are made static inline function.

Main motivation was that managed_page_count_lock handling was complicating
things. It was discussed in length here,
https://lore.kernel.org/patchwork/patch/995739/#1181785 So it seems
better to remove the lock and convert variables to atomic, while preventing
potential store-to-read tearing as a bonus.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/1542090790-21750-4-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Suggested-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Arun KS and committed by
Linus Torvalds
ca79b0c2 9705bea5

+131 -81
+2 -2
arch/csky/mm/init.c
··· 71 71 ClearPageReserved(virt_to_page(start)); 72 72 init_page_count(virt_to_page(start)); 73 73 free_page(start); 74 - totalram_pages++; 74 + totalram_pages_inc(); 75 75 } 76 76 } 77 77 #endif ··· 88 88 ClearPageReserved(virt_to_page(addr)); 89 89 init_page_count(virt_to_page(addr)); 90 90 free_page(addr); 91 - totalram_pages++; 91 + totalram_pages_inc(); 92 92 addr += PAGE_SIZE; 93 93 } 94 94
+5 -5
arch/powerpc/platforms/pseries/cmm.c
··· 208 208 209 209 pa->page[pa->index++] = addr; 210 210 loaned_pages++; 211 - totalram_pages--; 211 + totalram_pages_dec(); 212 212 spin_unlock(&cmm_lock); 213 213 nr--; 214 214 } ··· 247 247 free_page(addr); 248 248 loaned_pages--; 249 249 nr--; 250 - totalram_pages++; 250 + totalram_pages_inc(); 251 251 } 252 252 spin_unlock(&cmm_lock); 253 253 cmm_dbg("End request with %ld pages unfulfilled\n", nr); ··· 291 291 int rc; 292 292 struct hvcall_mpp_data mpp_data; 293 293 signed long active_pages_target, page_loan_request, target; 294 - signed long total_pages = totalram_pages + loaned_pages; 294 + signed long total_pages = totalram_pages() + loaned_pages; 295 295 signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE; 296 296 297 297 rc = h_get_mpp(&mpp_data); ··· 322 322 323 323 cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n", 324 324 page_loan_request, loaned_pages, loaned_pages_target, 325 - oom_freed_pages, totalram_pages); 325 + oom_freed_pages, totalram_pages()); 326 326 } 327 327 328 328 static struct notifier_block cmm_oom_nb = { ··· 581 581 free_page(pa_curr->page[idx]); 582 582 freed++; 583 583 loaned_pages--; 584 - totalram_pages++; 584 + totalram_pages_inc(); 585 585 pa_curr->page[idx] = pa_last->page[--pa_last->index]; 586 586 if (pa_last->index == 0) { 587 587 if (pa_curr == pa_last)
+1 -1
arch/s390/mm/init.c
··· 59 59 order = 7; 60 60 61 61 /* Limit number of empty zero pages for small memory sizes */ 62 - while (order > 2 && (totalram_pages >> 10) < (1UL << order)) 62 + while (order > 2 && (totalram_pages() >> 10) < (1UL << order)) 63 63 order--; 64 64 65 65 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+1 -1
arch/um/kernel/mem.c
··· 51 51 52 52 /* this will put all low memory onto the freelists */ 53 53 memblock_free_all(); 54 - max_low_pfn = totalram_pages; 54 + max_low_pfn = totalram_pages(); 55 55 max_pfn = max_low_pfn; 56 56 mem_init_print_info(NULL); 57 57 kmalloc_ok = 1;
+1 -1
arch/x86/kernel/cpu/microcode/core.c
··· 434 434 size_t len, loff_t *ppos) 435 435 { 436 436 ssize_t ret = -EINVAL; 437 - unsigned long nr_pages = totalram_pages; 437 + unsigned long nr_pages = totalram_pages(); 438 438 439 439 if ((len >> PAGE_SHIFT) > nr_pages) { 440 440 pr_err("too much data (max %ld pages)\n", nr_pages);
+2 -2
drivers/char/agp/backend.c
··· 115 115 long memory, index, result; 116 116 117 117 #if PAGE_SHIFT < 20 118 - memory = totalram_pages >> (20 - PAGE_SHIFT); 118 + memory = totalram_pages() >> (20 - PAGE_SHIFT); 119 119 #else 120 - memory = totalram_pages << (PAGE_SHIFT - 20); 120 + memory = totalram_pages() << (PAGE_SHIFT - 20); 121 121 #endif 122 122 index = 1; 123 123
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2559 2559 * If there's no chance of allocating enough pages for the whole 2560 2560 * object, bail early. 2561 2561 */ 2562 - if (page_count > totalram_pages) 2562 + if (page_count > totalram_pages()) 2563 2563 return -ENOMEM; 2564 2564 2565 2565 st = kmalloc(sizeof(*st), GFP_KERNEL);
+2 -2
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
··· 170 170 * This should ensure that we do not run into the oomkiller during 171 171 * the test and take down the machine wilfully. 172 172 */ 173 - limit = totalram_pages << PAGE_SHIFT; 173 + limit = totalram_pages() << PAGE_SHIFT; 174 174 limit = min(ppgtt->vm.total, limit); 175 175 176 176 /* Check we can allocate the entire range */ ··· 1244 1244 u64 hole_start, u64 hole_end, 1245 1245 unsigned long end_time)) 1246 1246 { 1247 - const u64 limit = totalram_pages << PAGE_SHIFT; 1247 + const u64 limit = totalram_pages() << PAGE_SHIFT; 1248 1248 struct i915_gem_context *ctx; 1249 1249 struct i915_hw_ppgtt *ppgtt; 1250 1250 IGT_TIMEOUT(end_time);
+1 -1
drivers/hv/hv_balloon.c
··· 1090 1090 static unsigned long compute_balloon_floor(void) 1091 1091 { 1092 1092 unsigned long min_pages; 1093 - unsigned long nr_pages = totalram_pages; 1093 + unsigned long nr_pages = totalram_pages(); 1094 1094 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) 1095 1095 /* Simple continuous piecewiese linear function: 1096 1096 * max MiB -> min MiB gradient
+1 -1
drivers/md/dm-bufio.c
··· 1887 1887 dm_bufio_allocated_vmalloc = 0; 1888 1888 dm_bufio_current_allocated = 0; 1889 1889 1890 - mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, 1890 + mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(), 1891 1891 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; 1892 1892 1893 1893 if (mem > ULONG_MAX)
+1 -1
drivers/md/dm-crypt.c
··· 2158 2158 2159 2159 static void crypt_calculate_pages_per_client(void) 2160 2160 { 2161 - unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100; 2161 + unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; 2162 2162 2163 2163 if (!dm_crypt_clients_n) 2164 2164 return;
+1 -1
drivers/md/dm-integrity.c
··· 2843 2843 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, 2844 2844 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT); 2845 2845 journal_desc_size = journal_pages * sizeof(struct page_list); 2846 - if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) { 2846 + if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) { 2847 2847 *error = "Journal doesn't fit into memory"; 2848 2848 r = -ENOMEM; 2849 2849 goto bad;
+1 -1
drivers/md/dm-stats.c
··· 85 85 a = shared_memory_amount + alloc_size; 86 86 if (a < shared_memory_amount) 87 87 return false; 88 - if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR) 88 + if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR) 89 89 return false; 90 90 #ifdef CONFIG_MMU 91 91 if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+1 -1
drivers/media/platform/mtk-vpu/mtk_vpu.c
··· 855 855 /* Set PTCM to 96K and DTCM to 32K */ 856 856 vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG); 857 857 858 - vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT)); 858 + vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT)); 859 859 dev_info(dev, "4GB mode %u\n", vpu->enable_4GB); 860 860 861 861 if (vpu->enable_4GB) {
+1 -1
drivers/misc/vmw_balloon.c
··· 570 570 unsigned long status; 571 571 unsigned long limit; 572 572 573 - limit = totalram_pages; 573 + limit = totalram_pages(); 574 574 575 575 /* Ensure limit fits in 32-bits */ 576 576 if (limit != (u32)limit)
+2 -2
drivers/parisc/ccio-dma.c
··· 1251 1251 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). 1252 1252 */ 1253 1253 1254 - iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver)); 1254 + iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver)); 1255 1255 1256 1256 /* limit IOVA space size to 1MB-1GB */ 1257 1257 ··· 1290 1290 1291 1291 DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", 1292 1292 __func__, ioc->ioc_regs, 1293 - (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), 1293 + (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), 1294 1294 iova_space_size>>20, 1295 1295 iov_order + PAGE_SHIFT); 1296 1296
+2 -2
drivers/parisc/sba_iommu.c
··· 1414 1414 ** for DMA hints - ergo only 30 bits max. 1415 1415 */ 1416 1416 1417 - iova_space_size = (u32) (totalram_pages/global_ioc_cnt); 1417 + iova_space_size = (u32) (totalram_pages()/global_ioc_cnt); 1418 1418 1419 1419 /* limit IOVA space size to 1MB-1GB */ 1420 1420 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { ··· 1439 1439 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", 1440 1440 __func__, 1441 1441 ioc->ioc_hpa, 1442 - (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), 1442 + (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT), 1443 1443 iova_space_size>>20, 1444 1444 iov_order + PAGE_SHIFT); 1445 1445
+1 -1
drivers/staging/android/ion/ion_system_heap.c
··· 110 110 unsigned long size_remaining = PAGE_ALIGN(size); 111 111 unsigned int max_order = orders[0]; 112 112 113 - if (size / PAGE_SIZE > totalram_pages / 2) 113 + if (size / PAGE_SIZE > totalram_pages() / 2) 114 114 return -ENOMEM; 115 115 116 116 INIT_LIST_HEAD(&pages);
+3 -3
drivers/xen/xen-selfballoon.c
··· 189 189 bool reset_timer = false; 190 190 191 191 if (xen_selfballooning_enabled) { 192 - cur_pages = totalram_pages; 192 + cur_pages = totalram_pages(); 193 193 tgt_pages = cur_pages; /* default is no change */ 194 194 goal_pages = vm_memory_committed() + 195 195 totalreserve_pages + ··· 227 227 if (tgt_pages < floor_pages) 228 228 tgt_pages = floor_pages; 229 229 balloon_set_new_target(tgt_pages + 230 - balloon_stats.current_pages - totalram_pages); 230 + balloon_stats.current_pages - totalram_pages()); 231 231 reset_timer = true; 232 232 } 233 233 #ifdef CONFIG_FRONTSWAP ··· 569 569 * much more reliably and response faster in some cases. 570 570 */ 571 571 if (!selfballoon_reserved_mb) { 572 - reserve_pages = totalram_pages / 10; 572 + reserve_pages = totalram_pages() / 10; 573 573 selfballoon_reserved_mb = PAGES2MB(reserve_pages); 574 574 } 575 575 schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
+1 -1
fs/ceph/super.h
··· 810 810 * This allows larger machines to have larger/more transfers. 811 811 * Limit the default to 256M 812 812 */ 813 - congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); 813 + congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); 814 814 if (congestion_kb > 256*1024) 815 815 congestion_kb = 256*1024; 816 816
+1 -1
fs/file_table.c
··· 380 380 void __init files_maxfiles_init(void) 381 381 { 382 382 unsigned long n; 383 - unsigned long nr_pages = totalram_pages; 383 + unsigned long nr_pages = totalram_pages(); 384 384 unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2; 385 385 386 386 memreserve = min(memreserve, nr_pages - 1);
+1 -1
fs/fuse/inode.c
··· 824 824 static void sanitize_global_limit(unsigned *limit) 825 825 { 826 826 if (*limit == 0) 827 - *limit = ((totalram_pages << PAGE_SHIFT) >> 13) / 827 + *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 828 828 sizeof(struct fuse_req); 829 829 830 830 if (*limit >= 1 << 16)
+1 -1
fs/nfs/write.c
··· 2121 2121 * This allows larger machines to have larger/more transfers. 2122 2122 * Limit the default to 256M 2123 2123 */ 2124 - nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); 2124 + nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); 2125 2125 if (nfs_congestion_kb > 256*1024) 2126 2126 nfs_congestion_kb = 256*1024; 2127 2127
+1 -1
fs/nfsd/nfscache.c
··· 99 99 nfsd_cache_size_limit(void) 100 100 { 101 101 unsigned int limit; 102 - unsigned long low_pages = totalram_pages - totalhigh_pages; 102 + unsigned long low_pages = totalram_pages() - totalhigh_pages(); 103 103 104 104 limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10); 105 105 return min_t(unsigned int, limit, 256*1024);
+1 -1
fs/ntfs/malloc.h
··· 47 47 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); 48 48 /* return (void *)__get_free_page(gfp_mask); */ 49 49 } 50 - if (likely((size >> PAGE_SHIFT) < totalram_pages)) 50 + if (likely((size >> PAGE_SHIFT) < totalram_pages())) 51 51 return __vmalloc(size, gfp_mask, PAGE_KERNEL); 52 52 return NULL; 53 53 }
+1 -1
fs/proc/base.c
··· 530 530 static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, 531 531 struct pid *pid, struct task_struct *task) 532 532 { 533 - unsigned long totalpages = totalram_pages + total_swap_pages; 533 + unsigned long totalpages = totalram_pages() + total_swap_pages; 534 534 unsigned long points = 0; 535 535 536 536 points = oom_badness(task, NULL, NULL, totalpages) *
+26 -2
include/linux/highmem.h
··· 36 36 37 37 /* declarations for linux/mm/highmem.c */ 38 38 unsigned int nr_free_highpages(void); 39 - extern unsigned long totalhigh_pages; 39 + extern atomic_long_t _totalhigh_pages; 40 + static inline unsigned long totalhigh_pages(void) 41 + { 42 + return (unsigned long)atomic_long_read(&_totalhigh_pages); 43 + } 44 + 45 + static inline void totalhigh_pages_inc(void) 46 + { 47 + atomic_long_inc(&_totalhigh_pages); 48 + } 49 + 50 + static inline void totalhigh_pages_dec(void) 51 + { 52 + atomic_long_dec(&_totalhigh_pages); 53 + } 54 + 55 + static inline void totalhigh_pages_add(long count) 56 + { 57 + atomic_long_add(count, &_totalhigh_pages); 58 + } 59 + 60 + static inline void totalhigh_pages_set(long val) 61 + { 62 + atomic_long_set(&_totalhigh_pages, val); 63 + } 40 64 41 65 void kmap_flush_unused(void); 42 66 ··· 75 51 return virt_to_page(addr); 76 52 } 77 53 78 - #define totalhigh_pages 0UL 54 + static inline unsigned long totalhigh_pages(void) { return 0UL; } 79 55 80 56 #ifndef ARCH_HAS_KMAP 81 57 static inline void *kmap(struct page *page)
+26 -1
include/linux/mm.h
··· 48 48 static inline void set_max_mapnr(unsigned long limit) { } 49 49 #endif 50 50 51 - extern unsigned long totalram_pages; 51 + extern atomic_long_t _totalram_pages; 52 + static inline unsigned long totalram_pages(void) 53 + { 54 + return (unsigned long)atomic_long_read(&_totalram_pages); 55 + } 56 + 57 + static inline void totalram_pages_inc(void) 58 + { 59 + atomic_long_inc(&_totalram_pages); 60 + } 61 + 62 + static inline void totalram_pages_dec(void) 63 + { 64 + atomic_long_dec(&_totalram_pages); 65 + } 66 + 67 + static inline void totalram_pages_add(long count) 68 + { 69 + atomic_long_add(count, &_totalram_pages); 70 + } 71 + 72 + static inline void totalram_pages_set(long val) 73 + { 74 + atomic_long_set(&_totalram_pages, val); 75 + } 76 + 52 77 extern void * high_memory; 53 78 extern int page_cluster; 54 79
-1
include/linux/swap.h
··· 310 310 } while (0) 311 311 312 312 /* linux/mm/page_alloc.c */ 313 - extern unsigned long totalram_pages; 314 313 extern unsigned long totalreserve_pages; 315 314 extern unsigned long nr_free_buffer_pages(void); 316 315 extern unsigned long nr_free_pagecache_pages(void);
+1 -1
kernel/fork.c
··· 744 744 static void set_max_threads(unsigned int max_threads_suggested) 745 745 { 746 746 u64 threads; 747 - unsigned long nr_pages = totalram_pages; 747 + unsigned long nr_pages = totalram_pages(); 748 748 749 749 /* 750 750 * The number of threads shall be limited such that the thread
+1 -1
kernel/kexec_core.c
··· 152 152 int i; 153 153 unsigned long nr_segments = image->nr_segments; 154 154 unsigned long total_pages = 0; 155 - unsigned long nr_pages = totalram_pages; 155 + unsigned long nr_pages = totalram_pages(); 156 156 157 157 /* 158 158 * Verify we have good destination addresses. The caller is
+1 -1
kernel/power/snapshot.c
··· 105 105 106 106 void __init hibernate_image_size_init(void) 107 107 { 108 - image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; 108 + image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE; 109 109 } 110 110 111 111 /*
+2 -3
mm/highmem.c
··· 105 105 } 106 106 #endif 107 107 108 - unsigned long totalhigh_pages __read_mostly; 109 - EXPORT_SYMBOL(totalhigh_pages); 110 - 108 + atomic_long_t _totalhigh_pages __read_mostly; 109 + EXPORT_SYMBOL(_totalhigh_pages); 111 110 112 111 EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); 113 112
+1 -1
mm/huge_memory.c
··· 420 420 * where the extra memory used could hurt more than TLB overhead 421 421 * is likely to save. The admin can still enable it through /sys. 422 422 */ 423 - if (totalram_pages < (512 << (20 - PAGE_SHIFT))) { 423 + if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { 424 424 transparent_hugepage_flags = 0; 425 425 return 0; 426 426 }
+1 -1
mm/kasan/quarantine.c
··· 237 237 * Update quarantine size in case of hotplug. Allocate a fraction of 238 238 * the installed memory to quarantine minus per-cpu queue limits. 239 239 */ 240 - total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / 240 + total_size = (totalram_pages() << PAGE_SHIFT) / 241 241 QUARANTINE_FRACTION; 242 242 percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); 243 243 new_quarantine_size = (total_size < percpu_quarantines) ?
+2 -2
mm/memblock.c
··· 1576 1576 1577 1577 for (; cursor < end; cursor++) { 1578 1578 memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1579 - totalram_pages++; 1579 + totalram_pages_inc(); 1580 1580 } 1581 1581 } 1582 1582 ··· 1978 1978 reset_all_zones_managed_pages(); 1979 1979 1980 1980 pages = free_low_memory_core_early(); 1981 - totalram_pages += pages; 1981 + totalram_pages_add(pages); 1982 1982 1983 1983 return pages; 1984 1984 }
+1 -1
mm/mm_init.c
··· 146 146 s32 batch = max_t(s32, nr*2, 32); 147 147 148 148 /* batch size set to 0.4% of (total memory/#cpus), or max int32 */ 149 - memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff); 149 + memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff); 150 150 151 151 vm_committed_as_batch = max_t(s32, memsized_batch, batch); 152 152 }
+1 -1
mm/oom_kill.c
··· 269 269 } 270 270 271 271 /* Default to all available memory */ 272 - oc->totalpages = totalram_pages + total_swap_pages; 272 + oc->totalpages = totalram_pages() + total_swap_pages; 273 273 274 274 if (!IS_ENABLED(CONFIG_NUMA)) 275 275 return CONSTRAINT_NONE;
+11 -9
mm/page_alloc.c
··· 16 16 17 17 #include <linux/stddef.h> 18 18 #include <linux/mm.h> 19 + #include <linux/highmem.h> 19 20 #include <linux/swap.h> 20 21 #include <linux/interrupt.h> 21 22 #include <linux/pagemap.h> ··· 125 124 /* Protect totalram_pages and zone->managed_pages */ 126 125 static DEFINE_SPINLOCK(managed_page_count_lock); 127 126 128 - unsigned long totalram_pages __read_mostly; 127 + atomic_long_t _totalram_pages __read_mostly; 128 + EXPORT_SYMBOL(_totalram_pages); 129 129 unsigned long totalreserve_pages __read_mostly; 130 130 unsigned long totalcma_pages __read_mostly; 131 131 ··· 4749 4747 4750 4748 void si_meminfo(struct sysinfo *val) 4751 4749 { 4752 - val->totalram = totalram_pages; 4750 + val->totalram = totalram_pages(); 4753 4751 val->sharedram = global_node_page_state(NR_SHMEM); 4754 4752 val->freeram = global_zone_page_state(NR_FREE_PAGES); 4755 4753 val->bufferram = nr_blockdev_pages(); 4756 - val->totalhigh = totalhigh_pages; 4754 + val->totalhigh = totalhigh_pages(); 4757 4755 val->freehigh = nr_free_highpages(); 4758 4756 val->mem_unit = PAGE_SIZE; 4759 4757 } ··· 7079 7077 { 7080 7078 spin_lock(&managed_page_count_lock); 7081 7079 atomic_long_add(count, &page_zone(page)->managed_pages); 7082 - totalram_pages += count; 7080 + totalram_pages_add(count); 7083 7081 #ifdef CONFIG_HIGHMEM 7084 7082 if (PageHighMem(page)) 7085 - totalhigh_pages += count; 7083 + totalhigh_pages_add(count); 7086 7084 #endif 7087 7085 spin_unlock(&managed_page_count_lock); 7088 7086 } ··· 7125 7123 void free_highmem_page(struct page *page) 7126 7124 { 7127 7125 __free_reserved_page(page); 7128 - totalram_pages++; 7126 + totalram_pages_inc(); 7129 7127 atomic_long_inc(&page_zone(page)->managed_pages); 7130 - totalhigh_pages++; 7128 + totalhigh_pages_inc(); 7131 7129 } 7132 7130 #endif 7133 7131 ··· 7176 7174 physpages << (PAGE_SHIFT - 10), 7177 7175 codesize >> 10, datasize >> 10, rosize >> 10, 7178 7176 (init_data_size + init_code_size) >> 10, bss_size >> 10, 7179 - (physpages 
- totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10), 7177 + (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), 7180 7178 totalcma_pages << (PAGE_SHIFT - 10), 7181 7179 #ifdef CONFIG_HIGHMEM 7182 - totalhigh_pages << (PAGE_SHIFT - 10), 7180 + totalhigh_pages() << (PAGE_SHIFT - 10), 7183 7181 #endif 7184 7182 str ? ", " : "", str ? str : ""); 7185 7183 }
+5 -4
mm/shmem.c
··· 109 109 #ifdef CONFIG_TMPFS 110 110 static unsigned long shmem_default_max_blocks(void) 111 111 { 112 - return totalram_pages / 2; 112 + return totalram_pages() / 2; 113 113 } 114 114 115 115 static unsigned long shmem_default_max_inodes(void) 116 116 { 117 - unsigned long nr_pages = totalram_pages; 118 - return min(nr_pages - totalhigh_pages, nr_pages / 2); 117 + unsigned long nr_pages = totalram_pages(); 118 + 119 + return min(nr_pages - totalhigh_pages(), nr_pages / 2); 119 120 } 120 121 #endif 121 122 ··· 3303 3302 size = memparse(value,&rest); 3304 3303 if (*rest == '%') { 3305 3304 size <<= PAGE_SHIFT; 3306 - size *= totalram_pages; 3305 + size *= totalram_pages(); 3307 3306 do_div(size, 100); 3308 3307 rest++; 3309 3308 }
+1 -1
mm/slab.c
··· 1235 1235 * page orders on machines with more than 32MB of memory if 1236 1236 * not overridden on the command line. 1237 1237 */ 1238 - if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) 1238 + if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT) 1239 1239 slab_max_order = SLAB_MAX_ORDER_HI; 1240 1240 1241 1241 /* Bootstrap is tricky, because several objects are allocated
+1 -1
mm/swap.c
··· 1022 1022 */ 1023 1023 void __init swap_setup(void) 1024 1024 { 1025 - unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); 1025 + unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT); 1026 1026 1027 1027 /* Use a smaller cluster for small-memory machines */ 1028 1028 if (megs < 16)
+1 -1
mm/util.c
··· 593 593 if (sysctl_overcommit_kbytes) 594 594 allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); 595 595 else 596 - allowed = ((totalram_pages - hugetlb_total_pages()) 596 + allowed = ((totalram_pages() - hugetlb_total_pages()) 597 597 * sysctl_overcommit_ratio / 100); 598 598 allowed += total_swap_pages; 599 599
+2 -2
mm/vmalloc.c
··· 1634 1634 1635 1635 might_sleep(); 1636 1636 1637 - if (count > totalram_pages) 1637 + if (count > totalram_pages()) 1638 1638 return NULL; 1639 1639 1640 1640 size = (unsigned long)count << PAGE_SHIFT; ··· 1739 1739 unsigned long real_size = size; 1740 1740 1741 1741 size = PAGE_ALIGN(size); 1742 - if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1742 + if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 1743 1743 goto fail; 1744 1744 1745 1745 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+1 -1
mm/workingset.c
··· 549 549 * double the initial memory by using totalram_pages as-is. 550 550 */ 551 551 timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; 552 - max_order = fls_long(totalram_pages - 1); 552 + max_order = fls_long(totalram_pages() - 1); 553 553 if (max_order > timestamp_bits) 554 554 bucket_order = max_order - timestamp_bits; 555 555 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+2 -2
mm/zswap.c
··· 219 219 220 220 static bool zswap_is_full(void) 221 221 { 222 - return totalram_pages * zswap_max_pool_percent / 100 < 223 - DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 222 + return totalram_pages() * zswap_max_pool_percent / 100 < 223 + DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 224 224 } 225 225 226 226 static void zswap_update_total_size(void)
+1 -1
net/dccp/proto.c
··· 1131 1131 static int __init dccp_init(void) 1132 1132 { 1133 1133 unsigned long goal; 1134 - unsigned long nr_pages = totalram_pages; 1134 + unsigned long nr_pages = totalram_pages(); 1135 1135 int ehash_order, bhash_order, i; 1136 1136 int rc; 1137 1137
+1 -1
net/decnet/dn_route.c
··· 1866 1866 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; 1867 1867 add_timer(&dn_route_timer); 1868 1868 1869 - goal = totalram_pages >> (26 - PAGE_SHIFT); 1869 + goal = totalram_pages() >> (26 - PAGE_SHIFT); 1870 1870 1871 1871 for(order = 0; (1UL << order) < goal; order++) 1872 1872 /* NOTHING */;
+1 -1
net/ipv4/tcp_metrics.c
··· 1000 1000 1001 1001 slots = tcpmhash_entries; 1002 1002 if (!slots) { 1003 - if (totalram_pages >= 128 * 1024) 1003 + if (totalram_pages() >= 128 * 1024) 1004 1004 slots = 16 * 1024; 1005 1005 else 1006 1006 slots = 8 * 1024;
+1 -1
net/netfilter/nf_conntrack_core.c
··· 2248 2248 2249 2249 int nf_conntrack_init_start(void) 2250 2250 { 2251 - unsigned long nr_pages = totalram_pages; 2251 + unsigned long nr_pages = totalram_pages(); 2252 2252 int max_factor = 8; 2253 2253 int ret = -ENOMEM; 2254 2254 int i;
+1 -1
net/netfilter/xt_hashlimit.c
··· 274 274 struct xt_hashlimit_htable *hinfo; 275 275 const struct seq_operations *ops; 276 276 unsigned int size, i; 277 - unsigned long nr_pages = totalram_pages; 277 + unsigned long nr_pages = totalram_pages(); 278 278 int ret; 279 279 280 280 if (cfg->size) {
+1 -1
net/sctp/protocol.c
··· 1368 1368 int status = -EINVAL; 1369 1369 unsigned long goal; 1370 1370 unsigned long limit; 1371 - unsigned long nr_pages = totalram_pages; 1371 + unsigned long nr_pages = totalram_pages(); 1372 1372 int max_share; 1373 1373 int order; 1374 1374 int num_entries;
+1 -1
security/integrity/ima/ima_kexec.c
··· 106 106 kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + 107 107 PAGE_SIZE / 2, PAGE_SIZE); 108 108 if ((kexec_segment_size == ULONG_MAX) || 109 - ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) { 109 + ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) { 110 110 pr_err("Binary measurement list too large.\n"); 111 111 return; 112 112 }