Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: replace various uses of num_physpages by totalram_pages

Sizing of memory allocations shouldn't depend on the number of physical
pages found in a system, as that generally includes (perhaps a huge amount
of) non-RAM pages. The amount of what actually is usable as storage
should instead be used as a basis here.

Some of the calculations (i.e. those not intending to use high memory)
should likely even use (totalram_pages - totalhigh_pages).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Jan Beulich and committed by Linus Torvalds (commit 4481374c, parent 4738e1b9)

Total diffstat: +38 -41
+2 -2
arch/x86/kernel/microcode_core.c
··· 210 210 { 211 211 ssize_t ret = -EINVAL; 212 212 213 - if ((len >> PAGE_SHIFT) > num_physpages) { 214 - pr_err("microcode: too much data (max %ld pages)\n", num_physpages); 213 + if ((len >> PAGE_SHIFT) > totalram_pages) { 214 + pr_err("microcode: too much data (max %ld pages)\n", totalram_pages); 215 215 return ret; 216 216 } 217 217
+2 -2
drivers/char/agp/backend.c
··· 114 114 long memory, index, result; 115 115 116 116 #if PAGE_SHIFT < 20 117 - memory = num_physpages >> (20 - PAGE_SHIFT); 117 + memory = totalram_pages >> (20 - PAGE_SHIFT); 118 118 #else 119 - memory = num_physpages << (PAGE_SHIFT - 20); 119 + memory = totalram_pages << (PAGE_SHIFT - 20); 120 120 #endif 121 121 index = 1; 122 122
+2 -2
drivers/parisc/ccio-dma.c
··· 1266 1266 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). 1267 1267 */ 1268 1268 1269 - iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver)); 1269 + iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver)); 1270 1270 1271 1271 /* limit IOVA space size to 1MB-1GB */ 1272 1272 ··· 1305 1305 1306 1306 DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", 1307 1307 __func__, ioc->ioc_regs, 1308 - (unsigned long) num_physpages >> (20 - PAGE_SHIFT), 1308 + (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), 1309 1309 iova_space_size>>20, 1310 1310 iov_order + PAGE_SHIFT); 1311 1311
+2 -2
drivers/parisc/sba_iommu.c
··· 1390 1390 ** for DMA hints - ergo only 30 bits max. 1391 1391 */ 1392 1392 1393 - iova_space_size = (u32) (num_physpages/global_ioc_cnt); 1393 + iova_space_size = (u32) (totalram_pages/global_ioc_cnt); 1394 1394 1395 1395 /* limit IOVA space size to 1MB-1GB */ 1396 1396 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { ··· 1415 1415 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", 1416 1416 __func__, 1417 1417 ioc->ioc_hpa, 1418 - (unsigned long) num_physpages >> (20 - PAGE_SHIFT), 1418 + (unsigned long) totalram_pages >> (20 - PAGE_SHIFT), 1419 1419 iova_space_size>>20, 1420 1420 iov_order + PAGE_SHIFT); 1421 1421
-4
drivers/xen/balloon.c
··· 96 96 /* We increase/decrease in batches which fit in a page */ 97 97 static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; 98 98 99 - /* VM /proc information for memory */ 100 - extern unsigned long totalram_pages; 101 - 102 99 #ifdef CONFIG_HIGHMEM 103 - extern unsigned long totalhigh_pages; 104 100 #define inc_totalhigh_pages() (totalhigh_pages++) 105 101 #define dec_totalhigh_pages() (totalhigh_pages--) 106 102 #else
+1 -1
fs/ntfs/malloc.h
··· 47 47 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); 48 48 /* return (void *)__get_free_page(gfp_mask); */ 49 49 } 50 - if (likely(size >> PAGE_SHIFT < num_physpages)) 50 + if (likely((size >> PAGE_SHIFT) < totalram_pages)) 51 51 return __vmalloc(size, gfp_mask, PAGE_KERNEL); 52 52 return NULL; 53 53 }
+1
include/linux/mm.h
··· 25 25 #endif 26 26 27 27 extern unsigned long num_physpages; 28 + extern unsigned long totalram_pages; 28 29 extern void * high_memory; 29 30 extern int page_cluster; 30 31
+2 -2
init/main.c
··· 668 668 #endif 669 669 thread_info_cache_init(); 670 670 cred_init(); 671 - fork_init(num_physpages); 671 + fork_init(totalram_pages); 672 672 proc_caches_init(); 673 673 buffer_init(); 674 674 key_init(); 675 675 security_init(); 676 - vfs_caches_init(num_physpages); 676 + vfs_caches_init(totalram_pages); 677 677 radix_tree_init(); 678 678 signals_init(); 679 679 /* rootfs populating might need page-writeback */
+1 -1
mm/slab.c
··· 1384 1384 * Fragmentation resistance on low memory - only use bigger 1385 1385 * page orders on machines with more than 32MB of memory. 1386 1386 */ 1387 - if (num_physpages > (32 << 20) >> PAGE_SHIFT) 1387 + if (totalram_pages > (32 << 20) >> PAGE_SHIFT) 1388 1388 slab_break_gfp_order = BREAK_GFP_ORDER_HI; 1389 1389 1390 1390 /* Bootstrap is tricky, because several objects are allocated
+1 -1
mm/swap.c
··· 496 496 */ 497 497 void __init swap_setup(void) 498 498 { 499 - unsigned long megs = num_physpages >> (20 - PAGE_SHIFT); 499 + unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); 500 500 501 501 #ifdef CONFIG_SWAP 502 502 bdi_init(swapper_space.backing_dev_info);
+2 -2
mm/vmalloc.c
··· 1386 1386 1387 1387 might_sleep(); 1388 1388 1389 - if (count > num_physpages) 1389 + if (count > totalram_pages) 1390 1390 return NULL; 1391 1391 1392 1392 area = get_vm_area_caller((count << PAGE_SHIFT), flags, ··· 1493 1493 unsigned long real_size = size; 1494 1494 1495 1495 size = PAGE_ALIGN(size); 1496 - if (!size || (size >> PAGE_SHIFT) > num_physpages) 1496 + if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1497 1497 return NULL; 1498 1498 1499 1499 area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+2 -2
net/core/sock.c
··· 1206 1206 1207 1207 void __init sk_init(void) 1208 1208 { 1209 - if (num_physpages <= 4096) { 1209 + if (totalram_pages <= 4096) { 1210 1210 sysctl_wmem_max = 32767; 1211 1211 sysctl_rmem_max = 32767; 1212 1212 sysctl_wmem_default = 32767; 1213 1213 sysctl_rmem_default = 32767; 1214 - } else if (num_physpages >= 131072) { 1214 + } else if (totalram_pages >= 131072) { 1215 1215 sysctl_wmem_max = 131071; 1216 1216 sysctl_rmem_max = 131071; 1217 1217 }
+3 -3
net/dccp/proto.c
··· 1049 1049 * 1050 1050 * The methodology is similar to that of the buffer cache. 1051 1051 */ 1052 - if (num_physpages >= (128 * 1024)) 1053 - goal = num_physpages >> (21 - PAGE_SHIFT); 1052 + if (totalram_pages >= (128 * 1024)) 1053 + goal = totalram_pages >> (21 - PAGE_SHIFT); 1054 1054 else 1055 - goal = num_physpages >> (23 - PAGE_SHIFT); 1055 + goal = totalram_pages >> (23 - PAGE_SHIFT); 1056 1056 1057 1057 if (thash_entries) 1058 1058 goal = (thash_entries *
+1 -1
net/decnet/dn_route.c
··· 1750 1750 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; 1751 1751 add_timer(&dn_route_timer); 1752 1752 1753 - goal = num_physpages >> (26 - PAGE_SHIFT); 1753 + goal = totalram_pages >> (26 - PAGE_SHIFT); 1754 1754 1755 1755 for(order = 0; (1UL << order) < goal; order++) 1756 1756 /* NOTHING */;
+1 -1
net/ipv4/route.c
··· 3414 3414 alloc_large_system_hash("IP route cache", 3415 3415 sizeof(struct rt_hash_bucket), 3416 3416 rhash_entries, 3417 - (num_physpages >= 128 * 1024) ? 3417 + (totalram_pages >= 128 * 1024) ? 3418 3418 15 : 17, 3419 3419 0, 3420 3420 &rt_hash_log,
+2 -2
net/ipv4/tcp.c
··· 2862 2862 alloc_large_system_hash("TCP established", 2863 2863 sizeof(struct inet_ehash_bucket), 2864 2864 thash_entries, 2865 - (num_physpages >= 128 * 1024) ? 2865 + (totalram_pages >= 128 * 1024) ? 2866 2866 13 : 15, 2867 2867 0, 2868 2868 &tcp_hashinfo.ehash_size, ··· 2879 2879 alloc_large_system_hash("TCP bind", 2880 2880 sizeof(struct inet_bind_hashbucket), 2881 2881 tcp_hashinfo.ehash_size, 2882 - (num_physpages >= 128 * 1024) ? 2882 + (totalram_pages >= 128 * 1024) ? 2883 2883 13 : 15, 2884 2884 0, 2885 2885 &tcp_hashinfo.bhash_size,
+2 -2
net/netfilter/nf_conntrack_core.c
··· 1245 1245 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ 1246 1246 if (!nf_conntrack_htable_size) { 1247 1247 nf_conntrack_htable_size 1248 - = (((num_physpages << PAGE_SHIFT) / 16384) 1248 + = (((totalram_pages << PAGE_SHIFT) / 16384) 1249 1249 / sizeof(struct hlist_head)); 1250 - if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) 1250 + if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) 1251 1251 nf_conntrack_htable_size = 16384; 1252 1252 if (nf_conntrack_htable_size < 32) 1253 1253 nf_conntrack_htable_size = 32;
+1 -1
net/netfilter/x_tables.c
··· 617 617 int cpu; 618 618 619 619 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ 620 - if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages) 620 + if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) 621 621 return NULL; 622 622 623 623 newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
+4 -4
net/netfilter/xt_hashlimit.c
··· 194 194 if (minfo->cfg.size) 195 195 size = minfo->cfg.size; 196 196 else { 197 - size = ((num_physpages << PAGE_SHIFT) / 16384) / 197 + size = ((totalram_pages << PAGE_SHIFT) / 16384) / 198 198 sizeof(struct list_head); 199 - if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) 199 + if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE)) 200 200 size = 8192; 201 201 if (size < 16) 202 202 size = 16; ··· 266 266 if (minfo->cfg.size) { 267 267 size = minfo->cfg.size; 268 268 } else { 269 - size = (num_physpages << PAGE_SHIFT) / 16384 / 269 + size = (totalram_pages << PAGE_SHIFT) / 16384 / 270 270 sizeof(struct list_head); 271 - if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE) 271 + if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE) 272 272 size = 8192; 273 273 if (size < 16) 274 274 size = 16;
+3 -3
net/netlink/af_netlink.c
··· 2091 2091 if (!nl_table) 2092 2092 goto panic; 2093 2093 2094 - if (num_physpages >= (128 * 1024)) 2095 - limit = num_physpages >> (21 - PAGE_SHIFT); 2094 + if (totalram_pages >= (128 * 1024)) 2095 + limit = totalram_pages >> (21 - PAGE_SHIFT); 2096 2096 else 2097 - limit = num_physpages >> (23 - PAGE_SHIFT); 2097 + limit = totalram_pages >> (23 - PAGE_SHIFT); 2098 2098 2099 2099 order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; 2100 2100 limit = (1UL << order) / sizeof(struct hlist_head);
+3 -3
net/sctp/protocol.c
··· 1184 1184 /* Size and allocate the association hash table. 1185 1185 * The methodology is similar to that of the tcp hash tables. 1186 1186 */ 1187 - if (num_physpages >= (128 * 1024)) 1188 - goal = num_physpages >> (22 - PAGE_SHIFT); 1187 + if (totalram_pages >= (128 * 1024)) 1188 + goal = totalram_pages >> (22 - PAGE_SHIFT); 1189 1189 else 1190 - goal = num_physpages >> (24 - PAGE_SHIFT); 1190 + goal = totalram_pages >> (24 - PAGE_SHIFT); 1191 1191 1192 1192 for (order = 0; (1UL << order) < goal; order++) 1193 1193 ;