[PATCH] ZVC: Scale thresholds depending on the size of the system

The ZVC counter update threshold is currently set to a fixed value of 32.
This patch sets up the threshold depending on the number of processors and
the sizes of the zones in the system.

With the current threshold of 32, I was able to observe slight contention
when more than 130-140 processors concurrently updated the counters. The
contention vanished when I either increased the threshold to 64 or used
Andrew's idea of overstepping the interval (see ZVC overstep patch).

However, we saw contention again at 220-230 processors. So we need higher
values for larger systems.

But the current default is already a bit of overkill for smaller
systems. Some systems have tiny zones where precision matters. For
example, i386 and x86_64 have 16M DMA zones and either a 900M ZONE_NORMAL
or ZONE_DMA32 zone. These are present even on SMP and NUMA systems.

The patch here sets up a threshold based on the number of processors in the
system and the size of the zone that these counters are used for. The
threshold should grow logarithmically, so we use fls() as an easy
approximation.

Results of tests on a system with 1024 processors (4TB RAM)

The following output is from a test allocating 1GB of memory concurrently
on each processor (forking a separate process per CPU, so contention on
mmap_sem and the pte locks is not a factor):

               MAX       MIN
TYPE:  CPUS   WALL      WALL      SYS      USER     TOTCPU
fork 1 0.552 0.552 0.540 0.012 0.552
fork 4 0.552 0.548 2.164 0.036 2.200
fork 16 0.564 0.548 8.812 0.164 8.976
fork 128 0.580 0.572 72.204 1.208 73.412
fork 256 1.300 0.660 310.400 2.160 312.560
fork 512 3.512 0.696 1526.836 4.816 1531.652
fork 1020 20.024 0.700 17243.176 6.688 17249.863

So a threshold of 32 is fine up to 128 processors. At 256 processors,
contention becomes a factor.

Overstepping the counter (earlier patch) improves the numbers a bit:

fork 4 0.552 0.548 2.164 0.040 2.204
fork 16 0.552 0.548 8.640 0.148 8.788
fork 128 0.556 0.548 69.676 0.956 70.632
fork 256 0.876 0.636 212.468 2.108 214.576
fork 512 2.276 0.672 997.324 4.260 1001.584
fork 1020 13.564 0.680 11586.436 6.088 11592.523

There is still contention at 512 and 1020 processors, though contention at
1020 is down by a third. At 256 processors only slight contention remains.

After this patch the counter threshold will be set to 125 which reduces
contention significantly:

fork 128 0.560 0.548 69.776 0.932 70.708
fork 256 0.636 0.556 143.460 2.036 145.496
fork 512 0.640 0.548 284.244 4.236 288.480
fork 1020 1.500 0.588 1326.152 8.892 1335.044

[akpm@osdl.org: !SMP build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Christoph Lameter and committed by Linus Torvalds df9ecaba a302eb4e

+120 -20
+1
include/linux/mmzone.h
··· 77 struct per_cpu_pageset { 78 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ 79 #ifdef CONFIG_SMP 80 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 81 #endif 82 } ____cacheline_aligned_in_smp;
··· 77 struct per_cpu_pageset { 78 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ 79 #ifdef CONFIG_SMP 80 + s8 stat_threshold; 81 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 82 #endif 83 } ____cacheline_aligned_in_smp;
+119 -20
mm/vmstat.c
··· 12 #include <linux/config.h> 13 #include <linux/mm.h> 14 #include <linux/module.h> 15 16 void __get_zone_counts(unsigned long *active, unsigned long *inactive, 17 unsigned long *free, struct pglist_data *pgdat) ··· 115 116 #ifdef CONFIG_SMP 117 118 - #define STAT_THRESHOLD 32 119 120 /* 121 - * Determine pointer to currently valid differential byte given a zone and 122 - * the item number. 123 - * 124 - * Preemption must be off 125 */ 126 - static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item) 127 { 128 - return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item]; 129 } 130 131 /* ··· 189 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 190 int delta) 191 { 192 - s8 *p; 193 long x; 194 195 - p = diff_pointer(zone, item); 196 x = delta + *p; 197 198 - if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) { 199 zone_page_state_add(x, zone, item); 200 x = 0; 201 } 202 - 203 *p = x; 204 } 205 EXPORT_SYMBOL(__mod_zone_page_state); ··· 227 * No overflow check is necessary and therefore the differential can be 228 * incremented or decremented in place which may allow the compilers to 229 * generate better code. 230 - * 231 * The increment or decrement is known and therefore one boundary check can 232 * be omitted. 233 * 234 * Some processors have inc/dec instructions that are atomic vs an interrupt. 
235 * However, the code must first determine the differential location in a zone ··· 242 */ 243 static void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 244 { 245 - s8 *p = diff_pointer(zone, item); 246 247 (*p)++; 248 249 - if (unlikely(*p > STAT_THRESHOLD)) { 250 - zone_page_state_add(*p + STAT_THRESHOLD / 2, zone, item); 251 - *p = -STAT_THRESHOLD / 2; 252 } 253 } 254 ··· 264 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 265 { 266 struct zone *zone = page_zone(page); 267 - s8 *p = diff_pointer(zone, item); 268 269 (*p)--; 270 271 - if (unlikely(*p < -STAT_THRESHOLD)) { 272 - zone_page_state_add(*p - STAT_THRESHOLD / 2, zone, item); 273 - *p = STAT_THRESHOLD /2; 274 } 275 } 276 EXPORT_SYMBOL(__dec_zone_page_state); ··· 578 pageset->pcp[j].high, 579 pageset->pcp[j].batch); 580 } 581 } 582 seq_printf(m, 583 "\n all_unreclaimable: %u" ··· 670 671 #endif /* CONFIG_PROC_FS */ 672
··· 12 #include <linux/config.h> 13 #include <linux/mm.h> 14 #include <linux/module.h> 15 + #include <linux/cpu.h> 16 17 void __get_zone_counts(unsigned long *active, unsigned long *inactive, 18 unsigned long *free, struct pglist_data *pgdat) ··· 114 115 #ifdef CONFIG_SMP 116 117 + static int calculate_threshold(struct zone *zone) 118 + { 119 + int threshold; 120 + int mem; /* memory in 128 MB units */ 121 + 122 + /* 123 + * The threshold scales with the number of processors and the amount 124 + * of memory per zone. More memory means that we can defer updates for 125 + * longer, more processors could lead to more contention. 126 + * fls() is used to have a cheap way of logarithmic scaling. 127 + * 128 + * Some sample thresholds: 129 + * 130 + * Threshold Processors (fls) Zonesize fls(mem+1) 131 + * ------------------------------------------------------------------ 132 + * 8 1 1 0.9-1 GB 4 133 + * 16 2 2 0.9-1 GB 4 134 + * 20 2 2 1-2 GB 5 135 + * 24 2 2 2-4 GB 6 136 + * 28 2 2 4-8 GB 7 137 + * 32 2 2 8-16 GB 8 138 + * 4 2 2 <128M 1 139 + * 30 4 3 2-4 GB 5 140 + * 48 4 3 8-16 GB 8 141 + * 32 8 4 1-2 GB 4 142 + * 32 8 4 0.9-1GB 4 143 + * 10 16 5 <128M 1 144 + * 40 16 5 900M 4 145 + * 70 64 7 2-4 GB 5 146 + * 84 64 7 4-8 GB 6 147 + * 108 512 9 4-8 GB 6 148 + * 125 1024 10 8-16 GB 8 149 + * 125 1024 10 16-32 GB 9 150 + */ 151 + 152 + mem = zone->present_pages >> (27 - PAGE_SHIFT); 153 + 154 + threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); 155 + 156 + /* 157 + * Maximum threshold is 125 158 + */ 159 + threshold = min(125, threshold); 160 + 161 + return threshold; 162 + } 163 164 /* 165 + * Refresh the thresholds for each zone. 
166 */ 167 + static void refresh_zone_stat_thresholds(void) 168 { 169 + struct zone *zone; 170 + int cpu; 171 + int threshold; 172 + 173 + for_each_zone(zone) { 174 + 175 + if (!zone->present_pages) 176 + continue; 177 + 178 + threshold = calculate_threshold(zone); 179 + 180 + for_each_online_cpu(cpu) 181 + zone_pcp(zone, cpu)->stat_threshold = threshold; 182 + } 183 } 184 185 /* ··· 133 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 134 int delta) 135 { 136 + struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 137 + s8 *p = pcp->vm_stat_diff + item; 138 long x; 139 140 x = delta + *p; 141 142 + if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { 143 zone_page_state_add(x, zone, item); 144 x = 0; 145 } 146 *p = x; 147 } 148 EXPORT_SYMBOL(__mod_zone_page_state); ··· 172 * No overflow check is necessary and therefore the differential can be 173 * incremented or decremented in place which may allow the compilers to 174 * generate better code. 175 * The increment or decrement is known and therefore one boundary check can 176 * be omitted. 177 + * 178 + * NOTE: These functions are very performance sensitive. Change only 179 + * with care. 180 * 181 * Some processors have inc/dec instructions that are atomic vs an interrupt. 
182 * However, the code must first determine the differential location in a zone ··· 185 */ 186 static void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 187 { 188 + struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 189 + s8 *p = pcp->vm_stat_diff + item; 190 191 (*p)++; 192 193 + if (unlikely(*p > pcp->stat_threshold)) { 194 + int overstep = pcp->stat_threshold / 2; 195 + 196 + zone_page_state_add(*p + overstep, zone, item); 197 + *p = -overstep; 198 } 199 } 200 ··· 204 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 205 { 206 struct zone *zone = page_zone(page); 207 + struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id()); 208 + s8 *p = pcp->vm_stat_diff + item; 209 210 (*p)--; 211 212 + if (unlikely(*p < - pcp->stat_threshold)) { 213 + int overstep = pcp->stat_threshold / 2; 214 + 215 + zone_page_state_add(*p - overstep, zone, item); 216 + *p = overstep; 217 } 218 } 219 EXPORT_SYMBOL(__dec_zone_page_state); ··· 515 pageset->pcp[j].high, 516 pageset->pcp[j].batch); 517 } 518 + #ifdef CONFIG_SMP 519 + seq_printf(m, "\n vm stats threshold: %d", 520 + pageset->stat_threshold); 521 + #endif 522 } 523 seq_printf(m, 524 "\n all_unreclaimable: %u" ··· 603 604 #endif /* CONFIG_PROC_FS */ 605 606 + #ifdef CONFIG_SMP 607 + /* 608 + * Use the cpu notifier to insure that the thresholds are recalculated 609 + * when necessary. 
610 + */ 611 + static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, 612 + unsigned long action, 613 + void *hcpu) 614 + { 615 + switch (action) { 616 + case CPU_UP_PREPARE: 617 + case CPU_UP_CANCELED: 618 + case CPU_DEAD: 619 + refresh_zone_stat_thresholds(); 620 + break; 621 + default: 622 + break; 623 + } 624 + return NOTIFY_OK; 625 + } 626 + 627 + static struct notifier_block __cpuinitdata vmstat_notifier = 628 + { &vmstat_cpuup_callback, NULL, 0 }; 629 + 630 + int __init setup_vmstat(void) 631 + { 632 + refresh_zone_stat_thresholds(); 633 + register_cpu_notifier(&vmstat_notifier); 634 + return 0; 635 + } 636 + module_init(setup_vmstat) 637 + #endif