[PATCH] ZVC: Scale thresholds depending on the size of the system

The ZVC counter update threshold is currently set to a fixed value of 32.
This patch sets up the threshold depending on the number of processors and
the sizes of the zones in the system.

With the current threshold of 32, I was able to observe slight contention
when more than 130-140 processors concurrently updated the counters. The
contention vanished when I either increased the threshold to 64 or used
Andrew's idea of overstepping the interval (see ZVC overstep patch).

However, we saw contention again at 220-230 processors. So we need higher
values for larger systems.
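
For reference, the overstepping mentioned above folds the per-CPU differential into
the global counter a little early and then biases it in the opposite direction, so
the next fold is a full threshold away in either direction. A minimal sketch of the
idea (illustrative names only, not the kernel code; the real per-zone version is in
the diff below):

/* Illustrative sketch of counter overstepping (names are made up). */
#define THRESHOLD	32

static long global_counter;	/* shared counter, expensive to update */
static signed char cpu_diff;	/* per-CPU differential, cheap to update */

static void inc_counter(void)
{
	cpu_diff++;

	if (cpu_diff > THRESHOLD) {
		/*
		 * Fold the differential plus half a threshold into the
		 * global counter, then restart the differential at
		 * -THRESHOLD/2 so the next fold is pushed further out.
		 */
		global_counter += cpu_diff + THRESHOLD / 2;
		cpu_diff = -THRESHOLD / 2;
	}
}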

But the current default is already a bit of overkill for smaller systems. Some
systems have tiny zones where precision matters. For example, i386 and x86_64
have 16M DMA zones and either a 900M ZONE_NORMAL or ZONE_DMA32; these are
present even on SMP and NUMA systems.

The patch here sets up a threshold based on the number of processors in the
system and the size of the zone that these counters are used for. The
threshold should grow logarithmically, so we use fls() as an easy
approximation.
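
As a rough userspace sketch of the resulting scaling rule (this mirrors the
calculate_threshold() logic in the diff below; the fls() helper and the
threshold() wrapper here are stand-ins for illustration only):

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): position of the highest set bit. */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* Threshold for a zone of zone_mb megabytes on a machine with 'cpus' CPUs. */
static int threshold(int cpus, unsigned long zone_mb)
{
	int mem = zone_mb / 128;		/* memory in 128 MB units */
	int t = 2 * fls(cpus) * (1 + fls(mem));

	return t > 125 ? 125 : t;		/* maximum threshold is 125 */
}

int main(void)
{
	printf("2 CPUs, 1GB zone:    %d\n", threshold(2, 1024));	/* 20 */
	printf("1024 CPUs, 8GB zone: %d\n", threshold(1024, 8192));	/* capped at 125 */
	return 0;
}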

Results of tests on a system with 1024 processors (4TB RAM):

The following output is from a test that allocates 1GB of memory concurrently on
each processor (each allocation runs in a forked process, so contention on
mmap_sem and the pte locks is not a factor):

TYPE   CPUS   WALL(X)   WALL(MIN)         SYS    USER      TOTCPU
fork      1     0.552       0.552       0.540   0.012       0.552
fork      4     0.552       0.548       2.164   0.036       2.200
fork     16     0.564       0.548       8.812   0.164       8.976
fork    128     0.580       0.572      72.204   1.208      73.412
fork    256     1.300       0.660     310.400   2.160     312.560
fork    512     3.512       0.696    1526.836   4.816    1531.652
fork   1020    20.024       0.700   17243.176   6.688   17249.863

So a threshold of 32 is fine up to 128 processors. At 256 processors contention
becomes a factor.
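
For context, the test above has roughly the shape of the following fork/touch-memory
microbenchmark (a minimal sketch assuming one child per processor; not the actual
test harness used for these numbers):

/* Sketch only: fork one child per CPU, each child faults in 1GB of
 * anonymous memory, the parent measures wall clock time.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <unistd.h>

#define ALLOC_SIZE	(1UL << 30)	/* 1GB per child */

int main(int argc, char **argv)
{
	int ncpus = argc > 1 ? atoi(argv[1]) : 1;
	struct timeval start, end;
	int i;

	gettimeofday(&start, NULL);
	for (i = 0; i < ncpus; i++) {
		if (fork() == 0) {
			/* Each child has its own mm, so mmap_sem and pte lock
			 * contention are not a factor; the page faults update
			 * the per-zone counters concurrently.
			 */
			char *p = mmap(NULL, ALLOC_SIZE, PROT_READ | PROT_WRITE,
				       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
			if (p == MAP_FAILED)
				_exit(1);
			memset(p, 0, ALLOC_SIZE);	/* touch every page */
			_exit(0);
		}
	}
	while (wait(NULL) > 0)
		;
	gettimeofday(&end, NULL);
	printf("wall: %.3f s\n", (end.tv_sec - start.tv_sec) +
	       (end.tv_usec - start.tv_usec) / 1e6);
	return 0;
}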

Overstepping the counter (earlier patch) improves the numbers a bit:

TYPE   CPUS   WALL(X)   WALL(MIN)         SYS    USER      TOTCPU
fork      4     0.552       0.548       2.164   0.040       2.204
fork     16     0.552       0.548       8.640   0.148       8.788
fork    128     0.556       0.548      69.676   0.956      70.632
fork    256     0.876       0.636     212.468   2.108     214.576
fork    512     2.276       0.672     997.324   4.260    1001.584
fork   1020    13.564       0.680   11586.436   6.088   11592.523

There is still contention at 512 and 1020 processors, although contention at
1020 is down by a third. At 256 processors a slight amount of contention remains.

After this patch the counter threshold will be set to 125, which reduces
contention significantly:

TYPE   CPUS   WALL(X)   WALL(MIN)         SYS    USER      TOTCPU
fork    128     0.560       0.548      69.776   0.932      70.708
fork    256     0.636       0.556     143.460   2.036     145.496
fork    512     0.640       0.548     284.244   4.236     288.480
fork   1020     1.500       0.588    1326.152   8.892    1335.044

[akpm@osdl.org: !SMP build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Christoph Lameter and committed by Linus Torvalds (df9ecaba, a302eb4e).

2 files changed: +120 -20

include/linux/mmzone.h (+1)

···
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot. 1: cold */
 #ifdef CONFIG_SMP
+	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
mm/vmstat.c (+119 -20)

···
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free, struct pglist_data *pgdat)
···
 
 #ifdef CONFIG_SMP
 
-#define STAT_THRESHOLD 32
+static int calculate_threshold(struct zone *zone)
+{
+	int threshold;
+	int mem;	/* memory in 128 MB units */
+
+	/*
+	 * The threshold scales with the number of processors and the amount
+	 * of memory per zone. More memory means that we can defer updates for
+	 * longer, more processors could lead to more contention.
+	 * fls() is used to have a cheap way of logarithmic scaling.
+	 *
+	 * Some sample thresholds:
+	 *
+	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
+	 * ------------------------------------------------------------------
+	 * 8		1		1	0.9-1 GB	4
+	 * 16		2		2	0.9-1 GB	4
+	 * 20		2		2	1-2 GB		5
+	 * 24		2		2	2-4 GB		6
+	 * 28		2		2	4-8 GB		7
+	 * 32		2		2	8-16 GB		8
+	 * 4		2		2	<128M		1
+	 * 30		4		3	2-4 GB		5
+	 * 48		4		3	8-16 GB		8
+	 * 32		8		4	1-2 GB		4
+	 * 32		8		4	0.9-1GB		4
+	 * 10		16		5	<128M		1
+	 * 40		16		5	900M		4
+	 * 70		64		7	2-4 GB		5
+	 * 84		64		7	4-8 GB		6
+	 * 108		512		9	4-8 GB		6
+	 * 125		1024		10	8-16 GB		8
+	 * 125		1024		10	16-32 GB	9
+	 */
+
+	mem = zone->present_pages >> (27 - PAGE_SHIFT);
+
+	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
+
+	/*
+	 * Maximum threshold is 125
+	 */
+	threshold = min(125, threshold);
+
+	return threshold;
+}
 
 /*
- * Determine pointer to currently valid differential byte given a zone and
- * the item number.
- *
- * Preemption must be off
+ * Refresh the thresholds for each zone.
  */
-static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
+static void refresh_zone_stat_thresholds(void)
 {
-	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
+	struct zone *zone;
+	int cpu;
+	int threshold;
+
+	for_each_zone(zone) {
+
+		if (!zone->present_pages)
+			continue;
+
+		threshold = calculate_threshold(zone);
+
+		for_each_online_cpu(cpu)
+			zone_pcp(zone, cpu)->stat_threshold = threshold;
+	}
 }
 
 /*
···
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	s8 *p;
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 	long x;
 
-	p = diff_pointer(zone, item);
 	x = delta + *p;
 
-	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
+	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
-
 	*p = x;
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
···
  * No overflow check is necessary and therefore the differential can be
  * incremented or decremented in place which may allow the compilers to
  * generate better code.
- *
  * The increment or decrement is known and therefore one boundary check can
  * be omitted.
+ *
+ * NOTE: These functions are very performance sensitive. Change only
+ * with care.
  *
  * Some processors have inc/dec instructions that are atomic vs an interrupt.
  * However, the code must first determine the differential location in a zone
···
  */
 static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)++;
 
-	if (unlikely(*p > STAT_THRESHOLD)) {
-		zone_page_state_add(*p + STAT_THRESHOLD / 2, zone, item);
-		*p = -STAT_THRESHOLD / 2;
+	if (unlikely(*p > pcp->stat_threshold)) {
+		int overstep = pcp->stat_threshold / 2;
+
+		zone_page_state_add(*p + overstep, zone, item);
+		*p = -overstep;
 	}
 }
 
···
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	struct zone *zone = page_zone(page);
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)--;
 
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p - STAT_THRESHOLD / 2, zone, item);
-		*p = STAT_THRESHOLD /2;
+	if (unlikely(*p < - pcp->stat_threshold)) {
+		int overstep = pcp->stat_threshold / 2;
+
+		zone_page_state_add(*p - overstep, zone, item);
+		*p = overstep;
 	}
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
···
 				   pageset->pcp[j].high,
 				   pageset->pcp[j].batch);
 		}
+#ifdef CONFIG_SMP
+		seq_printf(m, "\n vm stats threshold: %d",
+				pageset->stat_threshold);
+#endif
 	}
 	seq_printf(m,
 		   "\n all_unreclaimable: %u"
···
 
 #endif /* CONFIG_PROC_FS */
 
+#ifdef CONFIG_SMP
+/*
+ * Use the cpu notifier to insure that the thresholds are recalculated
+ * when necessary.
+ */
+static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+		unsigned long action,
+		void *hcpu)
+{
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		refresh_zone_stat_thresholds();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata vmstat_notifier =
+	{ &vmstat_cpuup_callback, NULL, 0 };
+
+int __init setup_vmstat(void)
+{
+	refresh_zone_stat_thresholds();
+	register_cpu_notifier(&vmstat_notifier);
+	return 0;
+}
+module_init(setup_vmstat)
+#endif