Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, pcp: allow restoring percpu_pagelist_fraction default

Oleg reports a division by zero error on zero-length write() to the
percpu_pagelist_fraction sysctl:

divide error: 0000 [#1] SMP DEBUG_PAGEALLOC
CPU: 1 PID: 9142 Comm: badarea_io Not tainted 3.15.0-rc2-vm-nfs+ #19
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
task: ffff8800d5aeb6e0 ti: ffff8800d87a2000 task.ti: ffff8800d87a2000
RIP: 0010: percpu_pagelist_fraction_sysctl_handler+0x84/0x120
RSP: 0018:ffff8800d87a3e78 EFLAGS: 00010246
RAX: 0000000000000f89 RBX: ffff88011f7fd000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000000000010
RBP: ffff8800d87a3e98 R08: ffffffff81d002c8 R09: ffff8800d87a3f50
R10: 000000000000000b R11: 0000000000000246 R12: 0000000000000060
R13: ffffffff81c3c3e0 R14: ffffffff81cfddf8 R15: ffff8801193b0800
FS: 00007f614f1e9740(0000) GS:ffff88011f440000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 00007f614f1fa000 CR3: 00000000d9291000 CR4: 00000000000006e0
Call Trace:
proc_sys_call_handler+0xb3/0xc0
proc_sys_write+0x14/0x20
vfs_write+0xba/0x1e0
SyS_write+0x46/0xb0
tracesys+0xe1/0xe6

However, if the percpu_pagelist_fraction sysctl is set by the user, it
is also impossible to restore it to the kernel default since the user
cannot write 0 to the sysctl.

This patch allows the user to write 0 to restore the default behavior.
It still requires a fraction equal to or larger than 8, however, as
stated by the documentation for sanity. If a value in the range [1, 7]
is written, the sysctl will return EINVAL.

This successfully solves the divide by zero issue at the same time.

Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Oleg Drokin <green@linuxhacker.ru>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by David Rientjes and committed by Linus Torvalds.
Commit: 7cd2b0a3 (parent: df2e1ef6)

+34 -18
+2 -1
Documentation/sysctl/vm.txt
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -702,7 +702,8 @@
 set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero. Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
+the high water marks for each per cpu page list. If the user writes '0' to this
+sysctl, it will revert to this default behavior.
 
 ==============================================================
+1 -2
kernel/sysctl.c
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -136,7 +136,6 @@
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
-static int min_percpu_pagelist_fract = 8;
 
 static int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
@@ -1316,7 +1317,7 @@
 		.maxlen = sizeof(percpu_pagelist_fraction),
 		.mode = 0644,
 		.proc_handler = percpu_pagelist_fraction_sysctl_handler,
-		.extra1 = &min_percpu_pagelist_fract,
+		.extra1 = &zero,
 	},
 #ifdef CONFIG_MMU
 	{
+31 -15
mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -69,6 +69,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION	(8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -4146,7 +4145,7 @@
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
 	int batch;
@@ -4262,8 +4261,8 @@
 	pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-		struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+		struct per_cpu_pageset *pcp)
 {
 	if (percpu_pagelist_fraction)
 		pageset_set_high(pcp,
@@ -5882,23 +5881,37 @@
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	struct zone *zone;
-	unsigned int cpu;
+	int old_percpu_pagelist_fraction;
 	int ret;
 
-	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret < 0))
-		return ret;
-
 	mutex_lock(&pcp_batch_high_lock);
-	for_each_populated_zone(zone) {
-		unsigned long high;
-		high = zone->managed_pages / percpu_pagelist_fraction;
-		for_each_possible_cpu(cpu)
-			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-					 high);
+	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (!write || ret < 0)
+		goto out;
+
+	/* Sanity checking to avoid pcp imbalance */
+	if (percpu_pagelist_fraction &&
+	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+		ret = -EINVAL;
+		goto out;
 	}
+
+	/* No change? */
+	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+		goto out;
+
+	for_each_populated_zone(zone) {
+		unsigned int cpu;
+
+		for_each_possible_cpu(cpu)
+			pageset_set_high_and_batch(zone,
+				per_cpu_ptr(zone->pageset, cpu));
+	}
+out:
 	mutex_unlock(&pcp_batch_high_lock);
-	return 0;
+	return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;