Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sysctl: pass kernel pointers to ->proc_handler

Instead of having all the sysctl handlers deal with user pointers, which
is rather hairy in terms of the BPF interaction, copy the input to and
from userspace in common code. This also means that the strings are
always NUL-terminated by the common code, making the API a little bit
safer.

As most handlers just pass through the data to one of the common handlers
a lot of the changes are mechanical.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

authored by

Christoph Hellwig and committed by
Al Viro
32927393 f461d2dc

+459 -654
+1 -1
arch/arm64/kernel/armv8_deprecated.c
··· 203 203 } 204 204 205 205 static int emulation_proc_handler(struct ctl_table *table, int write, 206 - void __user *buffer, size_t *lenp, 206 + void *buffer, size_t *lenp, 207 207 loff_t *ppos) 208 208 { 209 209 int ret = 0;
+1 -2
arch/arm64/kernel/fpsimd.c
··· 341 341 #ifdef CONFIG_SYSCTL 342 342 343 343 static int sve_proc_do_default_vl(struct ctl_table *table, int write, 344 - void __user *buffer, size_t *lenp, 345 - loff_t *ppos) 344 + void *buffer, size_t *lenp, loff_t *ppos) 346 345 { 347 346 int ret; 348 347 int vl = sve_default_vl;
+5 -8
arch/mips/lasat/sysctl.c
··· 95 95 len = 0; 96 96 p = buffer; 97 97 while (len < *lenp) { 98 - if (get_user(c, p++)) 99 - return -EFAULT; 98 + c = *p; 99 + p++; 100 100 if (c == 0 || c == '\n') 101 101 break; 102 102 len++; 103 103 } 104 104 if (len >= sizeof(ipbuf)-1) 105 105 len = sizeof(ipbuf) - 1; 106 - if (copy_from_user(ipbuf, buffer, len)) 107 - return -EFAULT; 106 + memcpy(ipbuf, buffer, len); 108 107 ipbuf[len] = 0; 109 108 *ppos += *lenp; 110 109 /* Now see if we can convert it to a valid IP */ ··· 121 122 if (len > *lenp) 122 123 len = *lenp; 123 124 if (len) 124 - if (copy_to_user(buffer, ipbuf, len)) 125 - return -EFAULT; 125 + memcpy(buffer, ipbuf, len); 126 126 if (len < *lenp) { 127 - if (put_user('\n', ((char *) buffer) + len)) 128 - return -EFAULT; 127 + *((char *)buffer + len) = '\n'; 129 128 len++; 130 129 } 131 130 *lenp = len;
+5 -6
arch/s390/appldata/appldata_base.c
··· 51 51 */ 52 52 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; 53 53 static int appldata_timer_handler(struct ctl_table *ctl, int write, 54 - void __user *buffer, size_t *lenp, loff_t *ppos); 54 + void *buffer, size_t *lenp, loff_t *ppos); 55 55 static int appldata_interval_handler(struct ctl_table *ctl, int write, 56 - void __user *buffer, 57 - size_t *lenp, loff_t *ppos); 56 + void *buffer, size_t *lenp, loff_t *ppos); 58 57 59 58 static struct ctl_table_header *appldata_sysctl_header; 60 59 static struct ctl_table appldata_table[] = { ··· 216 217 */ 217 218 static int 218 219 appldata_timer_handler(struct ctl_table *ctl, int write, 219 - void __user *buffer, size_t *lenp, loff_t *ppos) 220 + void *buffer, size_t *lenp, loff_t *ppos) 220 221 { 221 222 int timer_active = appldata_timer_active; 222 223 int rc; ··· 249 250 */ 250 251 static int 251 252 appldata_interval_handler(struct ctl_table *ctl, int write, 252 - void __user *buffer, size_t *lenp, loff_t *ppos) 253 + void *buffer, size_t *lenp, loff_t *ppos) 253 254 { 254 255 int interval = appldata_interval; 255 256 int rc; ··· 279 280 */ 280 281 static int 281 282 appldata_generic_handler(struct ctl_table *ctl, int write, 282 - void __user *buffer, size_t *lenp, loff_t *ppos) 283 + void *buffer, size_t *lenp, loff_t *ppos) 283 284 { 284 285 struct appldata_ops *ops = NULL, *tmp_ops; 285 286 struct list_head *lh;
+1 -1
arch/s390/kernel/debug.c
··· 867 867 * if debug_active is already off 868 868 */ 869 869 static int s390dbf_procactive(struct ctl_table *table, int write, 870 - void __user *buffer, size_t *lenp, loff_t *ppos) 870 + void *buffer, size_t *lenp, loff_t *ppos) 871 871 { 872 872 if (!write || debug_stoppable || !debug_active) 873 873 return proc_dointvec(table, write, buffer, lenp, ppos);
+1 -1
arch/s390/kernel/topology.c
··· 594 594 early_param("topology", topology_setup); 595 595 596 596 static int topology_ctl_handler(struct ctl_table *ctl, int write, 597 - void __user *buffer, size_t *lenp, loff_t *ppos) 597 + void *buffer, size_t *lenp, loff_t *ppos) 598 598 { 599 599 int enabled = topology_is_enabled(); 600 600 int new_mode;
+5 -7
arch/s390/mm/cmm.c
··· 245 245 } 246 246 247 247 static int cmm_pages_handler(struct ctl_table *ctl, int write, 248 - void __user *buffer, size_t *lenp, loff_t *ppos) 248 + void *buffer, size_t *lenp, loff_t *ppos) 249 249 { 250 250 long nr = cmm_get_pages(); 251 251 struct ctl_table ctl_entry = { ··· 264 264 } 265 265 266 266 static int cmm_timed_pages_handler(struct ctl_table *ctl, int write, 267 - void __user *buffer, size_t *lenp, 267 + void *buffer, size_t *lenp, 268 268 loff_t *ppos) 269 269 { 270 270 long nr = cmm_get_timed_pages(); ··· 284 284 } 285 285 286 286 static int cmm_timeout_handler(struct ctl_table *ctl, int write, 287 - void __user *buffer, size_t *lenp, loff_t *ppos) 287 + void *buffer, size_t *lenp, loff_t *ppos) 288 288 { 289 289 char buf[64], *p; 290 290 long nr, seconds; ··· 297 297 298 298 if (write) { 299 299 len = min(*lenp, sizeof(buf)); 300 - if (copy_from_user(buf, buffer, len)) 301 - return -EFAULT; 300 + memcpy(buf, buffer, len); 302 301 buf[len - 1] = '\0'; 303 302 cmm_skip_blanks(buf, &p); 304 303 nr = simple_strtoul(p, &p, 0); ··· 310 311 cmm_timeout_pages, cmm_timeout_seconds); 311 312 if (len > *lenp) 312 313 len = *lenp; 313 - if (copy_to_user(buffer, buf, len)) 314 - return -EFAULT; 314 + memcpy(buffer, buf, len); 315 315 *lenp = len; 316 316 *ppos += len; 317 317 }
+1 -2
arch/x86/kernel/itmt.c
··· 39 39 unsigned int __read_mostly sysctl_sched_itmt_enabled; 40 40 41 41 static int sched_itmt_update_handler(struct ctl_table *table, int write, 42 - void __user *buffer, size_t *lenp, 43 - loff_t *ppos) 42 + void *buffer, size_t *lenp, loff_t *ppos) 44 43 { 45 44 unsigned int old_sysctl; 46 45 int ret;
+1 -1
drivers/cdrom/cdrom.c
··· 3631 3631 } 3632 3632 3633 3633 static int cdrom_sysctl_handler(struct ctl_table *ctl, int write, 3634 - void __user *buffer, size_t *lenp, loff_t *ppos) 3634 + void *buffer, size_t *lenp, loff_t *ppos) 3635 3635 { 3636 3636 int ret; 3637 3637
+1 -1
drivers/char/random.c
··· 2057 2057 * sysctl system call, as 16 bytes of binary data. 2058 2058 */ 2059 2059 static int proc_do_uuid(struct ctl_table *table, int write, 2060 - void __user *buffer, size_t *lenp, loff_t *ppos) 2060 + void *buffer, size_t *lenp, loff_t *ppos) 2061 2061 { 2062 2062 struct ctl_table fake_table; 2063 2063 unsigned char buf[64], tmp_uuid[16], *uuid;
+1 -2
drivers/macintosh/mac_hid.c
··· 183 183 } 184 184 185 185 static int mac_hid_toggle_emumouse(struct ctl_table *table, int write, 186 - void __user *buffer, size_t *lenp, 187 - loff_t *ppos) 186 + void *buffer, size_t *lenp, loff_t *ppos) 188 187 { 189 188 int *valp = table->data; 190 189 int old_val = *valp;
+18 -21
drivers/parport/procfs.c
··· 34 34 #define PARPORT_MAX_SPINTIME_VALUE 1000 35 35 36 36 static int do_active_device(struct ctl_table *table, int write, 37 - void __user *result, size_t *lenp, loff_t *ppos) 37 + void *result, size_t *lenp, loff_t *ppos) 38 38 { 39 39 struct parport *port = (struct parport *)table->extra1; 40 40 char buffer[256]; ··· 65 65 *lenp = len; 66 66 67 67 *ppos += len; 68 - 69 - return copy_to_user(result, buffer, len) ? -EFAULT : 0; 68 + memcpy(result, buffer, len); 69 + return 0; 70 70 } 71 71 72 72 #ifdef CONFIG_PARPORT_1284 73 73 static int do_autoprobe(struct ctl_table *table, int write, 74 - void __user *result, size_t *lenp, loff_t *ppos) 74 + void *result, size_t *lenp, loff_t *ppos) 75 75 { 76 76 struct parport_device_info *info = table->extra2; 77 77 const char *str; ··· 108 108 109 109 *ppos += len; 110 110 111 - return copy_to_user (result, buffer, len) ? -EFAULT : 0; 111 + memcpy(result, buffer, len); 112 + return 0; 112 113 } 113 114 #endif /* IEEE1284.3 support. */ 114 115 115 116 static int do_hardware_base_addr(struct ctl_table *table, int write, 116 - void __user *result, 117 - size_t *lenp, loff_t *ppos) 117 + void *result, size_t *lenp, loff_t *ppos) 118 118 { 119 119 struct parport *port = (struct parport *)table->extra1; 120 120 char buffer[20]; ··· 136 136 *lenp = len; 137 137 138 138 *ppos += len; 139 - 140 - return copy_to_user(result, buffer, len) ? -EFAULT : 0; 139 + memcpy(result, buffer, len); 140 + return 0; 141 141 } 142 142 143 143 static int do_hardware_irq(struct ctl_table *table, int write, 144 - void __user *result, 145 - size_t *lenp, loff_t *ppos) 144 + void *result, size_t *lenp, loff_t *ppos) 146 145 { 147 146 struct parport *port = (struct parport *)table->extra1; 148 147 char buffer[20]; ··· 163 164 *lenp = len; 164 165 165 166 *ppos += len; 166 - 167 - return copy_to_user(result, buffer, len) ? 
-EFAULT : 0; 167 + memcpy(result, buffer, len); 168 + return 0; 168 169 } 169 170 170 171 static int do_hardware_dma(struct ctl_table *table, int write, 171 - void __user *result, 172 - size_t *lenp, loff_t *ppos) 172 + void *result, size_t *lenp, loff_t *ppos) 173 173 { 174 174 struct parport *port = (struct parport *)table->extra1; 175 175 char buffer[20]; ··· 190 192 *lenp = len; 191 193 192 194 *ppos += len; 193 - 194 - return copy_to_user(result, buffer, len) ? -EFAULT : 0; 195 + memcpy(result, buffer, len); 196 + return 0; 195 197 } 196 198 197 199 static int do_hardware_modes(struct ctl_table *table, int write, 198 - void __user *result, 199 - size_t *lenp, loff_t *ppos) 200 + void *result, size_t *lenp, loff_t *ppos) 200 201 { 201 202 struct parport *port = (struct parport *)table->extra1; 202 203 char buffer[40]; ··· 228 231 *lenp = len; 229 232 230 233 *ppos += len; 231 - 232 - return copy_to_user(result, buffer, len) ? -EFAULT : 0; 234 + memcpy(result, buffer, len); 235 + return 0; 233 236 } 234 237 235 238 #define PARPORT_PORT_DIR(CHILD) { .procname = NULL, .mode = 0555, .child = CHILD }
+1 -1
fs/dcache.c
··· 165 165 return sum < 0 ? 0 : sum; 166 166 } 167 167 168 - int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 168 + int proc_nr_dentry(struct ctl_table *table, int write, void *buffer, 169 169 size_t *lenp, loff_t *ppos) 170 170 { 171 171 dentry_stat.nr_dentry = get_nr_dentry();
+1 -1
fs/drop_caches.c
··· 47 47 } 48 48 49 49 int drop_caches_sysctl_handler(struct ctl_table *table, int write, 50 - void __user *buffer, size_t *length, loff_t *ppos) 50 + void *buffer, size_t *length, loff_t *ppos) 51 51 { 52 52 int ret; 53 53
+2 -2
fs/file_table.c
··· 80 80 */ 81 81 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 82 82 int proc_nr_files(struct ctl_table *table, int write, 83 - void __user *buffer, size_t *lenp, loff_t *ppos) 83 + void *buffer, size_t *lenp, loff_t *ppos) 84 84 { 85 85 files_stat.nr_files = get_nr_files(); 86 86 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 87 87 } 88 88 #else 89 89 int proc_nr_files(struct ctl_table *table, int write, 90 - void __user *buffer, size_t *lenp, loff_t *ppos) 90 + void *buffer, size_t *lenp, loff_t *ppos) 91 91 { 92 92 return -ENOSYS; 93 93 }
+1 -2
fs/fscache/main.c
··· 51 51 static struct ctl_table_header *fscache_sysctl_header; 52 52 53 53 static int fscache_max_active_sysctl(struct ctl_table *table, int write, 54 - void __user *buffer, 55 - size_t *lenp, loff_t *ppos) 54 + void *buffer, size_t *lenp, loff_t *ppos) 56 55 { 57 56 struct workqueue_struct **wqp = table->extra1; 58 57 unsigned int *datap = table->data;
+1 -1
fs/inode.c
··· 108 108 */ 109 109 #ifdef CONFIG_SYSCTL 110 110 int proc_nr_inodes(struct ctl_table *table, int write, 111 - void __user *buffer, size_t *lenp, loff_t *ppos) 111 + void *buffer, size_t *lenp, loff_t *ppos) 112 112 { 113 113 inodes_stat.nr_inodes = get_nr_inodes(); 114 114 inodes_stat.nr_unused = get_nr_inodes_unused();
+31 -20
fs/proc/proc_sysctl.c
··· 539 539 return err; 540 540 } 541 541 542 - static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, 542 + static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf, 543 543 size_t count, loff_t *ppos, int write) 544 544 { 545 545 struct inode *inode = file_inode(filp); 546 546 struct ctl_table_header *head = grab_header(inode); 547 547 struct ctl_table *table = PROC_I(inode)->sysctl_entry; 548 - void *new_buf = NULL; 548 + void *kbuf; 549 549 ssize_t error; 550 550 551 551 if (IS_ERR(head)) ··· 564 564 if (!table->proc_handler) 565 565 goto out; 566 566 567 - error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count, 568 - ppos, &new_buf); 569 - if (error) 570 - goto out; 571 - 572 - /* careful: calling conventions are nasty here */ 573 - if (new_buf) { 574 - mm_segment_t old_fs; 575 - 576 - old_fs = get_fs(); 577 - set_fs(KERNEL_DS); 578 - error = table->proc_handler(table, write, (void __user *)new_buf, 579 - &count, ppos); 580 - set_fs(old_fs); 581 - kfree(new_buf); 567 + if (write) { 568 + kbuf = memdup_user_nul(ubuf, count); 569 + if (IS_ERR(kbuf)) { 570 + error = PTR_ERR(kbuf); 571 + goto out; 572 + } 582 573 } else { 583 - error = table->proc_handler(table, write, buf, &count, ppos); 574 + error = -ENOMEM; 575 + kbuf = kzalloc(count, GFP_KERNEL); 576 + if (!kbuf) 577 + goto out; 584 578 } 585 579 586 - if (!error) 587 - error = count; 580 + error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count, 581 + ppos); 582 + if (error) 583 + goto out_free_buf; 584 + 585 + /* careful: calling conventions are nasty here */ 586 + error = table->proc_handler(table, write, kbuf, &count, ppos); 587 + if (error) 588 + goto out_free_buf; 589 + 590 + if (!write) { 591 + error = -EFAULT; 592 + if (copy_to_user(ubuf, kbuf, count)) 593 + goto out_free_buf; 594 + } 595 + 596 + error = count; 597 + out_free_buf: 598 + kfree(kbuf); 588 599 out: 589 600 sysctl_head_finish(head); 590 601
+1 -1
fs/quota/dquot.c
··· 2841 2841 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); 2842 2842 2843 2843 static int do_proc_dqstats(struct ctl_table *table, int write, 2844 - void __user *buffer, size_t *lenp, loff_t *ppos) 2844 + void *buffer, size_t *lenp, loff_t *ppos) 2845 2845 { 2846 2846 unsigned int type = (unsigned long *)table->data - dqstats.stat; 2847 2847 s64 value = percpu_counter_sum(&dqstats.counter[type]);
+2 -2
fs/xfs/xfs_sysctl.c
··· 13 13 xfs_stats_clear_proc_handler( 14 14 struct ctl_table *ctl, 15 15 int write, 16 - void __user *buffer, 16 + void *buffer, 17 17 size_t *lenp, 18 18 loff_t *ppos) 19 19 { ··· 33 33 xfs_panic_mask_proc_handler( 34 34 struct ctl_table *ctl, 35 35 int write, 36 - void __user *buffer, 36 + void *buffer, 37 37 size_t *lenp, 38 38 loff_t *ppos) 39 39 {
+4 -5
include/linux/bpf-cgroup.h
··· 138 138 139 139 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, 140 140 struct ctl_table *table, int write, 141 - void __user *buf, size_t *pcount, 142 - loff_t *ppos, void **new_buf, 141 + void **buf, size_t *pcount, loff_t *ppos, 143 142 enum bpf_attach_type type); 144 143 145 144 int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, ··· 301 302 }) 302 303 303 304 304 - #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ 305 + #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ 305 306 ({ \ 306 307 int __ret = 0; \ 307 308 if (cgroup_bpf_enabled) \ 308 309 __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ 309 - buf, count, pos, nbuf, \ 310 + buf, count, pos, \ 310 311 BPF_CGROUP_SYSCTL); \ 311 312 __ret; \ 312 313 }) ··· 428 429 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) 429 430 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) 430 431 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) 431 - #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) 432 + #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) 432 433 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) 433 434 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ 434 435 optlen, max_optlen, retval) ({ retval; })
+1 -1
include/linux/compaction.h
··· 86 86 #ifdef CONFIG_COMPACTION 87 87 extern int sysctl_compact_memory; 88 88 extern int sysctl_compaction_handler(struct ctl_table *table, int write, 89 - void __user *buffer, size_t *length, loff_t *ppos); 89 + void *buffer, size_t *length, loff_t *ppos); 90 90 extern int sysctl_extfrag_threshold; 91 91 extern int sysctl_compact_unevictable_allowed; 92 92
+3 -3
include/linux/fs.h
··· 3536 3536 3537 3537 struct ctl_table; 3538 3538 int proc_nr_files(struct ctl_table *table, int write, 3539 - void __user *buffer, size_t *lenp, loff_t *ppos); 3539 + void *buffer, size_t *lenp, loff_t *ppos); 3540 3540 int proc_nr_dentry(struct ctl_table *table, int write, 3541 - void __user *buffer, size_t *lenp, loff_t *ppos); 3541 + void *buffer, size_t *lenp, loff_t *ppos); 3542 3542 int proc_nr_inodes(struct ctl_table *table, int write, 3543 - void __user *buffer, size_t *lenp, loff_t *ppos); 3543 + void *buffer, size_t *lenp, loff_t *ppos); 3544 3544 int __init get_filesystem_list(char *buf); 3545 3545 3546 3546 #define __FMODE_EXEC ((__force int) FMODE_EXEC)
+1 -2
include/linux/ftrace.h
··· 1005 1005 extern int __disable_trace_on_warning; 1006 1006 1007 1007 int tracepoint_printk_sysctl(struct ctl_table *table, int write, 1008 - void __user *buffer, size_t *lenp, 1009 - loff_t *ppos); 1008 + void *buffer, size_t *lenp, loff_t *ppos); 1010 1009 1011 1010 #else /* CONFIG_TRACING */ 1012 1011 static inline void disable_trace_on_warning(void) { }
+7 -8
include/linux/hugetlb.h
··· 105 105 void hugepage_put_subpool(struct hugepage_subpool *spool); 106 106 107 107 void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 108 - int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 109 - int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 110 - int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 111 - 112 - #ifdef CONFIG_NUMA 113 - int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, 114 - void __user *, size_t *, loff_t *); 115 - #endif 108 + int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 109 + int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, 110 + loff_t *); 111 + int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, 112 + loff_t *); 113 + int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, 114 + loff_t *); 116 115 117 116 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 118 117 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+1 -1
include/linux/kprobes.h
··· 312 312 #ifdef CONFIG_SYSCTL 313 313 extern int sysctl_kprobes_optimization; 314 314 extern int proc_kprobes_optimization_handler(struct ctl_table *table, 315 - int write, void __user *buffer, 315 + int write, void *buffer, 316 316 size_t *length, loff_t *ppos); 317 317 #endif 318 318 extern void wait_for_kprobe_optimizer(void);
+2 -2
include/linux/latencytop.h
··· 38 38 39 39 void clear_tsk_latency_tracing(struct task_struct *p); 40 40 41 - extern int sysctl_latencytop(struct ctl_table *table, int write, 42 - void __user *buffer, size_t *lenp, loff_t *ppos); 41 + int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, 42 + size_t *lenp, loff_t *ppos); 43 43 44 44 #else 45 45
+6 -6
include/linux/mm.h
··· 201 201 extern int sysctl_overcommit_ratio; 202 202 extern unsigned long sysctl_overcommit_kbytes; 203 203 204 - extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, 205 - size_t *, loff_t *); 206 - extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, 207 - size_t *, loff_t *); 204 + int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, 205 + loff_t *); 206 + int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, 207 + loff_t *); 208 208 209 209 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 210 210 ··· 2957 2957 2958 2958 #ifdef CONFIG_SYSCTL 2959 2959 extern int sysctl_drop_caches; 2960 - int drop_caches_sysctl_handler(struct ctl_table *, int, 2961 - void __user *, size_t *, loff_t *); 2960 + int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, 2961 + loff_t *); 2962 2962 #endif 2963 2963 2964 2964 void drop_slab(void);
+11 -12
include/linux/mmzone.h
··· 910 910 /* These two functions are used to setup the per zone pages min values */ 911 911 struct ctl_table; 912 912 913 - int min_free_kbytes_sysctl_handler(struct ctl_table *, int, 914 - void __user *, size_t *, loff_t *); 915 - int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, 916 - void __user *, size_t *, loff_t *); 913 + int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, 914 + loff_t *); 915 + int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, 916 + size_t *, loff_t *); 917 917 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; 918 - int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, 919 - void __user *, size_t *, loff_t *); 918 + int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, 919 + size_t *, loff_t *); 920 920 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, 921 - void __user *, size_t *, loff_t *); 921 + void *, size_t *, loff_t *); 922 922 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, 923 - void __user *, size_t *, loff_t *); 923 + void *, size_t *, loff_t *); 924 924 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, 925 - void __user *, size_t *, loff_t *); 926 - 927 - extern int numa_zonelist_order_handler(struct ctl_table *, int, 928 - void __user *, size_t *, loff_t *); 925 + void *, size_t *, loff_t *); 926 + int numa_zonelist_order_handler(struct ctl_table *, int, 927 + void *, size_t *, loff_t *); 929 928 extern int percpu_pagelist_fraction; 930 929 extern char numa_zonelist_order[]; 931 930 #define NUMA_ZONELIST_ORDER_LEN 16
+5 -10
include/linux/nmi.h
··· 202 202 #endif 203 203 204 204 struct ctl_table; 205 - extern int proc_watchdog(struct ctl_table *, int , 206 - void __user *, size_t *, loff_t *); 207 - extern int proc_nmi_watchdog(struct ctl_table *, int , 208 - void __user *, size_t *, loff_t *); 209 - extern int proc_soft_watchdog(struct ctl_table *, int , 210 - void __user *, size_t *, loff_t *); 211 - extern int proc_watchdog_thresh(struct ctl_table *, int , 212 - void __user *, size_t *, loff_t *); 213 - extern int proc_watchdog_cpumask(struct ctl_table *, int, 214 - void __user *, size_t *, loff_t *); 205 + int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); 206 + int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); 207 + int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); 208 + int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); 209 + int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); 215 210 216 211 #ifdef CONFIG_HAVE_ACPI_APEI_NMI 217 212 #include <asm/nmi.h>
+5 -8
include/linux/perf_event.h
··· 1280 1280 1281 1281 extern void perf_sample_event_took(u64 sample_len_ns); 1282 1282 1283 - extern int perf_proc_update_handler(struct ctl_table *table, int write, 1284 - void __user *buffer, size_t *lenp, 1285 - loff_t *ppos); 1286 - extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 1287 - void __user *buffer, size_t *lenp, 1288 - loff_t *ppos); 1289 - 1283 + int perf_proc_update_handler(struct ctl_table *table, int write, 1284 + void *buffer, size_t *lenp, loff_t *ppos); 1285 + int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 1286 + void *buffer, size_t *lenp, loff_t *ppos); 1290 1287 int perf_event_max_stack_handler(struct ctl_table *table, int write, 1291 - void __user *buffer, size_t *lenp, loff_t *ppos); 1288 + void *buffer, size_t *lenp, loff_t *ppos); 1292 1289 1293 1290 /* Access to perf_event_open(2) syscall. */ 1294 1291 #define PERF_SECURITY_OPEN 0
+1 -1
include/linux/printk.h
··· 189 189 extern int dmesg_restrict; 190 190 191 191 extern int 192 - devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, 192 + devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf, 193 193 size_t *lenp, loff_t *ppos); 194 194 195 195 extern void wake_up_klogd(void);
+15 -29
include/linux/sched/sysctl.h
··· 12 12 extern unsigned long sysctl_hung_task_timeout_secs; 13 13 extern unsigned long sysctl_hung_task_check_interval_secs; 14 14 extern int sysctl_hung_task_warnings; 15 - extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 16 - void __user *buffer, 17 - size_t *lenp, loff_t *ppos); 15 + int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 16 + void *buffer, size_t *lenp, loff_t *ppos); 18 17 #else 19 18 /* Avoid need for ifdefs elsewhere in the code */ 20 19 enum { sysctl_hung_task_timeout_secs = 0 }; ··· 42 43 extern __read_mostly unsigned int sysctl_sched_nr_migrate; 43 44 44 45 int sched_proc_update_handler(struct ctl_table *table, int write, 45 - void __user *buffer, size_t *length, 46 - loff_t *ppos); 46 + void *buffer, size_t *length, loff_t *ppos); 47 47 #endif 48 48 49 49 /* ··· 70 72 extern int sysctl_sched_rr_timeslice; 71 73 extern int sched_rr_timeslice; 72 74 73 - extern int sched_rr_handler(struct ctl_table *table, int write, 74 - void __user *buffer, size_t *lenp, 75 - loff_t *ppos); 76 - 77 - extern int sched_rt_handler(struct ctl_table *table, int write, 78 - void __user *buffer, size_t *lenp, 79 - loff_t *ppos); 80 - 81 - #ifdef CONFIG_UCLAMP_TASK 82 - extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 83 - void __user *buffer, size_t *lenp, 84 - loff_t *ppos); 85 - #endif 86 - 87 - extern int sysctl_numa_balancing(struct ctl_table *table, int write, 88 - void __user *buffer, size_t *lenp, 89 - loff_t *ppos); 90 - 91 - extern int sysctl_schedstats(struct ctl_table *table, int write, 92 - void __user *buffer, size_t *lenp, 93 - loff_t *ppos); 75 + int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 76 + size_t *lenp, loff_t *ppos); 77 + int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 78 + size_t *lenp, loff_t *ppos); 79 + int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 80 + void *buffer, size_t *lenp, loff_t *ppos); 
81 + int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, 82 + size_t *lenp, loff_t *ppos); 83 + int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 84 + size_t *lenp, loff_t *ppos); 94 85 95 86 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 96 87 extern unsigned int sysctl_sched_energy_aware; 97 - extern int sched_energy_aware_handler(struct ctl_table *table, int write, 98 - void __user *buffer, size_t *lenp, 99 - loff_t *ppos); 88 + int sched_energy_aware_handler(struct ctl_table *table, int write, 89 + void *buffer, size_t *lenp, loff_t *ppos); 100 90 #endif 101 91 102 92 #endif /* _LINUX_SCHED_SYSCTL_H */
+1 -1
include/linux/security.h
··· 211 211 212 212 #ifdef CONFIG_MMU 213 213 extern int mmap_min_addr_handler(struct ctl_table *table, int write, 214 - void __user *buffer, size_t *lenp, loff_t *ppos); 214 + void *buffer, size_t *lenp, loff_t *ppos); 215 215 #endif 216 216 217 217 /* security_inode_init_security callback function to write xattrs */
+21 -30
include/linux/sysctl.h
··· 44 44 45 45 extern const int sysctl_vals[]; 46 46 47 - typedef int proc_handler (struct ctl_table *ctl, int write, 48 - void __user *buffer, size_t *lenp, loff_t *ppos); 47 + typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, 48 + size_t *lenp, loff_t *ppos); 49 49 50 - extern int proc_dostring(struct ctl_table *, int, 51 - void __user *, size_t *, loff_t *); 52 - extern int proc_dointvec(struct ctl_table *, int, 53 - void __user *, size_t *, loff_t *); 54 - extern int proc_douintvec(struct ctl_table *, int, 55 - void __user *, size_t *, loff_t *); 56 - extern int proc_dointvec_minmax(struct ctl_table *, int, 57 - void __user *, size_t *, loff_t *); 58 - extern int proc_douintvec_minmax(struct ctl_table *table, int write, 59 - void __user *buffer, size_t *lenp, 60 - loff_t *ppos); 61 - extern int proc_dointvec_jiffies(struct ctl_table *, int, 62 - void __user *, size_t *, loff_t *); 63 - extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, 64 - void __user *, size_t *, loff_t *); 65 - extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, 66 - void __user *, size_t *, loff_t *); 67 - extern int proc_doulongvec_minmax(struct ctl_table *, int, 68 - void __user *, size_t *, loff_t *); 69 - extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, 70 - void __user *, size_t *, loff_t *); 71 - extern int proc_do_large_bitmap(struct ctl_table *, int, 72 - void __user *, size_t *, loff_t *); 73 - extern int proc_do_static_key(struct ctl_table *table, int write, 74 - void __user *buffer, size_t *lenp, 75 - loff_t *ppos); 50 + int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); 51 + int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); 52 + int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); 53 + int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); 54 + int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, 55 
+ size_t *lenp, loff_t *ppos); 56 + int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); 57 + int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, 58 + loff_t *); 59 + int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, 60 + loff_t *); 61 + int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); 62 + int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *, 63 + size_t *, loff_t *); 64 + int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); 65 + int proc_do_static_key(struct ctl_table *table, int write, void *buffer, 66 + size_t *lenp, loff_t *ppos); 76 67 77 68 /* 78 69 * Register a set of sysctl names by calling register_sysctl_table ··· 237 246 238 247 #endif /* CONFIG_SYSCTL */ 239 248 240 - int sysctl_max_threads(struct ctl_table *table, int write, 241 - void __user *buffer, size_t *lenp, loff_t *ppos); 249 + int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, 250 + size_t *lenp, loff_t *ppos); 242 251 243 252 #endif /* _LINUX_SYSCTL_H */
+1 -2
include/linux/timer.h
··· 201 201 202 202 extern unsigned int sysctl_timer_migration; 203 203 int timer_migration_handler(struct ctl_table *table, int write, 204 - void __user *buffer, size_t *lenp, 205 - loff_t *ppos); 204 + void *buffer, size_t *lenp, loff_t *ppos); 206 205 #endif 207 206 208 207 unsigned long __round_jiffies(unsigned long j, int cpu);
+4 -4
include/linux/vmstat.h
··· 16 16 #define DISABLE_NUMA_STAT 0 17 17 extern int sysctl_vm_numa_stat; 18 18 DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); 19 - extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, 20 - int write, void __user *buffer, size_t *length, loff_t *ppos); 19 + int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 20 + void *buffer, size_t *length, loff_t *ppos); 21 21 #endif 22 22 23 23 struct reclaim_stat { ··· 274 274 void refresh_zone_stat_thresholds(void); 275 275 276 276 struct ctl_table; 277 - int vmstat_refresh(struct ctl_table *, int write, 278 - void __user *buffer, size_t *lenp, loff_t *ppos); 277 + int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, 278 + loff_t *ppos); 279 279 280 280 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); 281 281
+11 -17
include/linux/writeback.h
··· 362 362 extern int block_dump; 363 363 extern int laptop_mode; 364 364 365 - extern int dirty_background_ratio_handler(struct ctl_table *table, int write, 366 - void __user *buffer, size_t *lenp, 367 - loff_t *ppos); 368 - extern int dirty_background_bytes_handler(struct ctl_table *table, int write, 369 - void __user *buffer, size_t *lenp, 370 - loff_t *ppos); 371 - extern int dirty_ratio_handler(struct ctl_table *table, int write, 372 - void __user *buffer, size_t *lenp, 373 - loff_t *ppos); 374 - extern int dirty_bytes_handler(struct ctl_table *table, int write, 375 - void __user *buffer, size_t *lenp, 376 - loff_t *ppos); 365 + int dirty_background_ratio_handler(struct ctl_table *table, int write, 366 + void *buffer, size_t *lenp, loff_t *ppos); 367 + int dirty_background_bytes_handler(struct ctl_table *table, int write, 368 + void *buffer, size_t *lenp, loff_t *ppos); 369 + int dirty_ratio_handler(struct ctl_table *table, int write, 370 + void *buffer, size_t *lenp, loff_t *ppos); 371 + int dirty_bytes_handler(struct ctl_table *table, int write, 372 + void *buffer, size_t *lenp, loff_t *ppos); 377 373 int dirtytime_interval_handler(struct ctl_table *table, int write, 378 - void __user *buffer, size_t *lenp, loff_t *ppos); 379 - 380 - struct ctl_table; 381 - int dirty_writeback_centisecs_handler(struct ctl_table *, int, 382 - void __user *, size_t *, loff_t *); 374 + void *buffer, size_t *lenp, loff_t *ppos); 375 + int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, 376 + void *buffer, size_t *lenp, loff_t *ppos); 383 377 384 378 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); 385 379 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
+5 -5
ipc/ipc_sysctl.c
··· 24 24 25 25 #ifdef CONFIG_PROC_SYSCTL 26 26 static int proc_ipc_dointvec(struct ctl_table *table, int write, 27 - void __user *buffer, size_t *lenp, loff_t *ppos) 27 + void *buffer, size_t *lenp, loff_t *ppos) 28 28 { 29 29 struct ctl_table ipc_table; 30 30 ··· 35 35 } 36 36 37 37 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write, 38 - void __user *buffer, size_t *lenp, loff_t *ppos) 38 + void *buffer, size_t *lenp, loff_t *ppos) 39 39 { 40 40 struct ctl_table ipc_table; 41 41 ··· 46 46 } 47 47 48 48 static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write, 49 - void __user *buffer, size_t *lenp, loff_t *ppos) 49 + void *buffer, size_t *lenp, loff_t *ppos) 50 50 { 51 51 struct ipc_namespace *ns = current->nsproxy->ipc_ns; 52 52 int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos); ··· 59 59 } 60 60 61 61 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write, 62 - void __user *buffer, size_t *lenp, loff_t *ppos) 62 + void *buffer, size_t *lenp, loff_t *ppos) 63 63 { 64 64 struct ctl_table ipc_table; 65 65 memcpy(&ipc_table, table, sizeof(ipc_table)); ··· 70 70 } 71 71 72 72 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write, 73 - void __user *buffer, size_t *lenp, loff_t *ppos) 73 + void *buffer, size_t *lenp, loff_t *ppos) 74 74 { 75 75 struct ctl_table ipc_table; 76 76 int dummy = 0;
+2 -2
ipc/mq_sysctl.c
··· 19 19 } 20 20 21 21 static int proc_mq_dointvec(struct ctl_table *table, int write, 22 - void __user *buffer, size_t *lenp, loff_t *ppos) 22 + void *buffer, size_t *lenp, loff_t *ppos) 23 23 { 24 24 struct ctl_table mq_table; 25 25 memcpy(&mq_table, table, sizeof(mq_table)); ··· 29 29 } 30 30 31 31 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write, 32 - void __user *buffer, size_t *lenp, loff_t *ppos) 32 + void *buffer, size_t *lenp, loff_t *ppos) 33 33 { 34 34 struct ctl_table mq_table; 35 35 memcpy(&mq_table, table, sizeof(mq_table));
+12 -23
kernel/bpf/cgroup.c
··· 1137 1137 * @head: sysctl table header 1138 1138 * @table: sysctl table 1139 1139 * @write: sysctl is being read (= 0) or written (= 1) 1140 - * @buf: pointer to buffer passed by user space 1140 + * @buf: pointer to buffer (in and out) 1141 1141 * @pcount: value-result argument: value is size of buffer pointed to by @buf, 1142 1142 * result is size of @new_buf if program set new value, initial value 1143 1143 * otherwise 1144 1144 * @ppos: value-result argument: value is position at which read from or write 1145 1145 * to sysctl is happening, result is new position if program overrode it, 1146 1146 * initial value otherwise 1147 - * @new_buf: pointer to pointer to new buffer that will be allocated if program 1148 - * overrides new value provided by user space on sysctl write 1149 - * NOTE: it's caller responsibility to free *new_buf if it was set 1150 1147 * @type: type of program to be executed 1151 1148 * 1152 1149 * Program is run when sysctl is being accessed, either read or written, and ··· 1154 1157 */ 1155 1158 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, 1156 1159 struct ctl_table *table, int write, 1157 - void __user *buf, size_t *pcount, 1158 - loff_t *ppos, void **new_buf, 1160 + void **buf, size_t *pcount, loff_t *ppos, 1159 1161 enum bpf_attach_type type) 1160 1162 { 1161 1163 struct bpf_sysctl_kern ctx = { ··· 1169 1173 .new_updated = 0, 1170 1174 }; 1171 1175 struct cgroup *cgrp; 1176 + loff_t pos = 0; 1172 1177 int ret; 1173 1178 1174 1179 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL); 1175 - if (ctx.cur_val) { 1176 - mm_segment_t old_fs; 1177 - loff_t pos = 0; 1178 - 1179 - old_fs = get_fs(); 1180 - set_fs(KERNEL_DS); 1181 - if (table->proc_handler(table, 0, (void __user *)ctx.cur_val, 1182 - &ctx.cur_len, &pos)) { 1183 - /* Let BPF program decide how to proceed. 
*/ 1184 - ctx.cur_len = 0; 1185 - } 1186 - set_fs(old_fs); 1187 - } else { 1180 + if (!ctx.cur_val || 1181 + table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) { 1188 1182 /* Let BPF program decide how to proceed. */ 1189 1183 ctx.cur_len = 0; 1190 1184 } 1191 1185 1192 - if (write && buf && *pcount) { 1186 + if (write && *buf && *pcount) { 1193 1187 /* BPF program should be able to override new value with a 1194 1188 * buffer bigger than provided by user. 1195 1189 */ 1196 1190 ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL); 1197 1191 ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount); 1198 - if (!ctx.new_val || 1199 - copy_from_user(ctx.new_val, buf, ctx.new_len)) 1192 + if (ctx.new_val) { 1193 + memcpy(ctx.new_val, *buf, ctx.new_len); 1194 + } else { 1200 1195 /* Let BPF program decide how to proceed. */ 1201 1196 ctx.new_len = 0; 1197 + } 1202 1198 } 1203 1199 1204 1200 rcu_read_lock(); ··· 1201 1213 kfree(ctx.cur_val); 1202 1214 1203 1215 if (ret == 1 && ctx.new_updated) { 1204 - *new_buf = ctx.new_val; 1216 + kfree(*buf); 1217 + *buf = ctx.new_val; 1205 1218 *pcount = ctx.new_len; 1206 1219 } else { 1207 1220 kfree(ctx.new_val);
+1 -1
kernel/events/callchain.c
··· 236 236 * sysctl_perf_event_max_contexts_per_stack. 237 237 */ 238 238 int perf_event_max_stack_handler(struct ctl_table *table, int write, 239 - void __user *buffer, size_t *lenp, loff_t *ppos) 239 + void *buffer, size_t *lenp, loff_t *ppos) 240 240 { 241 241 int *value = table->data; 242 242 int new_value = *value, ret;
+2 -4
kernel/events/core.c
··· 437 437 static bool perf_rotate_context(struct perf_cpu_context *cpuctx); 438 438 439 439 int perf_proc_update_handler(struct ctl_table *table, int write, 440 - void __user *buffer, size_t *lenp, 441 - loff_t *ppos) 440 + void *buffer, size_t *lenp, loff_t *ppos) 442 441 { 443 442 int ret; 444 443 int perf_cpu = sysctl_perf_cpu_time_max_percent; ··· 461 462 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 462 463 463 464 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 464 - void __user *buffer, size_t *lenp, 465 - loff_t *ppos) 465 + void *buffer, size_t *lenp, loff_t *ppos) 466 466 { 467 467 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 468 468
+1 -1
kernel/kprobes.c
··· 892 892 static DEFINE_MUTEX(kprobe_sysctl_mutex); 893 893 int sysctl_kprobes_optimization; 894 894 int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 895 - void __user *buffer, size_t *length, 895 + void *buffer, size_t *length, 896 896 loff_t *ppos) 897 897 { 898 898 int ret;
+2 -2
kernel/latencytop.c
··· 269 269 return 0; 270 270 } 271 271 272 - int sysctl_latencytop(struct ctl_table *table, int write, 273 - void __user *buffer, size_t *lenp, loff_t *ppos) 272 + int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, 273 + size_t *lenp, loff_t *ppos) 274 274 { 275 275 int err; 276 276
+1 -1
kernel/pid_namespace.c
··· 263 263 264 264 #ifdef CONFIG_CHECKPOINT_RESTORE 265 265 static int pid_ns_ctl_handler(struct ctl_table *table, int write, 266 - void __user *buffer, size_t *lenp, loff_t *ppos) 266 + void *buffer, size_t *lenp, loff_t *ppos) 267 267 { 268 268 struct pid_namespace *pid_ns = task_active_pid_ns(current); 269 269 struct ctl_table tmp = *table;
+1 -1
kernel/printk/printk.c
··· 173 173 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; 174 174 175 175 int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, 176 - void __user *buffer, size_t *lenp, loff_t *ppos) 176 + void *buffer, size_t *lenp, loff_t *ppos) 177 177 { 178 178 char old_str[DEVKMSG_STR_MAX_SIZE]; 179 179 unsigned int old;
+4 -5
kernel/sched/core.c
··· 1110 1110 #endif 1111 1111 1112 1112 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 1113 - void __user *buffer, size_t *lenp, 1114 - loff_t *ppos) 1113 + void *buffer, size_t *lenp, loff_t *ppos) 1115 1114 { 1116 1115 bool update_root_tg = false; 1117 1116 int old_min, old_max; ··· 2722 2723 2723 2724 #ifdef CONFIG_PROC_SYSCTL 2724 2725 int sysctl_numa_balancing(struct ctl_table *table, int write, 2725 - void __user *buffer, size_t *lenp, loff_t *ppos) 2726 + void *buffer, size_t *lenp, loff_t *ppos) 2726 2727 { 2727 2728 struct ctl_table t; 2728 2729 int err; ··· 2796 2797 } 2797 2798 2798 2799 #ifdef CONFIG_PROC_SYSCTL 2799 - int sysctl_schedstats(struct ctl_table *table, int write, 2800 - void __user *buffer, size_t *lenp, loff_t *ppos) 2800 + int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 2801 + size_t *lenp, loff_t *ppos) 2801 2802 { 2802 2803 struct ctl_table t; 2803 2804 int err;
+1 -2
kernel/sched/fair.c
··· 645 645 */ 646 646 647 647 int sched_proc_update_handler(struct ctl_table *table, int write, 648 - void __user *buffer, size_t *lenp, 649 - loff_t *ppos) 648 + void *buffer, size_t *lenp, loff_t *ppos) 650 649 { 651 650 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 652 651 unsigned int factor = get_update_sysctl_factor();
+4 -6
kernel/sched/rt.c
··· 2714 2714 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 2715 2715 } 2716 2716 2717 - int sched_rt_handler(struct ctl_table *table, int write, 2718 - void __user *buffer, size_t *lenp, 2719 - loff_t *ppos) 2717 + int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 2718 + size_t *lenp, loff_t *ppos) 2720 2719 { 2721 2720 int old_period, old_runtime; 2722 2721 static DEFINE_MUTEX(mutex); ··· 2753 2754 return ret; 2754 2755 } 2755 2756 2756 - int sched_rr_handler(struct ctl_table *table, int write, 2757 - void __user *buffer, size_t *lenp, 2758 - loff_t *ppos) 2757 + int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 2758 + size_t *lenp, loff_t *ppos) 2759 2759 { 2760 2760 int ret; 2761 2761 static DEFINE_MUTEX(mutex);
+1 -1
kernel/sched/topology.c
··· 209 209 210 210 #ifdef CONFIG_PROC_SYSCTL 211 211 int sched_energy_aware_handler(struct ctl_table *table, int write, 212 - void __user *buffer, size_t *lenp, loff_t *ppos) 212 + void *buffer, size_t *lenp, loff_t *ppos) 213 213 { 214 214 int ret, state; 215 215
+1 -1
kernel/seccomp.c
··· 1776 1776 } 1777 1777 1778 1778 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write, 1779 - void __user *buffer, size_t *lenp, 1779 + void *buffer, size_t *lenp, 1780 1780 loff_t *ppos) 1781 1781 { 1782 1782 int ret;
+91 -148
kernel/sysctl.c
··· 208 208 #ifdef CONFIG_PROC_SYSCTL 209 209 210 210 static int _proc_do_string(char *data, int maxlen, int write, 211 - char __user *buffer, 212 - size_t *lenp, loff_t *ppos) 211 + char *buffer, size_t *lenp, loff_t *ppos) 213 212 { 214 213 size_t len; 215 - char __user *p; 216 - char c; 214 + char c, *p; 217 215 218 216 if (!data || !maxlen || !*lenp) { 219 217 *lenp = 0; ··· 236 238 *ppos += *lenp; 237 239 p = buffer; 238 240 while ((p - buffer) < *lenp && len < maxlen - 1) { 239 - if (get_user(c, p++)) 240 - return -EFAULT; 241 + c = *(p++); 241 242 if (c == 0 || c == '\n') 242 243 break; 243 244 data[len++] = c; ··· 258 261 if (len > *lenp) 259 262 len = *lenp; 260 263 if (len) 261 - if (copy_to_user(buffer, data, len)) 262 - return -EFAULT; 264 + memcpy(buffer, data, len); 263 265 if (len < *lenp) { 264 - if (put_user('\n', buffer + len)) 265 - return -EFAULT; 266 + buffer[len] = '\n'; 266 267 len++; 267 268 } 268 269 *lenp = len; ··· 321 326 * Returns 0 on success. 322 327 */ 323 328 int proc_dostring(struct ctl_table *table, int write, 324 - void __user *buffer, size_t *lenp, loff_t *ppos) 329 + void *buffer, size_t *lenp, loff_t *ppos) 325 330 { 326 331 if (write) 327 332 proc_first_pos_non_zero_ignore(ppos, table); 328 333 329 - return _proc_do_string((char *)(table->data), table->maxlen, write, 330 - (char __user *)buffer, lenp, ppos); 334 + return _proc_do_string(table->data, table->maxlen, write, buffer, lenp, 335 + ppos); 331 336 } 332 337 333 338 static size_t proc_skip_spaces(char **buf) ··· 458 463 * @val: the integer to be converted 459 464 * @neg: sign of the number, %TRUE for negative 460 465 * 461 - * In case of success %0 is returned and @buf and @size are updated with 462 - * the amount of bytes written. 466 + * In case of success @buf and @size are updated with the amount of bytes 467 + * written. 
463 468 */ 464 - static int proc_put_long(void __user **buf, size_t *size, unsigned long val, 465 - bool neg) 469 + static void proc_put_long(void **buf, size_t *size, unsigned long val, bool neg) 466 470 { 467 471 int len; 468 472 char tmp[TMPBUFLEN], *p = tmp; ··· 470 476 len = strlen(tmp); 471 477 if (len > *size) 472 478 len = *size; 473 - if (copy_to_user(*buf, tmp, len)) 474 - return -EFAULT; 479 + memcpy(*buf, tmp, len); 475 480 *size -= len; 476 481 *buf += len; 477 - return 0; 478 482 } 479 483 #undef TMPBUFLEN 480 484 481 - static int proc_put_char(void __user **buf, size_t *size, char c) 485 + static void proc_put_char(void **buf, size_t *size, char c) 482 486 { 483 487 if (*size) { 484 - char __user **buffer = (char __user **)buf; 485 - if (put_user(c, *buffer)) 486 - return -EFAULT; 487 - (*size)--, (*buffer)++; 488 + char **buffer = (char **)buf; 489 + **buffer = c; 490 + 491 + (*size)--; 492 + (*buffer)++; 488 493 *buf = *buffer; 489 494 } 490 - return 0; 491 495 } 492 496 493 497 static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, ··· 533 541 static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; 534 542 535 543 static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, 536 - int write, void __user *buffer, 544 + int write, void *buffer, 537 545 size_t *lenp, loff_t *ppos, 538 546 int (*conv)(bool *negp, unsigned long *lvalp, int *valp, 539 547 int write, void *data), ··· 541 549 { 542 550 int *i, vleft, first = 1, err = 0; 543 551 size_t left; 544 - char *kbuf = NULL, *p; 552 + char *p; 545 553 546 554 if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { 547 555 *lenp = 0; ··· 561 569 562 570 if (left > PAGE_SIZE - 1) 563 571 left = PAGE_SIZE - 1; 564 - p = kbuf = memdup_user_nul(buffer, left); 565 - if (IS_ERR(kbuf)) 566 - return PTR_ERR(kbuf); 572 + p = buffer; 567 573 } 568 574 569 575 for (; left && vleft--; i++, first=0) { ··· 588 598 break; 589 599 } 590 600 if (!first) 591 - err = 
proc_put_char(&buffer, &left, '\t'); 592 - if (err) 593 - break; 594 - err = proc_put_long(&buffer, &left, lval, neg); 595 - if (err) 596 - break; 601 + proc_put_char(&buffer, &left, '\t'); 602 + proc_put_long(&buffer, &left, lval, neg); 597 603 } 598 604 } 599 605 600 606 if (!write && !first && left && !err) 601 - err = proc_put_char(&buffer, &left, '\n'); 607 + proc_put_char(&buffer, &left, '\n'); 602 608 if (write && !err && left) 603 609 left -= proc_skip_spaces(&p); 604 - if (write) { 605 - kfree(kbuf); 606 - if (first) 607 - return err ? : -EINVAL; 608 - } 610 + if (write && first) 611 + return err ? : -EINVAL; 609 612 *lenp -= left; 610 613 out: 611 614 *ppos += *lenp; ··· 606 623 } 607 624 608 625 static int do_proc_dointvec(struct ctl_table *table, int write, 609 - void __user *buffer, size_t *lenp, loff_t *ppos, 626 + void *buffer, size_t *lenp, loff_t *ppos, 610 627 int (*conv)(bool *negp, unsigned long *lvalp, int *valp, 611 628 int write, void *data), 612 629 void *data) ··· 617 634 618 635 static int do_proc_douintvec_w(unsigned int *tbl_data, 619 636 struct ctl_table *table, 620 - void __user *buffer, 637 + void *buffer, 621 638 size_t *lenp, loff_t *ppos, 622 639 int (*conv)(unsigned long *lvalp, 623 640 unsigned int *valp, ··· 628 645 int err = 0; 629 646 size_t left; 630 647 bool neg; 631 - char *kbuf = NULL, *p; 648 + char *p = buffer; 632 649 633 650 left = *lenp; 634 651 ··· 637 654 638 655 if (left > PAGE_SIZE - 1) 639 656 left = PAGE_SIZE - 1; 640 - 641 - p = kbuf = memdup_user_nul(buffer, left); 642 - if (IS_ERR(kbuf)) 643 - return -EINVAL; 644 657 645 658 left -= proc_skip_spaces(&p); 646 659 if (!left) { ··· 661 682 left -= proc_skip_spaces(&p); 662 683 663 684 out_free: 664 - kfree(kbuf); 665 685 if (err) 666 686 return -EINVAL; 667 687 ··· 672 694 return err; 673 695 } 674 696 675 - static int do_proc_douintvec_r(unsigned int *tbl_data, void __user *buffer, 697 + static int do_proc_douintvec_r(unsigned int *tbl_data, void *buffer, 676 
698 size_t *lenp, loff_t *ppos, 677 699 int (*conv)(unsigned long *lvalp, 678 700 unsigned int *valp, ··· 690 712 goto out; 691 713 } 692 714 693 - err = proc_put_long(&buffer, &left, lval, false); 694 - if (err || !left) 715 + proc_put_long(&buffer, &left, lval, false); 716 + if (!left) 695 717 goto out; 696 718 697 - err = proc_put_char(&buffer, &left, '\n'); 719 + proc_put_char(&buffer, &left, '\n'); 698 720 699 721 out: 700 722 *lenp -= left; ··· 704 726 } 705 727 706 728 static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table, 707 - int write, void __user *buffer, 729 + int write, void *buffer, 708 730 size_t *lenp, loff_t *ppos, 709 731 int (*conv)(unsigned long *lvalp, 710 732 unsigned int *valp, ··· 740 762 } 741 763 742 764 static int do_proc_douintvec(struct ctl_table *table, int write, 743 - void __user *buffer, size_t *lenp, loff_t *ppos, 765 + void *buffer, size_t *lenp, loff_t *ppos, 744 766 int (*conv)(unsigned long *lvalp, 745 767 unsigned int *valp, 746 768 int write, void *data), ··· 763 785 * 764 786 * Returns 0 on success. 765 787 */ 766 - int proc_dointvec(struct ctl_table *table, int write, 767 - void __user *buffer, size_t *lenp, loff_t *ppos) 788 + int proc_dointvec(struct ctl_table *table, int write, void *buffer, 789 + size_t *lenp, loff_t *ppos) 768 790 { 769 791 return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL); 770 792 } 771 793 772 794 #ifdef CONFIG_COMPACTION 773 795 static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, 774 - int write, void __user *buffer, 775 - size_t *lenp, loff_t *ppos) 796 + int write, void *buffer, size_t *lenp, loff_t *ppos) 776 797 { 777 798 int ret, old; 778 799 ··· 803 826 * 804 827 * Returns 0 on success. 
805 828 */ 806 - int proc_douintvec(struct ctl_table *table, int write, 807 - void __user *buffer, size_t *lenp, loff_t *ppos) 829 + int proc_douintvec(struct ctl_table *table, int write, void *buffer, 830 + size_t *lenp, loff_t *ppos) 808 831 { 809 832 return do_proc_douintvec(table, write, buffer, lenp, ppos, 810 833 do_proc_douintvec_conv, NULL); ··· 815 838 * This means we can safely use a temporary. 816 839 */ 817 840 static int proc_taint(struct ctl_table *table, int write, 818 - void __user *buffer, size_t *lenp, loff_t *ppos) 841 + void *buffer, size_t *lenp, loff_t *ppos) 819 842 { 820 843 struct ctl_table t; 821 844 unsigned long tmptaint = get_taint(); ··· 847 870 848 871 #ifdef CONFIG_PRINTK 849 872 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, 850 - void __user *buffer, size_t *lenp, loff_t *ppos) 873 + void *buffer, size_t *lenp, loff_t *ppos) 851 874 { 852 875 if (write && !capable(CAP_SYS_ADMIN)) 853 876 return -EPERM; ··· 913 936 * Returns 0 on success or -EINVAL on write when the range check fails. 914 937 */ 915 938 int proc_dointvec_minmax(struct ctl_table *table, int write, 916 - void __user *buffer, size_t *lenp, loff_t *ppos) 939 + void *buffer, size_t *lenp, loff_t *ppos) 917 940 { 918 941 struct do_proc_dointvec_minmax_conv_param param = { 919 942 .min = (int *) table->extra1, ··· 982 1005 * Returns 0 on success or -ERANGE on write when the range check fails. 
983 1006 */ 984 1007 int proc_douintvec_minmax(struct ctl_table *table, int write, 985 - void __user *buffer, size_t *lenp, loff_t *ppos) 1008 + void *buffer, size_t *lenp, loff_t *ppos) 986 1009 { 987 1010 struct do_proc_douintvec_minmax_conv_param param = { 988 1011 .min = (unsigned int *) table->extra1, ··· 1013 1036 } 1014 1037 1015 1038 static int proc_dopipe_max_size(struct ctl_table *table, int write, 1016 - void __user *buffer, size_t *lenp, loff_t *ppos) 1039 + void *buffer, size_t *lenp, loff_t *ppos) 1017 1040 { 1018 1041 return do_proc_douintvec(table, write, buffer, lenp, ppos, 1019 1042 do_proc_dopipe_max_size_conv, NULL); ··· 1034 1057 } 1035 1058 1036 1059 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, 1037 - void __user *buffer, size_t *lenp, loff_t *ppos) 1060 + void *buffer, size_t *lenp, loff_t *ppos) 1038 1061 { 1039 1062 int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 1040 1063 if (!error) ··· 1044 1067 1045 1068 #ifdef CONFIG_COREDUMP 1046 1069 static int proc_dostring_coredump(struct ctl_table *table, int write, 1047 - void __user *buffer, size_t *lenp, loff_t *ppos) 1070 + void *buffer, size_t *lenp, loff_t *ppos) 1048 1071 { 1049 1072 int error = proc_dostring(table, write, buffer, lenp, ppos); 1050 1073 if (!error) ··· 1055 1078 1056 1079 #ifdef CONFIG_MAGIC_SYSRQ 1057 1080 static int sysrq_sysctl_handler(struct ctl_table *table, int write, 1058 - void __user *buffer, size_t *lenp, loff_t *ppos) 1081 + void *buffer, size_t *lenp, loff_t *ppos) 1059 1082 { 1060 1083 int tmp, ret; 1061 1084 ··· 1073 1096 } 1074 1097 #endif 1075 1098 1076 - static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, 1077 - void __user *buffer, 1078 - size_t *lenp, loff_t *ppos, 1079 - unsigned long convmul, 1080 - unsigned long convdiv) 1099 + static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, 1100 + int write, void *buffer, size_t *lenp, loff_t *ppos, 1101 
+ unsigned long convmul, unsigned long convdiv) 1081 1102 { 1082 1103 unsigned long *i, *min, *max; 1083 1104 int vleft, first = 1, err = 0; 1084 1105 size_t left; 1085 - char *kbuf = NULL, *p; 1106 + char *p; 1086 1107 1087 1108 if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { 1088 1109 *lenp = 0; ··· 1099 1124 1100 1125 if (left > PAGE_SIZE - 1) 1101 1126 left = PAGE_SIZE - 1; 1102 - p = kbuf = memdup_user_nul(buffer, left); 1103 - if (IS_ERR(kbuf)) 1104 - return PTR_ERR(kbuf); 1127 + p = buffer; 1105 1128 } 1106 1129 1107 1130 for (; left && vleft--; i++, first = 0) { ··· 1127 1154 *i = val; 1128 1155 } else { 1129 1156 val = convdiv * (*i) / convmul; 1130 - if (!first) { 1131 - err = proc_put_char(&buffer, &left, '\t'); 1132 - if (err) 1133 - break; 1134 - } 1135 - err = proc_put_long(&buffer, &left, val, false); 1136 - if (err) 1137 - break; 1157 + if (!first) 1158 + proc_put_char(&buffer, &left, '\t'); 1159 + proc_put_long(&buffer, &left, val, false); 1138 1160 } 1139 1161 } 1140 1162 1141 1163 if (!write && !first && left && !err) 1142 - err = proc_put_char(&buffer, &left, '\n'); 1164 + proc_put_char(&buffer, &left, '\n'); 1143 1165 if (write && !err) 1144 1166 left -= proc_skip_spaces(&p); 1145 - if (write) { 1146 - kfree(kbuf); 1147 - if (first) 1148 - return err ? : -EINVAL; 1149 - } 1167 + if (write && first) 1168 + return err ? : -EINVAL; 1150 1169 *lenp -= left; 1151 1170 out: 1152 1171 *ppos += *lenp; ··· 1146 1181 } 1147 1182 1148 1183 static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, 1149 - void __user *buffer, 1150 - size_t *lenp, loff_t *ppos, 1151 - unsigned long convmul, 1152 - unsigned long convdiv) 1184 + void *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, 1185 + unsigned long convdiv) 1153 1186 { 1154 1187 return __do_proc_doulongvec_minmax(table->data, table, write, 1155 1188 buffer, lenp, ppos, convmul, convdiv); ··· 1170 1207 * Returns 0 on success. 
1171 1208 */ 1172 1209 int proc_doulongvec_minmax(struct ctl_table *table, int write, 1173 - void __user *buffer, size_t *lenp, loff_t *ppos) 1210 + void *buffer, size_t *lenp, loff_t *ppos) 1174 1211 { 1175 1212 return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); 1176 1213 } ··· 1193 1230 * Returns 0 on success. 1194 1231 */ 1195 1232 int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1196 - void __user *buffer, 1197 - size_t *lenp, loff_t *ppos) 1233 + void *buffer, size_t *lenp, loff_t *ppos) 1198 1234 { 1199 1235 return do_proc_doulongvec_minmax(table, write, buffer, 1200 1236 lenp, ppos, HZ, 1000l); ··· 1287 1325 * Returns 0 on success. 1288 1326 */ 1289 1327 int proc_dointvec_jiffies(struct ctl_table *table, int write, 1290 - void __user *buffer, size_t *lenp, loff_t *ppos) 1328 + void *buffer, size_t *lenp, loff_t *ppos) 1291 1329 { 1292 1330 return do_proc_dointvec(table,write,buffer,lenp,ppos, 1293 1331 do_proc_dointvec_jiffies_conv,NULL); ··· 1309 1347 * Returns 0 on success. 1310 1348 */ 1311 1349 int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, 1312 - void __user *buffer, size_t *lenp, loff_t *ppos) 1350 + void *buffer, size_t *lenp, loff_t *ppos) 1313 1351 { 1314 1352 return do_proc_dointvec(table,write,buffer,lenp,ppos, 1315 1353 do_proc_dointvec_userhz_jiffies_conv,NULL); ··· 1331 1369 * 1332 1370 * Returns 0 on success. 
1333 1371 */ 1334 - int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, 1335 - void __user *buffer, size_t *lenp, loff_t *ppos) 1372 + int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void *buffer, 1373 + size_t *lenp, loff_t *ppos) 1336 1374 { 1337 1375 return do_proc_dointvec(table, write, buffer, lenp, ppos, 1338 1376 do_proc_dointvec_ms_jiffies_conv, NULL); 1339 1377 } 1340 1378 1341 - static int proc_do_cad_pid(struct ctl_table *table, int write, 1342 - void __user *buffer, size_t *lenp, loff_t *ppos) 1379 + static int proc_do_cad_pid(struct ctl_table *table, int write, void *buffer, 1380 + size_t *lenp, loff_t *ppos) 1343 1381 { 1344 1382 struct pid *new_pid; 1345 1383 pid_t tmp; ··· 1378 1416 * Returns 0 on success. 1379 1417 */ 1380 1418 int proc_do_large_bitmap(struct ctl_table *table, int write, 1381 - void __user *buffer, size_t *lenp, loff_t *ppos) 1419 + void *buffer, size_t *lenp, loff_t *ppos) 1382 1420 { 1383 1421 int err = 0; 1384 1422 bool first = 1; ··· 1394 1432 } 1395 1433 1396 1434 if (write) { 1397 - char *kbuf, *p; 1435 + char *p = buffer; 1398 1436 size_t skipped = 0; 1399 1437 1400 1438 if (left > PAGE_SIZE - 1) { ··· 1403 1441 skipped = *lenp - left; 1404 1442 } 1405 1443 1406 - p = kbuf = memdup_user_nul(buffer, left); 1407 - if (IS_ERR(kbuf)) 1408 - return PTR_ERR(kbuf); 1409 - 1410 1444 tmp_bitmap = bitmap_zalloc(bitmap_len, GFP_KERNEL); 1411 - if (!tmp_bitmap) { 1412 - kfree(kbuf); 1445 + if (!tmp_bitmap) 1413 1446 return -ENOMEM; 1414 - } 1415 1447 proc_skip_char(&p, &left, '\n'); 1416 1448 while (!err && left) { 1417 1449 unsigned long val_a, val_b; ··· 1469 1513 first = 0; 1470 1514 proc_skip_char(&p, &left, '\n'); 1471 1515 } 1472 - kfree(kbuf); 1473 1516 left += skipped; 1474 1517 } else { 1475 1518 unsigned long bit_a, bit_b = 0; ··· 1480 1525 bit_b = find_next_zero_bit(bitmap, bitmap_len, 1481 1526 bit_a + 1) - 1; 1482 1527 1483 - if (!first) { 1484 - err = proc_put_char(&buffer, &left, ','); 1485 - 
if (err) 1486 - break; 1487 - } 1488 - err = proc_put_long(&buffer, &left, bit_a, false); 1489 - if (err) 1490 - break; 1528 + if (!first) 1529 + proc_put_char(&buffer, &left, ','); 1530 + proc_put_long(&buffer, &left, bit_a, false); 1491 1531 if (bit_a != bit_b) { 1492 - err = proc_put_char(&buffer, &left, '-'); 1493 - if (err) 1494 - break; 1495 - err = proc_put_long(&buffer, &left, bit_b, false); 1496 - if (err) 1497 - break; 1532 + proc_put_char(&buffer, &left, '-'); 1533 + proc_put_long(&buffer, &left, bit_b, false); 1498 1534 } 1499 1535 1500 1536 first = 0; bit_b++; 1501 1537 } 1502 - if (!err) 1503 - err = proc_put_char(&buffer, &left, '\n'); 1538 + proc_put_char(&buffer, &left, '\n'); 1504 1539 } 1505 1540 1506 1541 if (!err) { ··· 1511 1566 #else /* CONFIG_PROC_SYSCTL */ 1512 1567 1513 1568 int proc_dostring(struct ctl_table *table, int write, 1514 - void __user *buffer, size_t *lenp, loff_t *ppos) 1569 + void *buffer, size_t *lenp, loff_t *ppos) 1515 1570 { 1516 1571 return -ENOSYS; 1517 1572 } 1518 1573 1519 1574 int proc_dointvec(struct ctl_table *table, int write, 1520 - void __user *buffer, size_t *lenp, loff_t *ppos) 1575 + void *buffer, size_t *lenp, loff_t *ppos) 1521 1576 { 1522 1577 return -ENOSYS; 1523 1578 } 1524 1579 1525 1580 int proc_douintvec(struct ctl_table *table, int write, 1526 - void __user *buffer, size_t *lenp, loff_t *ppos) 1581 + void *buffer, size_t *lenp, loff_t *ppos) 1527 1582 { 1528 1583 return -ENOSYS; 1529 1584 } 1530 1585 1531 1586 int proc_dointvec_minmax(struct ctl_table *table, int write, 1532 - void __user *buffer, size_t *lenp, loff_t *ppos) 1587 + void *buffer, size_t *lenp, loff_t *ppos) 1533 1588 { 1534 1589 return -ENOSYS; 1535 1590 } 1536 1591 1537 1592 int proc_douintvec_minmax(struct ctl_table *table, int write, 1538 - void __user *buffer, size_t *lenp, loff_t *ppos) 1593 + void *buffer, size_t *lenp, loff_t *ppos) 1539 1594 { 1540 1595 return -ENOSYS; 1541 1596 } 1542 1597 1543 1598 int 
proc_dointvec_jiffies(struct ctl_table *table, int write, 1544 - void __user *buffer, size_t *lenp, loff_t *ppos) 1599 + void *buffer, size_t *lenp, loff_t *ppos) 1545 1600 { 1546 1601 return -ENOSYS; 1547 1602 } 1548 1603 1549 1604 int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, 1550 - void __user *buffer, size_t *lenp, loff_t *ppos) 1605 + void *buffer, size_t *lenp, loff_t *ppos) 1551 1606 { 1552 1607 return -ENOSYS; 1553 1608 } 1554 1609 1555 1610 int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, 1556 - void __user *buffer, size_t *lenp, loff_t *ppos) 1611 + void *buffer, size_t *lenp, loff_t *ppos) 1557 1612 { 1558 1613 return -ENOSYS; 1559 1614 } 1560 1615 1561 1616 int proc_doulongvec_minmax(struct ctl_table *table, int write, 1562 - void __user *buffer, size_t *lenp, loff_t *ppos) 1617 + void *buffer, size_t *lenp, loff_t *ppos) 1563 1618 { 1564 1619 return -ENOSYS; 1565 1620 } 1566 1621 1567 1622 int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1568 - void __user *buffer, 1569 - size_t *lenp, loff_t *ppos) 1623 + void *buffer, size_t *lenp, loff_t *ppos) 1570 1624 { 1571 - return -ENOSYS; 1625 + return -ENOSYS; 1572 1626 } 1573 1627 1574 1628 int proc_do_large_bitmap(struct ctl_table *table, int write, 1575 - void __user *buffer, size_t *lenp, loff_t *ppos) 1629 + void *buffer, size_t *lenp, loff_t *ppos) 1576 1630 { 1577 1631 return -ENOSYS; 1578 1632 } ··· 1580 1636 1581 1637 #if defined(CONFIG_SYSCTL) 1582 1638 int proc_do_static_key(struct ctl_table *table, int write, 1583 - void __user *buffer, size_t *lenp, 1584 - loff_t *ppos) 1639 + void *buffer, size_t *lenp, loff_t *ppos) 1585 1640 { 1586 1641 struct static_key *key = (struct static_key *)table->data; 1587 1642 static DEFINE_MUTEX(static_key_mutex);
+1 -2
kernel/time/timer.c
··· 249 249 } 250 250 251 251 int timer_migration_handler(struct ctl_table *table, int write, 252 - void __user *buffer, size_t *lenp, 253 - loff_t *ppos) 252 + void *buffer, size_t *lenp, loff_t *ppos) 254 253 { 255 254 int ret; 256 255
+1 -1
kernel/trace/trace.c
··· 2661 2661 } 2662 2662 2663 2663 int tracepoint_printk_sysctl(struct ctl_table *table, int write, 2664 - void __user *buffer, size_t *lenp, 2664 + void *buffer, size_t *lenp, 2665 2665 loff_t *ppos) 2666 2666 { 2667 2667 int save_tracepoint_printk;
+1 -1
kernel/umh.c
··· 630 630 EXPORT_SYMBOL(call_usermodehelper); 631 631 632 632 static int proc_cap_handler(struct ctl_table *table, int write, 633 - void __user *buffer, size_t *lenp, loff_t *ppos) 633 + void *buffer, size_t *lenp, loff_t *ppos) 634 634 { 635 635 struct ctl_table t; 636 636 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
+1 -1
kernel/utsname_sysctl.c
··· 30 30 * to observe. Should this be in kernel/sys.c ???? 31 31 */ 32 32 static int proc_do_uts_string(struct ctl_table *table, int write, 33 - void __user *buffer, size_t *lenp, loff_t *ppos) 33 + void *buffer, size_t *lenp, loff_t *ppos) 34 34 { 35 35 struct ctl_table uts_table; 36 36 int r;
+6 -6
kernel/watchdog.c
··· 661 661 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED 662 662 */ 663 663 static int proc_watchdog_common(int which, struct ctl_table *table, int write, 664 - void __user *buffer, size_t *lenp, loff_t *ppos) 664 + void *buffer, size_t *lenp, loff_t *ppos) 665 665 { 666 666 int err, old, *param = table->data; 667 667 ··· 688 688 * /proc/sys/kernel/watchdog 689 689 */ 690 690 int proc_watchdog(struct ctl_table *table, int write, 691 - void __user *buffer, size_t *lenp, loff_t *ppos) 691 + void *buffer, size_t *lenp, loff_t *ppos) 692 692 { 693 693 return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED, 694 694 table, write, buffer, lenp, ppos); ··· 698 698 * /proc/sys/kernel/nmi_watchdog 699 699 */ 700 700 int proc_nmi_watchdog(struct ctl_table *table, int write, 701 - void __user *buffer, size_t *lenp, loff_t *ppos) 701 + void *buffer, size_t *lenp, loff_t *ppos) 702 702 { 703 703 if (!nmi_watchdog_available && write) 704 704 return -ENOTSUPP; ··· 710 710 * /proc/sys/kernel/soft_watchdog 711 711 */ 712 712 int proc_soft_watchdog(struct ctl_table *table, int write, 713 - void __user *buffer, size_t *lenp, loff_t *ppos) 713 + void *buffer, size_t *lenp, loff_t *ppos) 714 714 { 715 715 return proc_watchdog_common(SOFT_WATCHDOG_ENABLED, 716 716 table, write, buffer, lenp, ppos); ··· 720 720 * /proc/sys/kernel/watchdog_thresh 721 721 */ 722 722 int proc_watchdog_thresh(struct ctl_table *table, int write, 723 - void __user *buffer, size_t *lenp, loff_t *ppos) 723 + void *buffer, size_t *lenp, loff_t *ppos) 724 724 { 725 725 int err, old; 726 726 ··· 743 743 * been brought online, if desired. 744 744 */ 745 745 int proc_watchdog_cpumask(struct ctl_table *table, int write, 746 - void __user *buffer, size_t *lenp, loff_t *ppos) 746 + void *buffer, size_t *lenp, loff_t *ppos) 747 747 { 748 748 int err; 749 749
+1 -1
mm/compaction.c
··· 2463 2463 * /proc/sys/vm/compact_memory 2464 2464 */ 2465 2465 int sysctl_compaction_handler(struct ctl_table *table, int write, 2466 - void __user *buffer, size_t *length, loff_t *ppos) 2466 + void *buffer, size_t *length, loff_t *ppos) 2467 2467 { 2468 2468 if (write) 2469 2469 compact_nodes();
+4 -5
mm/hugetlb.c
··· 3352 3352 #ifdef CONFIG_SYSCTL 3353 3353 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 3354 3354 struct ctl_table *table, int write, 3355 - void __user *buffer, size_t *length, loff_t *ppos) 3355 + void *buffer, size_t *length, loff_t *ppos) 3356 3356 { 3357 3357 struct hstate *h = &default_hstate; 3358 3358 unsigned long tmp = h->max_huge_pages; ··· 3375 3375 } 3376 3376 3377 3377 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 3378 - void __user *buffer, size_t *length, loff_t *ppos) 3378 + void *buffer, size_t *length, loff_t *ppos) 3379 3379 { 3380 3380 3381 3381 return hugetlb_sysctl_handler_common(false, table, write, ··· 3384 3384 3385 3385 #ifdef CONFIG_NUMA 3386 3386 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 3387 - void __user *buffer, size_t *length, loff_t *ppos) 3387 + void *buffer, size_t *length, loff_t *ppos) 3388 3388 { 3389 3389 return hugetlb_sysctl_handler_common(true, table, write, 3390 3390 buffer, length, ppos); ··· 3392 3392 #endif /* CONFIG_NUMA */ 3393 3393 3394 3394 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 3395 - void __user *buffer, 3396 - size_t *length, loff_t *ppos) 3395 + void *buffer, size_t *length, loff_t *ppos) 3397 3396 { 3398 3397 struct hstate *h = &default_hstate; 3399 3398 unsigned long tmp;
+6 -10
mm/page-writeback.c
··· 512 512 } 513 513 514 514 int dirty_background_ratio_handler(struct ctl_table *table, int write, 515 - void __user *buffer, size_t *lenp, 516 - loff_t *ppos) 515 + void *buffer, size_t *lenp, loff_t *ppos) 517 516 { 518 517 int ret; 519 518 ··· 523 524 } 524 525 525 526 int dirty_background_bytes_handler(struct ctl_table *table, int write, 526 - void __user *buffer, size_t *lenp, 527 - loff_t *ppos) 527 + void *buffer, size_t *lenp, loff_t *ppos) 528 528 { 529 529 int ret; 530 530 ··· 533 535 return ret; 534 536 } 535 537 536 - int dirty_ratio_handler(struct ctl_table *table, int write, 537 - void __user *buffer, size_t *lenp, 538 - loff_t *ppos) 538 + int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer, 539 + size_t *lenp, loff_t *ppos) 539 540 { 540 541 int old_ratio = vm_dirty_ratio; 541 542 int ret; ··· 548 551 } 549 552 550 553 int dirty_bytes_handler(struct ctl_table *table, int write, 551 - void __user *buffer, size_t *lenp, 552 - loff_t *ppos) 554 + void *buffer, size_t *lenp, loff_t *ppos) 553 555 { 554 556 unsigned long old_bytes = vm_dirty_bytes; 555 557 int ret; ··· 1968 1972 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs 1969 1973 */ 1970 1974 int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, 1971 - void __user *buffer, size_t *length, loff_t *ppos) 1975 + void *buffer, size_t *length, loff_t *ppos) 1972 1976 { 1973 1977 unsigned int old_interval = dirty_writeback_interval; 1974 1978 int ret;
+10 -20
mm/page_alloc.c
··· 5546 5546 * sysctl handler for numa_zonelist_order 5547 5547 */ 5548 5548 int numa_zonelist_order_handler(struct ctl_table *table, int write, 5549 - void __user *buffer, size_t *length, 5550 - loff_t *ppos) 5549 + void *buffer, size_t *length, loff_t *ppos) 5551 5550 { 5552 - char *str; 5553 - int ret; 5554 - 5555 - if (!write) 5556 - return proc_dostring(table, write, buffer, length, ppos); 5557 - str = memdup_user_nul(buffer, 16); 5558 - if (IS_ERR(str)) 5559 - return PTR_ERR(str); 5560 - 5561 - ret = __parse_numa_zonelist_order(str); 5562 - kfree(str); 5563 - return ret; 5551 + if (write) 5552 + return __parse_numa_zonelist_order(buffer); 5553 + return proc_dostring(table, write, buffer, length, ppos); 5564 5554 } 5565 5555 5566 5556 ··· 7953 7963 * changes. 7954 7964 */ 7955 7965 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 7956 - void __user *buffer, size_t *length, loff_t *ppos) 7966 + void *buffer, size_t *length, loff_t *ppos) 7957 7967 { 7958 7968 int rc; 7959 7969 ··· 7969 7979 } 7970 7980 7971 7981 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 7972 - void __user *buffer, size_t *length, loff_t *ppos) 7982 + void *buffer, size_t *length, loff_t *ppos) 7973 7983 { 7974 7984 int rc; 7975 7985 ··· 7999 8009 8000 8010 8001 8011 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8002 - void __user *buffer, size_t *length, loff_t *ppos) 8012 + void *buffer, size_t *length, loff_t *ppos) 8003 8013 { 8004 8014 int rc; 8005 8015 ··· 8026 8036 } 8027 8037 8028 8038 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8029 - void __user *buffer, size_t *length, loff_t *ppos) 8039 + void *buffer, size_t *length, loff_t *ppos) 8030 8040 { 8031 8041 int rc; 8032 8042 ··· 8050 8060 * if in function of the boot time zone sizes. 
8051 8061 */ 8052 8062 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 8053 - void __user *buffer, size_t *length, loff_t *ppos) 8063 + void *buffer, size_t *length, loff_t *ppos) 8054 8064 { 8055 8065 proc_dointvec_minmax(table, write, buffer, length, ppos); 8056 8066 setup_per_zone_lowmem_reserve(); ··· 8072 8082 * pagelist can have before it gets flushed back to buddy allocator. 8073 8083 */ 8074 8084 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 8075 - void __user *buffer, size_t *length, loff_t *ppos) 8085 + void *buffer, size_t *length, loff_t *ppos) 8076 8086 { 8077 8087 struct zone *zone; 8078 8088 int old_percpu_pagelist_fraction;
+4 -6
mm/util.c
··· 717 717 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ 718 718 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ 719 719 720 - int overcommit_ratio_handler(struct ctl_table *table, int write, 721 - void __user *buffer, size_t *lenp, 722 - loff_t *ppos) 720 + int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, 721 + size_t *lenp, loff_t *ppos) 723 722 { 724 723 int ret; 725 724 ··· 728 729 return ret; 729 730 } 730 731 731 - int overcommit_kbytes_handler(struct ctl_table *table, int write, 732 - void __user *buffer, size_t *lenp, 733 - loff_t *ppos) 732 + int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, 733 + size_t *lenp, loff_t *ppos) 734 734 { 735 735 int ret; 736 736
+2 -2
mm/vmstat.c
··· 76 76 static DEFINE_MUTEX(vm_numa_stat_lock); 77 77 78 78 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 79 - void __user *buffer, size_t *length, loff_t *ppos) 79 + void *buffer, size_t *length, loff_t *ppos) 80 80 { 81 81 int ret, oldval; 82 82 ··· 1751 1751 } 1752 1752 1753 1753 int vmstat_refresh(struct ctl_table *table, int write, 1754 - void __user *buffer, size_t *lenp, loff_t *ppos) 1754 + void *buffer, size_t *lenp, loff_t *ppos) 1755 1755 { 1756 1756 long val; 1757 1757 int err;
+1 -1
net/bridge/br_netfilter_hooks.c
··· 1027 1027 #ifdef CONFIG_SYSCTL 1028 1028 static 1029 1029 int brnf_sysctl_call_tables(struct ctl_table *ctl, int write, 1030 - void __user *buffer, size_t *lenp, loff_t *ppos) 1030 + void *buffer, size_t *lenp, loff_t *ppos) 1031 1031 { 1032 1032 int ret; 1033 1033
+13 -15
net/core/neighbour.c
··· 3379 3379 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3380 3380 3381 3381 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3382 - void __user *buffer, size_t *lenp, loff_t *ppos) 3382 + void *buffer, size_t *lenp, loff_t *ppos) 3383 3383 { 3384 3384 int size, ret; 3385 3385 struct ctl_table tmp = *ctl; ··· 3443 3443 } 3444 3444 3445 3445 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3446 - void __user *buffer, 3447 - size_t *lenp, loff_t *ppos) 3446 + void *buffer, size_t *lenp, 3447 + loff_t *ppos) 3448 3448 { 3449 3449 struct ctl_table tmp = *ctl; 3450 3450 int ret; ··· 3457 3457 return ret; 3458 3458 } 3459 3459 3460 - int neigh_proc_dointvec(struct ctl_table *ctl, int write, 3461 - void __user *buffer, size_t *lenp, loff_t *ppos) 3460 + int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3461 + size_t *lenp, loff_t *ppos) 3462 3462 { 3463 3463 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3464 3464 ··· 3467 3467 } 3468 3468 EXPORT_SYMBOL(neigh_proc_dointvec); 3469 3469 3470 - int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, 3471 - void __user *buffer, 3470 + int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3472 3471 size_t *lenp, loff_t *ppos) 3473 3472 { 3474 3473 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); ··· 3478 3479 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3479 3480 3480 3481 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3481 - void __user *buffer, 3482 - size_t *lenp, loff_t *ppos) 3482 + void *buffer, size_t *lenp, 3483 + loff_t *ppos) 3483 3484 { 3484 3485 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos); 3485 3486 ··· 3488 3489 } 3489 3490 3490 3491 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3491 - void __user *buffer, 3492 - size_t *lenp, loff_t *ppos) 3492 + void *buffer, size_t *lenp, loff_t *ppos) 
3493 3493 { 3494 3494 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3495 3495 ··· 3498 3500 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3499 3501 3500 3502 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3501 - void __user *buffer, 3502 - size_t *lenp, loff_t *ppos) 3503 + void *buffer, size_t *lenp, 3504 + loff_t *ppos) 3503 3505 { 3504 3506 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3505 3507 ··· 3508 3510 } 3509 3511 3510 3512 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3511 - void __user *buffer, 3512 - size_t *lenp, loff_t *ppos) 3513 + void *buffer, size_t *lenp, 3514 + loff_t *ppos) 3513 3515 { 3514 3516 struct neigh_parms *p = ctl->extra2; 3515 3517 int ret;
+10 -17
net/core/sysctl_net_core.c
··· 45 45 46 46 #ifdef CONFIG_RPS 47 47 static int rps_sock_flow_sysctl(struct ctl_table *table, int write, 48 - void __user *buffer, size_t *lenp, loff_t *ppos) 48 + void *buffer, size_t *lenp, loff_t *ppos) 49 49 { 50 50 unsigned int orig_size, size; 51 51 int ret, i; ··· 115 115 static DEFINE_MUTEX(flow_limit_update_mutex); 116 116 117 117 static int flow_limit_cpu_sysctl(struct ctl_table *table, int write, 118 - void __user *buffer, size_t *lenp, 119 - loff_t *ppos) 118 + void *buffer, size_t *lenp, loff_t *ppos) 120 119 { 121 120 struct sd_flow_limit *cur; 122 121 struct softnet_data *sd; ··· 179 180 } 180 181 if (len < *lenp) 181 182 kbuf[len++] = '\n'; 182 - if (copy_to_user(buffer, kbuf, len)) { 183 - ret = -EFAULT; 184 - goto done; 185 - } 183 + memcpy(buffer, kbuf, len); 186 184 *lenp = len; 187 185 *ppos += len; 188 186 } ··· 190 194 } 191 195 192 196 static int flow_limit_table_len_sysctl(struct ctl_table *table, int write, 193 - void __user *buffer, size_t *lenp, 194 - loff_t *ppos) 197 + void *buffer, size_t *lenp, loff_t *ppos) 195 198 { 196 199 unsigned int old, *ptr; 197 200 int ret; ··· 212 217 213 218 #ifdef CONFIG_NET_SCHED 214 219 static int set_default_qdisc(struct ctl_table *table, int write, 215 - void __user *buffer, size_t *lenp, loff_t *ppos) 220 + void *buffer, size_t *lenp, loff_t *ppos) 216 221 { 217 222 char id[IFNAMSIZ]; 218 223 struct ctl_table tbl = { ··· 231 236 #endif 232 237 233 238 static int proc_do_dev_weight(struct ctl_table *table, int write, 234 - void __user *buffer, size_t *lenp, loff_t *ppos) 239 + void *buffer, size_t *lenp, loff_t *ppos) 235 240 { 236 241 int ret; 237 242 ··· 246 251 } 247 252 248 253 static int proc_do_rss_key(struct ctl_table *table, int write, 249 - void __user *buffer, size_t *lenp, loff_t *ppos) 254 + void *buffer, size_t *lenp, loff_t *ppos) 250 255 { 251 256 struct ctl_table fake_table; 252 257 char buf[NETDEV_RSS_KEY_LEN * 3]; ··· 259 264 260 265 #ifdef CONFIG_BPF_JIT 261 266 static int 
proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, 262 - void __user *buffer, size_t *lenp, 267 + void *buffer, size_t *lenp, 263 268 loff_t *ppos) 264 269 { 265 270 int ret, jit_enable = *(int *)table->data; ··· 286 291 # ifdef CONFIG_HAVE_EBPF_JIT 287 292 static int 288 293 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, 289 - void __user *buffer, size_t *lenp, 290 - loff_t *ppos) 294 + void *buffer, size_t *lenp, loff_t *ppos) 291 295 { 292 296 if (!capable(CAP_SYS_ADMIN)) 293 297 return -EPERM; ··· 297 303 298 304 static int 299 305 proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, 300 - void __user *buffer, size_t *lenp, 301 - loff_t *ppos) 306 + void *buffer, size_t *lenp, loff_t *ppos) 302 307 { 303 308 if (!capable(CAP_SYS_ADMIN)) 304 309 return -EPERM;
+3 -4
net/decnet/dn_dev.c
··· 160 160 static int min_priority[1]; 161 161 static int max_priority[] = { 127 }; /* From DECnet spec */ 162 162 163 - static int dn_forwarding_proc(struct ctl_table *, int, 164 - void __user *, size_t *, loff_t *); 163 + static int dn_forwarding_proc(struct ctl_table *, int, void *, size_t *, 164 + loff_t *); 165 165 static struct dn_dev_sysctl_table { 166 166 struct ctl_table_header *sysctl_header; 167 167 struct ctl_table dn_dev_vars[5]; ··· 245 245 } 246 246 247 247 static int dn_forwarding_proc(struct ctl_table *table, int write, 248 - void __user *buffer, 249 - size_t *lenp, loff_t *ppos) 248 + void *buffer, size_t *lenp, loff_t *ppos) 250 249 { 251 250 #ifdef CONFIG_DECNET_ROUTER 252 251 struct net_device *dev = table->extra1;
+8 -19
net/decnet/sysctl_net_decnet.c
··· 134 134 } 135 135 136 136 static int dn_node_address_handler(struct ctl_table *table, int write, 137 - void __user *buffer, 138 - size_t *lenp, loff_t *ppos) 137 + void *buffer, size_t *lenp, loff_t *ppos) 139 138 { 140 139 char addr[DN_ASCBUF_LEN]; 141 140 size_t len; ··· 147 148 148 149 if (write) { 149 150 len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1); 150 - 151 - if (copy_from_user(addr, buffer, len)) 152 - return -EFAULT; 153 - 151 + memcpy(addr, buffer, len); 154 152 addr[len] = 0; 155 153 strip_it(addr); 156 154 ··· 169 173 len = strlen(addr); 170 174 addr[len++] = '\n'; 171 175 172 - if (len > *lenp) len = *lenp; 173 - 174 - if (copy_to_user(buffer, addr, len)) 175 - return -EFAULT; 176 - 176 + if (len > *lenp) 177 + len = *lenp; 178 + memcpy(buffer, addr, len); 177 179 *lenp = len; 178 180 *ppos += len; 179 181 ··· 179 185 } 180 186 181 187 static int dn_def_dev_handler(struct ctl_table *table, int write, 182 - void __user *buffer, 183 - size_t *lenp, loff_t *ppos) 188 + void *buffer, size_t *lenp, loff_t *ppos) 184 189 { 185 190 size_t len; 186 191 struct net_device *dev; ··· 194 201 if (*lenp > 16) 195 202 return -E2BIG; 196 203 197 - if (copy_from_user(devname, buffer, *lenp)) 198 - return -EFAULT; 199 - 204 + memcpy(devname, buffer, *lenp); 200 205 devname[*lenp] = 0; 201 206 strip_it(devname); 202 207 ··· 229 238 230 239 if (len > *lenp) len = *lenp; 231 240 232 - if (copy_to_user(buffer, devname, len)) 233 - return -EFAULT; 234 - 241 + memcpy(buffer, devname, len); 235 242 *lenp = len; 236 243 *ppos += len; 237 244
+3 -6
net/ipv4/devinet.c
··· 2361 2361 } 2362 2362 2363 2363 static int devinet_conf_proc(struct ctl_table *ctl, int write, 2364 - void __user *buffer, 2365 - size_t *lenp, loff_t *ppos) 2364 + void *buffer, size_t *lenp, loff_t *ppos) 2366 2365 { 2367 2366 int old_value = *(int *)ctl->data; 2368 2367 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); ··· 2413 2414 } 2414 2415 2415 2416 static int devinet_sysctl_forward(struct ctl_table *ctl, int write, 2416 - void __user *buffer, 2417 - size_t *lenp, loff_t *ppos) 2417 + void *buffer, size_t *lenp, loff_t *ppos) 2418 2418 { 2419 2419 int *valp = ctl->data; 2420 2420 int val = *valp; ··· 2456 2458 } 2457 2459 2458 2460 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write, 2459 - void __user *buffer, 2460 - size_t *lenp, loff_t *ppos) 2461 + void *buffer, size_t *lenp, loff_t *ppos) 2461 2462 { 2462 2463 int *valp = ctl->data; 2463 2464 int val = *valp;
+1 -2
net/ipv4/route.c
··· 3336 3336 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; 3337 3337 3338 3338 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 3339 - void __user *buffer, 3340 - size_t *lenp, loff_t *ppos) 3339 + void *buffer, size_t *lenp, loff_t *ppos) 3341 3340 { 3342 3341 struct net *net = (struct net *)__ctl->extra1; 3343 3342
+15 -23
net/ipv4/sysctl_net_ipv4.c
··· 71 71 72 72 /* Validate changes from /proc interface. */ 73 73 static int ipv4_local_port_range(struct ctl_table *table, int write, 74 - void __user *buffer, 75 - size_t *lenp, loff_t *ppos) 74 + void *buffer, size_t *lenp, loff_t *ppos) 76 75 { 77 76 struct net *net = 78 77 container_of(table->data, struct net, ipv4.ip_local_ports.range); ··· 106 107 107 108 /* Validate changes from /proc interface. */ 108 109 static int ipv4_privileged_ports(struct ctl_table *table, int write, 109 - void __user *buffer, size_t *lenp, loff_t *ppos) 110 + void *buffer, size_t *lenp, loff_t *ppos) 110 111 { 111 112 struct net *net = container_of(table->data, struct net, 112 113 ipv4.sysctl_ip_prot_sock); ··· 167 168 168 169 /* Validate changes from /proc interface. */ 169 170 static int ipv4_ping_group_range(struct ctl_table *table, int write, 170 - void __user *buffer, 171 - size_t *lenp, loff_t *ppos) 171 + void *buffer, size_t *lenp, loff_t *ppos) 172 172 { 173 173 struct user_namespace *user_ns = current_user_ns(); 174 174 int ret; ··· 202 204 } 203 205 204 206 static int ipv4_fwd_update_priority(struct ctl_table *table, int write, 205 - void __user *buffer, 206 - size_t *lenp, loff_t *ppos) 207 + void *buffer, size_t *lenp, loff_t *ppos) 207 208 { 208 209 struct net *net; 209 210 int ret; ··· 218 221 } 219 222 220 223 static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, 221 - void __user *buffer, size_t *lenp, loff_t *ppos) 224 + void *buffer, size_t *lenp, loff_t *ppos) 222 225 { 223 226 struct net *net = container_of(ctl->data, struct net, 224 227 ipv4.tcp_congestion_control); ··· 238 241 } 239 242 240 243 static int proc_tcp_available_congestion_control(struct ctl_table *ctl, 241 - int write, 242 - void __user *buffer, size_t *lenp, 243 - loff_t *ppos) 244 + int write, void *buffer, 245 + size_t *lenp, loff_t *ppos) 244 246 { 245 247 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; 246 248 int ret; ··· 254 258 } 255 259 256 260 static int 
proc_allowed_congestion_control(struct ctl_table *ctl, 257 - int write, 258 - void __user *buffer, size_t *lenp, 259 - loff_t *ppos) 261 + int write, void *buffer, 262 + size_t *lenp, loff_t *ppos) 260 263 { 261 264 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; 262 265 int ret; ··· 291 296 } 292 297 293 298 static int proc_tcp_fastopen_key(struct ctl_table *table, int write, 294 - void __user *buffer, size_t *lenp, 295 - loff_t *ppos) 299 + void *buffer, size_t *lenp, loff_t *ppos) 296 300 { 297 301 struct net *net = container_of(table->data, struct net, 298 302 ipv4.sysctl_tcp_fastopen); ··· 393 399 } 394 400 395 401 static int proc_tcp_early_demux(struct ctl_table *table, int write, 396 - void __user *buffer, size_t *lenp, loff_t *ppos) 402 + void *buffer, size_t *lenp, loff_t *ppos) 397 403 { 398 404 int ret = 0; 399 405 ··· 409 415 } 410 416 411 417 static int proc_udp_early_demux(struct ctl_table *table, int write, 412 - void __user *buffer, size_t *lenp, loff_t *ppos) 418 + void *buffer, size_t *lenp, loff_t *ppos) 413 419 { 414 420 int ret = 0; 415 421 ··· 425 431 } 426 432 427 433 static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, 428 - int write, 429 - void __user *buffer, 434 + int write, void *buffer, 430 435 size_t *lenp, loff_t *ppos) 431 436 { 432 437 struct net *net = container_of(table->data, struct net, ··· 440 447 } 441 448 442 449 static int proc_tcp_available_ulp(struct ctl_table *ctl, 443 - int write, 444 - void __user *buffer, size_t *lenp, 450 + int write, void *buffer, size_t *lenp, 445 451 loff_t *ppos) 446 452 { 447 453 struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, }; ··· 458 466 459 467 #ifdef CONFIG_IP_ROUTE_MULTIPATH 460 468 static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write, 461 - void __user *buffer, size_t *lenp, 469 + void *buffer, size_t *lenp, 462 470 loff_t *ppos) 463 471 { 464 472 struct net *net = container_of(table->data, struct net,
+13 -20
net/ipv6/addrconf.c
··· 6108 6108 6109 6109 #ifdef CONFIG_SYSCTL 6110 6110 6111 - static 6112 - int addrconf_sysctl_forward(struct ctl_table *ctl, int write, 6113 - void __user *buffer, size_t *lenp, loff_t *ppos) 6111 + static int addrconf_sysctl_forward(struct ctl_table *ctl, int write, 6112 + void *buffer, size_t *lenp, loff_t *ppos) 6114 6113 { 6115 6114 int *valp = ctl->data; 6116 6115 int val = *valp; ··· 6133 6134 return ret; 6134 6135 } 6135 6136 6136 - static 6137 - int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, 6138 - void __user *buffer, size_t *lenp, loff_t *ppos) 6137 + static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, 6138 + void *buffer, size_t *lenp, loff_t *ppos) 6139 6139 { 6140 6140 struct inet6_dev *idev = ctl->extra1; 6141 6141 int min_mtu = IPV6_MIN_MTU; ··· 6204 6206 return 0; 6205 6207 } 6206 6208 6207 - static 6208 - int addrconf_sysctl_disable(struct ctl_table *ctl, int write, 6209 - void __user *buffer, size_t *lenp, loff_t *ppos) 6209 + static int addrconf_sysctl_disable(struct ctl_table *ctl, int write, 6210 + void *buffer, size_t *lenp, loff_t *ppos) 6210 6211 { 6211 6212 int *valp = ctl->data; 6212 6213 int val = *valp; ··· 6229 6232 return ret; 6230 6233 } 6231 6234 6232 - static 6233 - int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write, 6234 - void __user *buffer, size_t *lenp, loff_t *ppos) 6235 + static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write, 6236 + void *buffer, size_t *lenp, loff_t *ppos) 6235 6237 { 6236 6238 int *valp = ctl->data; 6237 6239 int ret; ··· 6271 6275 } 6272 6276 6273 6277 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, 6274 - void __user *buffer, size_t *lenp, 6278 + void *buffer, size_t *lenp, 6275 6279 loff_t *ppos) 6276 6280 { 6277 6281 int ret = 0; ··· 6333 6337 } 6334 6338 6335 6339 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write, 6336 - void __user *buffer, size_t *lenp, 6340 + void *buffer, size_t *lenp, 6337 6341 
loff_t *ppos) 6338 6342 { 6339 6343 int err; ··· 6400 6404 6401 6405 static 6402 6406 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, 6403 - int write, 6404 - void __user *buffer, 6407 + int write, void *buffer, 6405 6408 size_t *lenp, 6406 6409 loff_t *ppos) 6407 6410 { ··· 6500 6505 return 0; 6501 6506 } 6502 6507 6503 - static 6504 - int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write, 6505 - void __user *buffer, size_t *lenp, 6506 - loff_t *ppos) 6508 + static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write, 6509 + void *buffer, size_t *lenp, loff_t *ppos) 6507 6510 { 6508 6511 int *valp = ctl->data; 6509 6512 int val = *valp;
+2 -1
net/ipv6/ndisc.c
··· 1835 1835 } 1836 1836 } 1837 1837 1838 - int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) 1838 + int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer, 1839 + size_t *lenp, loff_t *ppos) 1839 1840 { 1840 1841 struct net_device *dev = ctl->extra1; 1841 1842 struct inet6_dev *idev;
+2 -3
net/ipv6/route.c
··· 6088 6088 6089 6089 #ifdef CONFIG_SYSCTL 6090 6090 6091 - static 6092 - int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, 6093 - void __user *buffer, size_t *lenp, loff_t *ppos) 6091 + static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, 6092 + void *buffer, size_t *lenp, loff_t *ppos) 6094 6093 { 6095 6094 struct net *net; 6096 6095 int delay;
+1 -2
net/ipv6/sysctl_net_ipv6.c
··· 26 26 static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX; 27 27 28 28 static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write, 29 - void __user *buffer, size_t *lenp, 30 - loff_t *ppos) 29 + void *buffer, size_t *lenp, loff_t *ppos) 31 30 { 32 31 struct net *net; 33 32 int ret;
+2 -3
net/mpls/af_mpls.c
··· 1362 1362 (&((struct mpls_dev *)0)->field) 1363 1363 1364 1364 static int mpls_conf_proc(struct ctl_table *ctl, int write, 1365 - void __user *buffer, 1366 - size_t *lenp, loff_t *ppos) 1365 + void *buffer, size_t *lenp, loff_t *ppos) 1367 1366 { 1368 1367 int oval = *(int *)ctl->data; 1369 1368 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); ··· 2593 2594 } 2594 2595 2595 2596 static int mpls_platform_labels(struct ctl_table *table, int write, 2596 - void __user *buffer, size_t *lenp, loff_t *ppos) 2597 + void *buffer, size_t *lenp, loff_t *ppos) 2597 2598 { 2598 2599 struct net *net = table->data; 2599 2600 int platform_labels = net->mpls.platform_labels;
+3 -3
net/netfilter/ipvs/ip_vs_ctl.c
··· 1736 1736 1737 1737 static int 1738 1738 proc_do_defense_mode(struct ctl_table *table, int write, 1739 - void __user *buffer, size_t *lenp, loff_t *ppos) 1739 + void *buffer, size_t *lenp, loff_t *ppos) 1740 1740 { 1741 1741 struct netns_ipvs *ipvs = table->extra2; 1742 1742 int *valp = table->data; ··· 1763 1763 1764 1764 static int 1765 1765 proc_do_sync_threshold(struct ctl_table *table, int write, 1766 - void __user *buffer, size_t *lenp, loff_t *ppos) 1766 + void *buffer, size_t *lenp, loff_t *ppos) 1767 1767 { 1768 1768 int *valp = table->data; 1769 1769 int val[2]; ··· 1788 1788 1789 1789 static int 1790 1790 proc_do_sync_ports(struct ctl_table *table, int write, 1791 - void __user *buffer, size_t *lenp, loff_t *ppos) 1791 + void *buffer, size_t *lenp, loff_t *ppos) 1792 1792 { 1793 1793 int *valp = table->data; 1794 1794 int val = *valp;
+1 -1
net/netfilter/nf_conntrack_standalone.c
··· 517 517 518 518 static int 519 519 nf_conntrack_hash_sysctl(struct ctl_table *table, int write, 520 - void __user *buffer, size_t *lenp, loff_t *ppos) 520 + void *buffer, size_t *lenp, loff_t *ppos) 521 521 { 522 522 int ret; 523 523
+1 -1
net/netfilter/nf_log.c
··· 414 414 }; 415 415 416 416 static int nf_log_proc_dostring(struct ctl_table *table, int write, 417 - void __user *buffer, size_t *lenp, loff_t *ppos) 417 + void *buffer, size_t *lenp, loff_t *ppos) 418 418 { 419 419 const struct nf_logger *logger; 420 420 char buf[NFLOGGER_NAME_LEN];
+1 -2
net/phonet/sysctl.c
··· 49 49 } 50 50 51 51 static int proc_local_port_range(struct ctl_table *table, int write, 52 - void __user *buffer, 53 - size_t *lenp, loff_t *ppos) 52 + void *buffer, size_t *lenp, loff_t *ppos) 54 53 { 55 54 int ret; 56 55 int range[2] = {local_port_range[0], local_port_range[1]};
+2 -4
net/rds/tcp.c
··· 62 62 static struct kmem_cache *rds_tcp_conn_slab; 63 63 64 64 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, 65 - void __user *buffer, size_t *lenp, 66 - loff_t *fpos); 65 + void *buffer, size_t *lenp, loff_t *fpos); 67 66 68 67 static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; 69 68 static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF; ··· 675 676 } 676 677 677 678 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, 678 - void __user *buffer, size_t *lenp, 679 - loff_t *fpos) 679 + void *buffer, size_t *lenp, loff_t *fpos) 680 680 { 681 681 struct net *net = current->nsproxy->net_ns; 682 682 int err;
+11 -21
net/sctp/sysctl.c
··· 43 43 ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; 44 44 45 45 static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, 46 - void __user *buffer, size_t *lenp, 47 - loff_t *ppos); 46 + void *buffer, size_t *lenp, loff_t *ppos); 48 47 static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, 49 - void __user *buffer, size_t *lenp, 50 - loff_t *ppos); 51 - static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, 52 - void __user *buffer, size_t *lenp, 53 - loff_t *ppos); 48 + void *buffer, size_t *lenp, loff_t *ppos); 49 + static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer, 50 + size_t *lenp, loff_t *ppos); 54 51 static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, 55 - void __user *buffer, size_t *lenp, 56 - loff_t *ppos); 52 + void *buffer, size_t *lenp, loff_t *ppos); 57 53 static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 58 - void __user *buffer, size_t *lenp, 59 - loff_t *ppos); 54 + void *buffer, size_t *lenp, loff_t *ppos); 60 55 61 56 static struct ctl_table sctp_table[] = { 62 57 { ··· 338 343 }; 339 344 340 345 static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, 341 - void __user *buffer, size_t *lenp, 342 - loff_t *ppos) 346 + void *buffer, size_t *lenp, loff_t *ppos) 343 347 { 344 348 struct net *net = current->nsproxy->net_ns; 345 349 struct ctl_table tbl; ··· 383 389 } 384 390 385 391 static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, 386 - void __user *buffer, size_t *lenp, 387 - loff_t *ppos) 392 + void *buffer, size_t *lenp, loff_t *ppos) 388 393 { 389 394 struct net *net = current->nsproxy->net_ns; 390 395 unsigned int min = *(unsigned int *) ctl->extra1; ··· 411 418 } 412 419 413 420 static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, 414 - void __user *buffer, size_t *lenp, 415 - loff_t *ppos) 421 + void *buffer, size_t *lenp, loff_t *ppos) 416 422 { 417 423 struct net *net = current->nsproxy->net_ns; 418 424 unsigned int 
min = *(unsigned int *) ctl->extra1; ··· 439 447 } 440 448 441 449 static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, 442 - void __user *buffer, size_t *lenp, 443 - loff_t *ppos) 450 + void *buffer, size_t *lenp, loff_t *ppos) 444 451 { 445 452 if (write) 446 453 pr_warn_once("Changing rto_alpha or rto_beta may lead to " ··· 449 458 } 450 459 451 460 static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 452 - void __user *buffer, size_t *lenp, 453 - loff_t *ppos) 461 + void *buffer, size_t *lenp, loff_t *ppos) 454 462 { 455 463 struct net *net = current->nsproxy->net_ns; 456 464 struct ctl_table tbl;
+13 -16
net/sunrpc/sysctl.c
··· 60 60 } 61 61 62 62 static int proc_do_xprt(struct ctl_table *table, int write, 63 - void __user *buffer, size_t *lenp, loff_t *ppos) 63 + void *buffer, size_t *lenp, loff_t *ppos) 64 64 { 65 65 char tmpbuf[256]; 66 66 size_t len; ··· 70 70 return 0; 71 71 } 72 72 len = svc_print_xprts(tmpbuf, sizeof(tmpbuf)); 73 - return simple_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len); 73 + return memory_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len); 74 74 } 75 75 76 76 static int 77 - proc_dodebug(struct ctl_table *table, int write, 78 - void __user *buffer, size_t *lenp, loff_t *ppos) 77 + proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp, 78 + loff_t *ppos) 79 79 { 80 - char tmpbuf[20], c, *s = NULL; 81 - char __user *p; 80 + char tmpbuf[20], *s = NULL; 81 + char *p; 82 82 unsigned int value; 83 83 size_t left, len; 84 84 ··· 90 90 left = *lenp; 91 91 92 92 if (write) { 93 - if (!access_ok(buffer, left)) 94 - return -EFAULT; 95 93 p = buffer; 96 - while (left && __get_user(c, p) >= 0 && isspace(c)) 97 - left--, p++; 94 + while (left && isspace(*p)) { 95 + left--; 96 + p++; 97 + } 98 98 if (!left) 99 99 goto done; 100 100 101 101 if (left > sizeof(tmpbuf) - 1) 102 102 return -EINVAL; 103 - if (copy_from_user(tmpbuf, p, left)) 104 - return -EFAULT; 103 + memcpy(tmpbuf, p, left); 105 104 tmpbuf[left] = '\0'; 106 105 107 106 value = simple_strtol(tmpbuf, &s, 0); ··· 120 121 len = sprintf(tmpbuf, "0x%04x", *(unsigned int *) table->data); 121 122 if (len > left) 122 123 len = left; 123 - if (copy_to_user(buffer, tmpbuf, len)) 124 - return -EFAULT; 124 + memcpy(buffer, tmpbuf, len); 125 125 if ((left -= len) > 0) { 126 - if (put_user('\n', (char __user *)buffer + len)) 127 - return -EFAULT; 126 + *((char *)buffer + len) = '\n'; 128 127 left--; 129 128 } 130 129 }
+3 -4
net/sunrpc/xprtrdma/svc_rdma.c
··· 80 80 * current value. 81 81 */ 82 82 static int read_reset_stat(struct ctl_table *table, int write, 83 - void __user *buffer, size_t *lenp, 84 - loff_t *ppos) 83 + void *buffer, size_t *lenp, loff_t *ppos) 85 84 { 86 85 atomic_t *stat = (atomic_t *)table->data; 87 86 ··· 102 103 len -= *ppos; 103 104 if (len > *lenp) 104 105 len = *lenp; 105 - if (len && copy_to_user(buffer, str_buf, len)) 106 - return -EFAULT; 106 + if (len) 107 + memcpy(buffer, str_buf, len); 107 108 *lenp = len; 108 109 *ppos += len; 109 110 }
+1 -1
security/apparmor/lsm.c
··· 1696 1696 1697 1697 #ifdef CONFIG_SYSCTL 1698 1698 static int apparmor_dointvec(struct ctl_table *table, int write, 1699 - void __user *buffer, size_t *lenp, loff_t *ppos) 1699 + void *buffer, size_t *lenp, loff_t *ppos) 1700 1700 { 1701 1701 if (!policy_admin_capable(NULL)) 1702 1702 return -EPERM;
+1 -1
security/min_addr.c
··· 30 30 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly 31 31 */ 32 32 int mmap_min_addr_handler(struct ctl_table *table, int write, 33 - void __user *buffer, size_t *lenp, loff_t *ppos) 33 + void *buffer, size_t *lenp, loff_t *ppos) 34 34 { 35 35 int ret; 36 36
+1 -1
security/yama/yama_lsm.c
··· 430 430 431 431 #ifdef CONFIG_SYSCTL 432 432 static int yama_dointvec_minmax(struct ctl_table *table, int write, 433 - void __user *buffer, size_t *lenp, loff_t *ppos) 433 + void *buffer, size_t *lenp, loff_t *ppos) 434 434 { 435 435 struct ctl_table table_copy; 436 436