Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sysctl: treewide: constify the ctl_table argument of proc_handlers

const qualify the struct ctl_table argument in the proc_handler function
signatures. This is a prerequisite to moving the static ctl_table
structs into .rodata, which will ensure that proc_handler function
pointers cannot be modified.

This patch has been generated by the following coccinelle script:

```
virtual patch

@r1@
identifier ctl, write, buffer, lenp, ppos;
identifier func !~ "appldata_(timer|interval)_handler|sched_(rt|rr)_handler|rds_tcp_skbuf_handler|proc_sctp_do_(hmac_alg|rto_min|rto_max|udp_port|alpha_beta|auth|probe_interval)";
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int write, void *buffer, size_t *lenp, loff_t *ppos);

@r2@
identifier func, ctl, write, buffer, lenp, ppos;
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int write, void *buffer, size_t *lenp, loff_t *ppos)
{ ... }

@r3@
identifier func;
@@

int func(
- struct ctl_table *
+ const struct ctl_table *
,int , void *, size_t *, loff_t *);

@r4@
identifier func, ctl;
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
,int , void *, size_t *, loff_t *);

@r5@
identifier func, write, buffer, lenp, ppos;
@@

int func(
- struct ctl_table *
+ const struct ctl_table *
,int write, void *buffer, size_t *lenp, loff_t *ppos);

```

* Code formatting was adjusted in xfs_sysctl.c to comply with code
conventions. The xfs_stats_clear_proc_handler,
xfs_panic_mask_proc_handler and xfs_deprecated_dointvec_minmax were
adjusted.

* The ctl_table argument in proc_watchdog_common was const qualified.
This is called from a proc_handler itself and is calling back into
another proc_handler, making it necessary to change it as part of the
proc_handler migration.

Co-developed-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Co-developed-by: Joel Granados <j.granados@samsung.com>
Signed-off-by: Joel Granados <j.granados@samsung.com>

+258 -258
+1 -1
arch/arm64/kernel/armv8_deprecated.c
··· 507 507 return ret; 508 508 } 509 509 510 - static int emulation_proc_handler(struct ctl_table *table, int write, 510 + static int emulation_proc_handler(const struct ctl_table *table, int write, 511 511 void *buffer, size_t *lenp, 512 512 loff_t *ppos) 513 513 {
+1 -1
arch/arm64/kernel/fpsimd.c
··· 535 535 536 536 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) 537 537 538 - static int vec_proc_do_default_vl(struct ctl_table *table, int write, 538 + static int vec_proc_do_default_vl(const struct ctl_table *table, int write, 539 539 void *buffer, size_t *lenp, loff_t *ppos) 540 540 { 541 541 struct vl_info *info = table->extra1;
+5 -5
arch/s390/appldata/appldata_base.c
··· 46 46 * /proc entries (sysctl) 47 47 */ 48 48 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata"; 49 - static int appldata_timer_handler(struct ctl_table *ctl, int write, 49 + static int appldata_timer_handler(const struct ctl_table *ctl, int write, 50 50 void *buffer, size_t *lenp, loff_t *ppos); 51 - static int appldata_interval_handler(struct ctl_table *ctl, int write, 51 + static int appldata_interval_handler(const struct ctl_table *ctl, int write, 52 52 void *buffer, size_t *lenp, loff_t *ppos); 53 53 54 54 static struct ctl_table_header *appldata_sysctl_header; ··· 199 199 * Start/Stop timer, show status of timer (0 = not active, 1 = active) 200 200 */ 201 201 static int 202 - appldata_timer_handler(struct ctl_table *ctl, int write, 202 + appldata_timer_handler(const struct ctl_table *ctl, int write, 203 203 void *buffer, size_t *lenp, loff_t *ppos) 204 204 { 205 205 int timer_active = appldata_timer_active; ··· 232 232 * current timer interval. 233 233 */ 234 234 static int 235 - appldata_interval_handler(struct ctl_table *ctl, int write, 235 + appldata_interval_handler(const struct ctl_table *ctl, int write, 236 236 void *buffer, size_t *lenp, loff_t *ppos) 237 237 { 238 238 int interval = appldata_interval; ··· 262 262 * monitoring (0 = not in process, 1 = in process) 263 263 */ 264 264 static int 265 - appldata_generic_handler(struct ctl_table *ctl, int write, 265 + appldata_generic_handler(const struct ctl_table *ctl, int write, 266 266 void *buffer, size_t *lenp, loff_t *ppos) 267 267 { 268 268 struct appldata_ops *ops = NULL, *tmp_ops;
+1 -1
arch/s390/kernel/debug.c
··· 954 954 * always allow read, allow write only if debug_stoppable is set or 955 955 * if debug_active is already off 956 956 */ 957 - static int s390dbf_procactive(struct ctl_table *table, int write, 957 + static int s390dbf_procactive(const struct ctl_table *table, int write, 958 958 void *buffer, size_t *lenp, loff_t *ppos) 959 959 { 960 960 if (!write || debug_stoppable || !debug_active)
+1 -1
arch/s390/kernel/topology.c
··· 594 594 } 595 595 early_param("topology", topology_setup); 596 596 597 - static int topology_ctl_handler(struct ctl_table *ctl, int write, 597 + static int topology_ctl_handler(const struct ctl_table *ctl, int write, 598 598 void *buffer, size_t *lenp, loff_t *ppos) 599 599 { 600 600 int enabled = topology_is_enabled();
+3 -3
arch/s390/mm/cmm.c
··· 243 243 return str != cp; 244 244 } 245 245 246 - static int cmm_pages_handler(struct ctl_table *ctl, int write, 246 + static int cmm_pages_handler(const struct ctl_table *ctl, int write, 247 247 void *buffer, size_t *lenp, loff_t *ppos) 248 248 { 249 249 long nr = cmm_get_pages(); ··· 262 262 return 0; 263 263 } 264 264 265 - static int cmm_timed_pages_handler(struct ctl_table *ctl, int write, 265 + static int cmm_timed_pages_handler(const struct ctl_table *ctl, int write, 266 266 void *buffer, size_t *lenp, 267 267 loff_t *ppos) 268 268 { ··· 282 282 return 0; 283 283 } 284 284 285 - static int cmm_timeout_handler(struct ctl_table *ctl, int write, 285 + static int cmm_timeout_handler(const struct ctl_table *ctl, int write, 286 286 void *buffer, size_t *lenp, loff_t *ppos) 287 287 { 288 288 char buf[64], *p;
+1 -1
arch/x86/kernel/itmt.c
··· 38 38 */ 39 39 unsigned int __read_mostly sysctl_sched_itmt_enabled; 40 40 41 - static int sched_itmt_update_handler(struct ctl_table *table, int write, 41 + static int sched_itmt_update_handler(const struct ctl_table *table, int write, 42 42 void *buffer, size_t *lenp, loff_t *ppos) 43 43 { 44 44 unsigned int old_sysctl;
+2 -2
drivers/cdrom/cdrom.c
··· 3473 3473 return 0; 3474 3474 } 3475 3475 3476 - static int cdrom_sysctl_info(struct ctl_table *ctl, int write, 3476 + static int cdrom_sysctl_info(const struct ctl_table *ctl, int write, 3477 3477 void *buffer, size_t *lenp, loff_t *ppos) 3478 3478 { 3479 3479 int pos; ··· 3586 3586 mutex_unlock(&cdrom_mutex); 3587 3587 } 3588 3588 3589 - static int cdrom_sysctl_handler(struct ctl_table *ctl, int write, 3589 + static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write, 3590 3590 void *buffer, size_t *lenp, loff_t *ppos) 3591 3591 { 3592 3592 int ret;
+2 -2
drivers/char/random.c
··· 1620 1620 * UUID. The difference is in whether table->data is NULL; if it is, 1621 1621 * then a new UUID is generated and returned to the user. 1622 1622 */ 1623 - static int proc_do_uuid(struct ctl_table *table, int write, void *buf, 1623 + static int proc_do_uuid(const struct ctl_table *table, int write, void *buf, 1624 1624 size_t *lenp, loff_t *ppos) 1625 1625 { 1626 1626 u8 tmp_uuid[UUID_SIZE], *uuid; ··· 1651 1651 } 1652 1652 1653 1653 /* The same as proc_dointvec, but writes don't change anything. */ 1654 - static int proc_do_rointvec(struct ctl_table *table, int write, void *buf, 1654 + static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf, 1655 1655 size_t *lenp, loff_t *ppos) 1656 1656 { 1657 1657 return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
+1 -1
drivers/macintosh/mac_hid.c
··· 183 183 mac_hid_destroy_emumouse(); 184 184 } 185 185 186 - static int mac_hid_toggle_emumouse(struct ctl_table *table, int write, 186 + static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write, 187 187 void *buffer, size_t *lenp, loff_t *ppos) 188 188 { 189 189 int *valp = table->data;
+1 -1
drivers/net/vrf.c
··· 1886 1886 return res; 1887 1887 } 1888 1888 1889 - static int vrf_shared_table_handler(struct ctl_table *table, int write, 1889 + static int vrf_shared_table_handler(const struct ctl_table *table, int write, 1890 1890 void *buffer, size_t *lenp, loff_t *ppos) 1891 1891 { 1892 1892 struct net *net = (struct net *)table->extra1;
+6 -6
drivers/parport/procfs.c
··· 33 33 #define PARPORT_MIN_SPINTIME_VALUE 1 34 34 #define PARPORT_MAX_SPINTIME_VALUE 1000 35 35 36 - static int do_active_device(struct ctl_table *table, int write, 36 + static int do_active_device(const struct ctl_table *table, int write, 37 37 void *result, size_t *lenp, loff_t *ppos) 38 38 { 39 39 struct parport *port = (struct parport *)table->extra1; ··· 70 70 } 71 71 72 72 #ifdef CONFIG_PARPORT_1284 73 - static int do_autoprobe(struct ctl_table *table, int write, 73 + static int do_autoprobe(const struct ctl_table *table, int write, 74 74 void *result, size_t *lenp, loff_t *ppos) 75 75 { 76 76 struct parport_device_info *info = table->extra2; ··· 113 113 } 114 114 #endif /* IEEE1284.3 support. */ 115 115 116 - static int do_hardware_base_addr(struct ctl_table *table, int write, 116 + static int do_hardware_base_addr(const struct ctl_table *table, int write, 117 117 void *result, size_t *lenp, loff_t *ppos) 118 118 { 119 119 struct parport *port = (struct parport *)table->extra1; ··· 140 140 return 0; 141 141 } 142 142 143 - static int do_hardware_irq(struct ctl_table *table, int write, 143 + static int do_hardware_irq(const struct ctl_table *table, int write, 144 144 void *result, size_t *lenp, loff_t *ppos) 145 145 { 146 146 struct parport *port = (struct parport *)table->extra1; ··· 167 167 return 0; 168 168 } 169 169 170 - static int do_hardware_dma(struct ctl_table *table, int write, 170 + static int do_hardware_dma(const struct ctl_table *table, int write, 171 171 void *result, size_t *lenp, loff_t *ppos) 172 172 { 173 173 struct parport *port = (struct parport *)table->extra1; ··· 194 194 return 0; 195 195 } 196 196 197 - static int do_hardware_modes(struct ctl_table *table, int write, 197 + static int do_hardware_modes(const struct ctl_table *table, int write, 198 198 void *result, size_t *lenp, loff_t *ppos) 199 199 { 200 200 struct parport *port = (struct parport *)table->extra1;
+1 -1
drivers/perf/arm_pmuv3.c
··· 1257 1257 armv8pmu_disable_user_access(); 1258 1258 } 1259 1259 1260 - static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write, 1260 + static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int write, 1261 1261 void *buffer, size_t *lenp, loff_t *ppos) 1262 1262 { 1263 1263 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+1 -1
drivers/perf/riscv_pmu_sbi.c
··· 1277 1277 csr_write(CSR_SCOUNTEREN, 0x2); 1278 1278 } 1279 1279 1280 - static int riscv_pmu_proc_user_access_handler(struct ctl_table *table, 1280 + static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table, 1281 1281 int write, void *buffer, 1282 1282 size_t *lenp, loff_t *ppos) 1283 1283 {
+1 -1
fs/coredump.c
··· 991 991 } 992 992 } 993 993 994 - static int proc_dostring_coredump(struct ctl_table *table, int write, 994 + static int proc_dostring_coredump(const struct ctl_table *table, int write, 995 995 void *buffer, size_t *lenp, loff_t *ppos) 996 996 { 997 997 int error = proc_dostring(table, write, buffer, lenp, ppos);
+1 -1
fs/dcache.c
··· 177 177 return sum < 0 ? 0 : sum; 178 178 } 179 179 180 - static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer, 180 + static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer, 181 181 size_t *lenp, loff_t *ppos) 182 182 { 183 183 dentry_stat.nr_dentry = get_nr_dentry();
+1 -1
fs/drop_caches.c
··· 48 48 iput(toput_inode); 49 49 } 50 50 51 - int drop_caches_sysctl_handler(struct ctl_table *table, int write, 51 + int drop_caches_sysctl_handler(const struct ctl_table *table, int write, 52 52 void *buffer, size_t *length, loff_t *ppos) 53 53 { 54 54 int ret;
+1 -1
fs/exec.c
··· 2204 2204 2205 2205 #ifdef CONFIG_SYSCTL 2206 2206 2207 - static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, 2207 + static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int write, 2208 2208 void *buffer, size_t *lenp, loff_t *ppos) 2209 2209 { 2210 2210 int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+1 -1
fs/file_table.c
··· 96 96 /* 97 97 * Handle nr_files sysctl 98 98 */ 99 - static int proc_nr_files(struct ctl_table *table, int write, void *buffer, 99 + static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, 100 100 size_t *lenp, loff_t *ppos) 101 101 { 102 102 files_stat.nr_files = get_nr_files();
+1 -1
fs/fs-writeback.c
··· 2413 2413 } 2414 2414 __initcall(start_dirtytime_writeback); 2415 2415 2416 - int dirtytime_interval_handler(struct ctl_table *table, int write, 2416 + int dirtytime_interval_handler(const struct ctl_table *table, int write, 2417 2417 void *buffer, size_t *lenp, loff_t *ppos) 2418 2418 { 2419 2419 int ret;
+1 -1
fs/inode.c
··· 107 107 */ 108 108 static struct inodes_stat_t inodes_stat; 109 109 110 - static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer, 110 + static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer, 111 111 size_t *lenp, loff_t *ppos) 112 112 { 113 113 inodes_stat.nr_inodes = get_nr_inodes();
+1 -1
fs/pipe.c
··· 1469 1469 return 0; 1470 1470 } 1471 1471 1472 - static int proc_dopipe_max_size(struct ctl_table *table, int write, 1472 + static int proc_dopipe_max_size(const struct ctl_table *table, int write, 1473 1473 void *buffer, size_t *lenp, loff_t *ppos) 1474 1474 { 1475 1475 return do_proc_douintvec(table, write, buffer, lenp, ppos,
+1 -1
fs/quota/dquot.c
··· 2913 2913 }; 2914 2914 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); 2915 2915 2916 - static int do_proc_dqstats(struct ctl_table *table, int write, 2916 + static int do_proc_dqstats(const struct ctl_table *table, int write, 2917 2917 void *buffer, size_t *lenp, loff_t *ppos) 2918 2918 { 2919 2919 unsigned int type = (unsigned long *)table->data - dqstats.stat;
+3 -3
fs/xfs/xfs_sysctl.c
··· 11 11 #ifdef CONFIG_PROC_FS 12 12 STATIC int 13 13 xfs_stats_clear_proc_handler( 14 - struct ctl_table *ctl, 14 + const struct ctl_table *ctl, 15 15 int write, 16 16 void *buffer, 17 17 size_t *lenp, ··· 31 31 32 32 STATIC int 33 33 xfs_panic_mask_proc_handler( 34 - struct ctl_table *ctl, 34 + const struct ctl_table *ctl, 35 35 int write, 36 36 void *buffer, 37 37 size_t *lenp, ··· 52 52 53 53 STATIC int 54 54 xfs_deprecated_dointvec_minmax( 55 - struct ctl_table *ctl, 55 + const struct ctl_table *ctl, 56 56 int write, 57 57 void *buffer, 58 58 size_t *lenp,
+2 -2
include/linux/ftrace.h
··· 471 471 472 472 extern int stack_tracer_enabled; 473 473 474 - int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, 474 + int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer, 475 475 size_t *lenp, loff_t *ppos); 476 476 477 477 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ ··· 1175 1175 extern void disable_trace_on_warning(void); 1176 1176 extern int __disable_trace_on_warning; 1177 1177 1178 - int tracepoint_printk_sysctl(struct ctl_table *table, int write, 1178 + int tracepoint_printk_sysctl(const struct ctl_table *table, int write, 1179 1179 void *buffer, size_t *lenp, loff_t *ppos); 1180 1180 1181 1181 #else /* CONFIG_TRACING */
+4 -4
include/linux/mm.h
··· 204 204 extern int sysctl_overcommit_ratio; 205 205 extern unsigned long sysctl_overcommit_kbytes; 206 206 207 - int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, 207 + int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *, 208 208 loff_t *); 209 - int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, 209 + int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *, 210 210 loff_t *); 211 - int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, 211 + int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *, 212 212 loff_t *); 213 213 214 214 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) ··· 3854 3854 3855 3855 #ifdef CONFIG_SYSCTL 3856 3856 extern int sysctl_drop_caches; 3857 - int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, 3857 + int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *, 3858 3858 loff_t *); 3859 3859 #endif 3860 3860
+3 -3
include/linux/perf_event.h
··· 1582 1582 1583 1583 extern void perf_sample_event_took(u64 sample_len_ns); 1584 1584 1585 - int perf_event_max_sample_rate_handler(struct ctl_table *table, int write, 1585 + int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write, 1586 1586 void *buffer, size_t *lenp, loff_t *ppos); 1587 - int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 1587 + int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write, 1588 1588 void *buffer, size_t *lenp, loff_t *ppos); 1589 - int perf_event_max_stack_handler(struct ctl_table *table, int write, 1589 + int perf_event_max_stack_handler(const struct ctl_table *table, int write, 1590 1590 void *buffer, size_t *lenp, loff_t *ppos); 1591 1591 1592 1592 /* Access to perf_event_open(2) syscall. */
+1 -1
include/linux/security.h
··· 228 228 #define LSM_UNSAFE_NO_NEW_PRIVS 4 229 229 230 230 #ifdef CONFIG_MMU 231 - extern int mmap_min_addr_handler(struct ctl_table *table, int write, 231 + extern int mmap_min_addr_handler(const struct ctl_table *table, int write, 232 232 void *buffer, size_t *lenp, loff_t *ppos); 233 233 #endif 234 234
+17 -17
include/linux/sysctl.h
··· 61 61 62 62 extern const unsigned long sysctl_long_vals[]; 63 63 64 - typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, 64 + typedef int proc_handler(const struct ctl_table *ctl, int write, void *buffer, 65 65 size_t *lenp, loff_t *ppos); 66 66 67 - int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); 68 - int proc_dobool(struct ctl_table *table, int write, void *buffer, 67 + int proc_dostring(const struct ctl_table *, int, void *, size_t *, loff_t *); 68 + int proc_dobool(const struct ctl_table *table, int write, void *buffer, 69 69 size_t *lenp, loff_t *ppos); 70 - int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); 71 - int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); 72 - int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); 73 - int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, 70 + int proc_dointvec(const struct ctl_table *, int, void *, size_t *, loff_t *); 71 + int proc_douintvec(const struct ctl_table *, int, void *, size_t *, loff_t *); 72 + int proc_dointvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *); 73 + int proc_douintvec_minmax(const struct ctl_table *table, int write, void *buffer, 74 74 size_t *lenp, loff_t *ppos); 75 - int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer, 75 + int proc_dou8vec_minmax(const struct ctl_table *table, int write, void *buffer, 76 76 size_t *lenp, loff_t *ppos); 77 - int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); 78 - int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write, 77 + int proc_dointvec_jiffies(const struct ctl_table *, int, void *, size_t *, loff_t *); 78 + int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write, 79 79 void *buffer, size_t *lenp, loff_t *ppos); 80 - int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, 80 + int proc_dointvec_userhz_jiffies(const struct ctl_table *, int, void *, size_t *, 81 81 loff_t *); 82 - int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, 82 + int proc_dointvec_ms_jiffies(const struct ctl_table *, int, void *, size_t *, 83 83 loff_t *); 84 - int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); 85 - int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *, 84 + int proc_doulongvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *); 85 + int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int, void *, 86 86 size_t *, loff_t *); 87 - int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); 88 - int proc_do_static_key(struct ctl_table *table, int write, void *buffer, 87 + int proc_do_large_bitmap(const struct ctl_table *, int, void *, size_t *, loff_t *); 88 + int proc_do_static_key(const struct ctl_table *table, int write, void *buffer, 89 89 size_t *lenp, loff_t *ppos); 90 90 91 91 /* ··· 287 287 } 288 288 #endif /* CONFIG_SYSCTL */ 289 289 290 - int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, 290 + int sysctl_max_threads(const struct ctl_table *table, int write, void *buffer, 291 291 size_t *lenp, loff_t *ppos); 292 292 293 293 #endif /* _LINUX_SYSCTL_H */
+2 -2
include/linux/vmstat.h
··· 17 17 #define DISABLE_NUMA_STAT 0 18 18 extern int sysctl_vm_numa_stat; 19 19 DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); 20 - int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 20 + int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, 21 21 void *buffer, size_t *length, loff_t *ppos); 22 22 #endif 23 23 ··· 301 301 void refresh_zone_stat_thresholds(void); 302 302 303 303 struct ctl_table; 304 - int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, 304 + int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp, 305 305 loff_t *ppos); 306 306 307 307 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
+1 -1
include/linux/writeback.h
··· 350 350 extern unsigned int dirtytime_expire_interval; 351 351 extern int laptop_mode; 352 352 353 - int dirtytime_interval_handler(struct ctl_table *table, int write, 353 + int dirtytime_interval_handler(const struct ctl_table *table, int write, 354 354 void *buffer, size_t *lenp, loff_t *ppos); 355 355 356 356 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+1 -1
include/net/ndisc.h
··· 486 486 487 487 488 488 #ifdef CONFIG_SYSCTL 489 - int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, 489 + int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, 490 490 void *buffer, size_t *lenp, loff_t *ppos); 491 491 #endif 492 492
+3 -3
include/net/neighbour.h
··· 412 412 void *neigh_seq_next(struct seq_file *, void *, loff_t *); 413 413 void neigh_seq_stop(struct seq_file *, void *); 414 414 415 - int neigh_proc_dointvec(struct ctl_table *ctl, int write, 415 + int neigh_proc_dointvec(const struct ctl_table *ctl, int write, 416 416 void *buffer, size_t *lenp, loff_t *ppos); 417 - int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, 417 + int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, 418 418 void *buffer, 419 419 size_t *lenp, loff_t *ppos); 420 - int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 420 + int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write, 421 421 void *buffer, size_t *lenp, loff_t *ppos); 422 422 423 423 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+1 -1
include/net/netfilter/nf_hooks_lwtunnel.h
··· 2 2 #include <linux/types.h> 3 3 4 4 #ifdef CONFIG_SYSCTL 5 - int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write, 5 + int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write, 6 6 void *buffer, size_t *lenp, loff_t *ppos); 7 7 #endif
+3 -3
ipc/ipc_sysctl.c
··· 17 17 #include <linux/cred.h> 18 18 #include "util.h" 19 19 20 - static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write, 20 + static int proc_ipc_dointvec_minmax_orphans(const struct ctl_table *table, int write, 21 21 void *buffer, size_t *lenp, loff_t *ppos) 22 22 { 23 23 struct ipc_namespace *ns = ··· 33 33 return err; 34 34 } 35 35 36 - static int proc_ipc_auto_msgmni(struct ctl_table *table, int write, 36 + static int proc_ipc_auto_msgmni(const struct ctl_table *table, int write, 37 37 void *buffer, size_t *lenp, loff_t *ppos) 38 38 { 39 39 struct ctl_table ipc_table; ··· 48 48 return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); 49 49 } 50 50 51 - static int proc_ipc_sem_dointvec(struct ctl_table *table, int write, 51 + static int proc_ipc_sem_dointvec(const struct ctl_table *table, int write, 52 52 void *buffer, size_t *lenp, loff_t *ppos) 53 53 { 54 54 struct ipc_namespace *ns =
+2 -2
kernel/bpf/syscall.c
··· 5983 5983 }; 5984 5984 5985 5985 #ifdef CONFIG_SYSCTL 5986 - static int bpf_stats_handler(struct ctl_table *table, int write, 5986 + static int bpf_stats_handler(const struct ctl_table *table, int write, 5987 5987 void *buffer, size_t *lenp, loff_t *ppos) 5988 5988 { 5989 5989 struct static_key *key = (struct static_key *)table->data; ··· 6018 6018 { 6019 6019 } 6020 6020 6021 - static int bpf_unpriv_handler(struct ctl_table *table, int write, 6021 + static int bpf_unpriv_handler(const struct ctl_table *table, int write, 6022 6022 void *buffer, size_t *lenp, loff_t *ppos) 6023 6023 { 6024 6024 int ret, unpriv_enable = *(int *)table->data;
+1 -1
kernel/delayacct.c
··· 44 44 } 45 45 46 46 #ifdef CONFIG_PROC_SYSCTL 47 - static int sysctl_delayacct(struct ctl_table *table, int write, void *buffer, 47 + static int sysctl_delayacct(const struct ctl_table *table, int write, void *buffer, 48 48 size_t *lenp, loff_t *ppos) 49 49 { 50 50 int state = delayacct_on;
+1 -1
kernel/events/callchain.c
··· 270 270 * Used for sysctl_perf_event_max_stack and 271 271 * sysctl_perf_event_max_contexts_per_stack. 272 272 */ 273 - int perf_event_max_stack_handler(struct ctl_table *table, int write, 273 + int perf_event_max_stack_handler(const struct ctl_table *table, int write, 274 274 void *buffer, size_t *lenp, loff_t *ppos) 275 275 { 276 276 int *value = table->data;
+2 -2
kernel/events/core.c
··· 450 450 451 451 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc); 452 452 453 - int perf_event_max_sample_rate_handler(struct ctl_table *table, int write, 453 + int perf_event_max_sample_rate_handler(const struct ctl_table *table, int write, 454 454 void *buffer, size_t *lenp, loff_t *ppos) 455 455 { 456 456 int ret; ··· 474 474 475 475 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT; 476 476 477 - int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, 477 + int perf_cpu_time_max_percent_handler(const struct ctl_table *table, int write, 478 478 void *buffer, size_t *lenp, loff_t *ppos) 479 479 { 480 480 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+1 -1
kernel/fork.c
··· 3404 3404 return 0; 3405 3405 } 3406 3406 3407 - int sysctl_max_threads(struct ctl_table *table, int write, 3407 + int sysctl_max_threads(const struct ctl_table *table, int write, 3408 3408 void *buffer, size_t *lenp, loff_t *ppos) 3409 3409 { 3410 3410 struct ctl_table t;
+1 -1
kernel/hung_task.c
··· 239 239 /* 240 240 * Process updating of timeout sysctl 241 241 */ 242 - static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, 242 + static int proc_dohung_task_timeout_secs(const struct ctl_table *table, int write, 243 243 void *buffer, 244 244 size_t *lenp, loff_t *ppos) 245 245 {
+1 -1
kernel/kexec_core.c
··· 888 888 static int kexec_load_disabled; 889 889 890 890 #ifdef CONFIG_SYSCTL 891 - static int kexec_limit_handler(struct ctl_table *table, int write, 891 + static int kexec_limit_handler(const struct ctl_table *table, int write, 892 892 void *buffer, size_t *lenp, loff_t *ppos) 893 893 { 894 894 struct kexec_load_limit *limit = table->data;
+1 -1
kernel/kprobes.c
··· 939 939 940 940 static DEFINE_MUTEX(kprobe_sysctl_mutex); 941 941 static int sysctl_kprobes_optimization; 942 - static int proc_kprobes_optimization_handler(struct ctl_table *table, 942 + static int proc_kprobes_optimization_handler(const struct ctl_table *table, 943 943 int write, void *buffer, 944 944 size_t *length, loff_t *ppos) 945 945 {
+1 -1
kernel/latencytop.c
··· 65 65 int latencytop_enabled; 66 66 67 67 #ifdef CONFIG_SYSCTL 68 - static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, 68 + static int sysctl_latencytop(const struct ctl_table *table, int write, void *buffer, 69 69 size_t *lenp, loff_t *ppos) 70 70 { 71 71 int err;
+1 -1
kernel/pid_namespace.c
··· 261 261 } 262 262 263 263 #ifdef CONFIG_CHECKPOINT_RESTORE 264 - static int pid_ns_ctl_handler(struct ctl_table *table, int write, 264 + static int pid_ns_ctl_handler(const struct ctl_table *table, int write, 265 265 void *buffer, size_t *lenp, loff_t *ppos) 266 266 { 267 267 struct pid_namespace *pid_ns = task_active_pid_ns(current);
+1 -1
kernel/pid_sysctl.h
··· 5 5 #include <linux/pid_namespace.h> 6 6 7 7 #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) 8 - static int pid_mfd_noexec_dointvec_minmax(struct ctl_table *table, 8 + static int pid_mfd_noexec_dointvec_minmax(const struct ctl_table *table, 9 9 int write, void *buf, size_t *lenp, loff_t *ppos) 10 10 { 11 11 struct pid_namespace *ns = task_active_pid_ns(current);
+1 -1
kernel/printk/internal.h
··· 8 8 9 9 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) 10 10 void __init printk_sysctl_init(void); 11 - int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, 11 + int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write, 12 12 void *buffer, size_t *lenp, loff_t *ppos); 13 13 #else 14 14 #define printk_sysctl_init() do { } while (0)
+1 -1
kernel/printk/printk.c
··· 197 197 198 198 char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; 199 199 #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) 200 - int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, 200 + int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write, 201 201 void *buffer, size_t *lenp, loff_t *ppos) 202 202 { 203 203 char old_str[DEVKMSG_STR_MAX_SIZE];
+1 -1
kernel/printk/sysctl.c
··· 11 11 12 12 static const int ten_thousand = 10000; 13 13 14 - static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, 14 + static int proc_dointvec_minmax_sysadmin(const struct ctl_table *table, int write, 15 15 void *buffer, size_t *lenp, loff_t *ppos) 16 16 { 17 17 if (write && !capable(CAP_SYS_ADMIN))
+3 -3
kernel/sched/core.c
··· 1806 1806 uclamp_update_util_min_rt_default(p); 1807 1807 } 1808 1808 1809 - static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, 1809 + static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write, 1810 1810 void *buffer, size_t *lenp, loff_t *ppos) 1811 1811 { 1812 1812 bool update_root_tg = false; ··· 4392 4392 } 4393 4393 } 4394 4394 4395 - static int sysctl_numa_balancing(struct ctl_table *table, int write, 4395 + static int sysctl_numa_balancing(const struct ctl_table *table, int write, 4396 4396 void *buffer, size_t *lenp, loff_t *ppos) 4397 4397 { 4398 4398 struct ctl_table t; ··· 4461 4461 __setup("schedstats=", setup_schedstats); 4462 4462 4463 4463 #ifdef CONFIG_PROC_SYSCTL 4464 - static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 4464 + static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer, 4465 4465 size_t *lenp, loff_t *ppos) 4466 4466 { 4467 4467 struct ctl_table t;
+4 -4
kernel/sched/rt.c
··· 26 26 27 27 #ifdef CONFIG_SYSCTL 28 28 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ; 29 - static int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 29 + static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer, 30 30 size_t *lenp, loff_t *ppos); 31 - static int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 31 + static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer, 32 32 size_t *lenp, loff_t *ppos); 33 33 static struct ctl_table sched_rt_sysctls[] = { 34 34 { ··· 2952 2952 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 2953 2953 } 2954 2954 2955 - static int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 2955 + static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer, 2956 2956 size_t *lenp, loff_t *ppos) 2957 2957 { 2958 2958 int old_period, old_runtime; ··· 2991 2991 return ret; 2992 2992 } 2993 2993 2994 - static int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 2994 + static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer, 2995 2995 size_t *lenp, loff_t *ppos) 2996 2996 { 2997 2997 int ret;
+1 -1
kernel/sched/topology.c
··· 285 285 } 286 286 287 287 #ifdef CONFIG_PROC_SYSCTL 288 - static int sched_energy_aware_handler(struct ctl_table *table, int write, 288 + static int sched_energy_aware_handler(const struct ctl_table *table, int write, 289 289 void *buffer, size_t *lenp, loff_t *ppos) 290 290 { 291 291 int ret, state;
+1 -1
kernel/seccomp.c
··· 2431 2431 return audit_seccomp_actions_logged(new, old, !ret); 2432 2432 } 2433 2433 2434 - static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write, 2434 + static int seccomp_actions_logged_handler(const struct ctl_table *ro_table, int write, 2435 2435 void *buffer, size_t *lenp, 2436 2436 loff_t *ppos) 2437 2437 {
+1 -1
kernel/stackleak.c
··· 21 21 static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass); 22 22 23 23 #ifdef CONFIG_SYSCTL 24 - static int stack_erasing_sysctl(struct ctl_table *table, int write, 24 + static int stack_erasing_sysctl(const struct ctl_table *table, int write, 25 25 void __user *buffer, size_t *lenp, loff_t *ppos) 26 26 { 27 27 int ret = 0;
+32 -32
kernel/sysctl.c
··· 256 256 * 257 257 * Returns 0 on success. 258 258 */ 259 - int proc_dostring(struct ctl_table *table, int write, 259 + int proc_dostring(const struct ctl_table *table, int write, 260 260 void *buffer, size_t *lenp, loff_t *ppos) 261 261 { 262 262 if (write) ··· 702 702 * 703 703 * Returns 0 on success. 704 704 */ 705 - int proc_dobool(struct ctl_table *table, int write, void *buffer, 705 + int proc_dobool(const struct ctl_table *table, int write, void *buffer, 706 706 size_t *lenp, loff_t *ppos) 707 707 { 708 708 struct ctl_table tmp; ··· 739 739 * 740 740 * Returns 0 on success. 741 741 */ 742 - int proc_dointvec(struct ctl_table *table, int write, void *buffer, 742 + int proc_dointvec(const struct ctl_table *table, int write, void *buffer, 743 743 size_t *lenp, loff_t *ppos) 744 744 { 745 745 return do_proc_dointvec(table, write, buffer, lenp, ppos, NULL, NULL); ··· 758 758 * 759 759 * Returns 0 on success. 760 760 */ 761 - int proc_douintvec(struct ctl_table *table, int write, void *buffer, 761 + int proc_douintvec(const struct ctl_table *table, int write, void *buffer, 762 762 size_t *lenp, loff_t *ppos) 763 763 { 764 764 return do_proc_douintvec(table, write, buffer, lenp, ppos, ··· 769 769 * Taint values can only be increased 770 770 * This means we can safely use a temporary. 771 771 */ 772 - static int proc_taint(struct ctl_table *table, int write, 772 + static int proc_taint(const struct ctl_table *table, int write, 773 773 void *buffer, size_t *lenp, loff_t *ppos) 774 774 { 775 775 struct ctl_table t; ··· 864 864 * 865 865 * Returns 0 on success or -EINVAL on write when the range check fails. 866 866 */ 867 - int proc_dointvec_minmax(struct ctl_table *table, int write, 867 + int proc_dointvec_minmax(const struct ctl_table *table, int write, 868 868 void *buffer, size_t *lenp, loff_t *ppos) 869 869 { 870 870 struct do_proc_dointvec_minmax_conv_param param = { ··· 933 933 * 934 934 * Returns 0 on success or -ERANGE on write when the range check fails. 
935 935 */ 936 - int proc_douintvec_minmax(struct ctl_table *table, int write, 936 + int proc_douintvec_minmax(const struct ctl_table *table, int write, 937 937 void *buffer, size_t *lenp, loff_t *ppos) 938 938 { 939 939 struct do_proc_douintvec_minmax_conv_param param = { ··· 961 961 * 962 962 * Returns 0 on success or an error on write when the range check fails. 963 963 */ 964 - int proc_dou8vec_minmax(struct ctl_table *table, int write, 964 + int proc_dou8vec_minmax(const struct ctl_table *table, int write, 965 965 void *buffer, size_t *lenp, loff_t *ppos) 966 966 { 967 967 struct ctl_table tmp; ··· 998 998 EXPORT_SYMBOL_GPL(proc_dou8vec_minmax); 999 999 1000 1000 #ifdef CONFIG_MAGIC_SYSRQ 1001 - static int sysrq_sysctl_handler(struct ctl_table *table, int write, 1001 + static int sysrq_sysctl_handler(const struct ctl_table *table, int write, 1002 1002 void *buffer, size_t *lenp, loff_t *ppos) 1003 1003 { 1004 1004 int tmp, ret; ··· 1115 1115 * 1116 1116 * Returns 0 on success. 1117 1117 */ 1118 - int proc_doulongvec_minmax(struct ctl_table *table, int write, 1118 + int proc_doulongvec_minmax(const struct ctl_table *table, int write, 1119 1119 void *buffer, size_t *lenp, loff_t *ppos) 1120 1120 { 1121 1121 return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); ··· 1138 1138 * 1139 1139 * Returns 0 on success. 1140 1140 */ 1141 - int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1141 + int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int write, 1142 1142 void *buffer, size_t *lenp, loff_t *ppos) 1143 1143 { 1144 1144 return do_proc_doulongvec_minmax(table, write, buffer, ··· 1259 1259 * 1260 1260 * Returns 0 on success. 
1261 1261 */ 1262 - int proc_dointvec_jiffies(struct ctl_table *table, int write, 1262 + int proc_dointvec_jiffies(const struct ctl_table *table, int write, 1263 1263 void *buffer, size_t *lenp, loff_t *ppos) 1264 1264 { 1265 1265 return do_proc_dointvec(table,write,buffer,lenp,ppos, 1266 1266 do_proc_dointvec_jiffies_conv,NULL); 1267 1267 } 1268 1268 1269 - int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1269 + int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write, 1270 1270 void *buffer, size_t *lenp, loff_t *ppos) 1271 1271 { 1272 1272 struct do_proc_dointvec_minmax_conv_param param = { ··· 1292 1292 * 1293 1293 * Returns 0 on success. 1294 1294 */ 1295 - int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, 1295 + int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int write, 1296 1296 void *buffer, size_t *lenp, loff_t *ppos) 1297 1297 { 1298 1298 return do_proc_dointvec(table, write, buffer, lenp, ppos, ··· 1315 1315 * 1316 1316 * Returns 0 on success. 1317 1317 */ 1318 - int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void *buffer, 1318 + int proc_dointvec_ms_jiffies(const struct ctl_table *table, int write, void *buffer, 1319 1319 size_t *lenp, loff_t *ppos) 1320 1320 { 1321 1321 return do_proc_dointvec(table, write, buffer, lenp, ppos, 1322 1322 do_proc_dointvec_ms_jiffies_conv, NULL); 1323 1323 } 1324 1324 1325 - static int proc_do_cad_pid(struct ctl_table *table, int write, void *buffer, 1325 + static int proc_do_cad_pid(const struct ctl_table *table, int write, void *buffer, 1326 1326 size_t *lenp, loff_t *ppos) 1327 1327 { 1328 1328 struct pid *new_pid; ··· 1361 1361 * 1362 1362 * Returns 0 on success. 
1363 1363 */ 1364 - int proc_do_large_bitmap(struct ctl_table *table, int write, 1364 + int proc_do_large_bitmap(const struct ctl_table *table, int write, 1365 1365 void *buffer, size_t *lenp, loff_t *ppos) 1366 1366 { 1367 1367 int err = 0; ··· 1493 1493 1494 1494 #else /* CONFIG_PROC_SYSCTL */ 1495 1495 1496 - int proc_dostring(struct ctl_table *table, int write, 1496 + int proc_dostring(const struct ctl_table *table, int write, 1497 1497 void *buffer, size_t *lenp, loff_t *ppos) 1498 1498 { 1499 1499 return -ENOSYS; 1500 1500 } 1501 1501 1502 - int proc_dobool(struct ctl_table *table, int write, 1502 + int proc_dobool(const struct ctl_table *table, int write, 1503 1503 void *buffer, size_t *lenp, loff_t *ppos) 1504 1504 { 1505 1505 return -ENOSYS; 1506 1506 } 1507 1507 1508 - int proc_dointvec(struct ctl_table *table, int write, 1508 + int proc_dointvec(const struct ctl_table *table, int write, 1509 1509 void *buffer, size_t *lenp, loff_t *ppos) 1510 1510 { 1511 1511 return -ENOSYS; 1512 1512 } 1513 1513 1514 - int proc_douintvec(struct ctl_table *table, int write, 1514 + int proc_douintvec(const struct ctl_table *table, int write, 1515 1515 void *buffer, size_t *lenp, loff_t *ppos) 1516 1516 { 1517 1517 return -ENOSYS; 1518 1518 } 1519 1519 1520 - int proc_dointvec_minmax(struct ctl_table *table, int write, 1520 + int proc_dointvec_minmax(const struct ctl_table *table, int write, 1521 1521 void *buffer, size_t *lenp, loff_t *ppos) 1522 1522 { 1523 1523 return -ENOSYS; 1524 1524 } 1525 1525 1526 - int proc_douintvec_minmax(struct ctl_table *table, int write, 1526 + int proc_douintvec_minmax(const struct ctl_table *table, int write, 1527 1527 void *buffer, size_t *lenp, loff_t *ppos) 1528 1528 { 1529 1529 return -ENOSYS; 1530 1530 } 1531 1531 1532 - int proc_dou8vec_minmax(struct ctl_table *table, int write, 1532 + int proc_dou8vec_minmax(const struct ctl_table *table, int write, 1533 1533 void *buffer, size_t *lenp, loff_t *ppos) 1534 1534 { 1535 1535 return 
-ENOSYS; 1536 1536 } 1537 1537 1538 - int proc_dointvec_jiffies(struct ctl_table *table, int write, 1538 + int proc_dointvec_jiffies(const struct ctl_table *table, int write, 1539 1539 void *buffer, size_t *lenp, loff_t *ppos) 1540 1540 { 1541 1541 return -ENOSYS; 1542 1542 } 1543 1543 1544 - int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1544 + int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write, 1545 1545 void *buffer, size_t *lenp, loff_t *ppos) 1546 1546 { 1547 1547 return -ENOSYS; 1548 1548 } 1549 1549 1550 - int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, 1550 + int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int write, 1551 1551 void *buffer, size_t *lenp, loff_t *ppos) 1552 1552 { 1553 1553 return -ENOSYS; 1554 1554 } 1555 1555 1556 - int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, 1556 + int proc_dointvec_ms_jiffies(const struct ctl_table *table, int write, 1557 1557 void *buffer, size_t *lenp, loff_t *ppos) 1558 1558 { 1559 1559 return -ENOSYS; 1560 1560 } 1561 1561 1562 - int proc_doulongvec_minmax(struct ctl_table *table, int write, 1562 + int proc_doulongvec_minmax(const struct ctl_table *table, int write, 1563 1563 void *buffer, size_t *lenp, loff_t *ppos) 1564 1564 { 1565 1565 return -ENOSYS; 1566 1566 } 1567 1567 1568 - int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, 1568 + int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int write, 1569 1569 void *buffer, size_t *lenp, loff_t *ppos) 1570 1570 { 1571 1571 return -ENOSYS; 1572 1572 } 1573 1573 1574 - int proc_do_large_bitmap(struct ctl_table *table, int write, 1574 + int proc_do_large_bitmap(const struct ctl_table *table, int write, 1575 1575 void *buffer, size_t *lenp, loff_t *ppos) 1576 1576 { 1577 1577 return -ENOSYS; ··· 1580 1580 #endif /* CONFIG_PROC_SYSCTL */ 1581 1581 1582 1582 #if defined(CONFIG_SYSCTL) 1583 - int proc_do_static_key(struct 
ctl_table *table, int write, 1583 + int proc_do_static_key(const struct ctl_table *table, int write, 1584 1584 void *buffer, size_t *lenp, loff_t *ppos) 1585 1585 { 1586 1586 struct static_key *key = (struct static_key *)table->data;
+1 -1
kernel/time/timer.c
··· 289 289 } 290 290 291 291 #ifdef CONFIG_SYSCTL 292 - static int timer_migration_handler(struct ctl_table *table, int write, 292 + static int timer_migration_handler(const struct ctl_table *table, int write, 293 293 void *buffer, size_t *lenp, loff_t *ppos) 294 294 { 295 295 int ret;
+1 -1
kernel/trace/ftrace.c
··· 8735 8735 } 8736 8736 8737 8737 static int 8738 - ftrace_enable_sysctl(struct ctl_table *table, int write, 8738 + ftrace_enable_sysctl(const struct ctl_table *table, int write, 8739 8739 void *buffer, size_t *lenp, loff_t *ppos) 8740 8740 { 8741 8741 int ret = -ENODEV;
+1 -1
kernel/trace/trace.c
··· 2767 2767 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2768 2768 } 2769 2769 2770 - int tracepoint_printk_sysctl(struct ctl_table *table, int write, 2770 + int tracepoint_printk_sysctl(const struct ctl_table *table, int write, 2771 2771 void *buffer, size_t *lenp, 2772 2772 loff_t *ppos) 2773 2773 {
+1 -1
kernel/trace/trace_events_user.c
··· 2885 2885 return -ENODEV; 2886 2886 } 2887 2887 2888 - static int set_max_user_events_sysctl(struct ctl_table *table, int write, 2888 + static int set_max_user_events_sysctl(const struct ctl_table *table, int write, 2889 2889 void *buffer, size_t *lenp, loff_t *ppos) 2890 2890 { 2891 2891 int ret;
+1 -1
kernel/trace/trace_stack.c
··· 514 514 #endif /* CONFIG_DYNAMIC_FTRACE */ 515 515 516 516 int 517 - stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, 517 + stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer, 518 518 size_t *lenp, loff_t *ppos) 519 519 { 520 520 int was_enabled;
+1 -1
kernel/umh.c
··· 495 495 EXPORT_SYMBOL(call_usermodehelper); 496 496 497 497 #if defined(CONFIG_SYSCTL) 498 - static int proc_cap_handler(struct ctl_table *table, int write, 498 + static int proc_cap_handler(const struct ctl_table *table, int write, 499 499 void *buffer, size_t *lenp, loff_t *ppos) 500 500 { 501 501 struct ctl_table t;
+1 -1
kernel/utsname_sysctl.c
··· 30 30 * Special case of dostring for the UTS structure. This has locks 31 31 * to observe. Should this be in kernel/sys.c ???? 32 32 */ 33 - static int proc_do_uts_string(struct ctl_table *table, int write, 33 + static int proc_do_uts_string(const struct ctl_table *table, int write, 34 34 void *buffer, size_t *lenp, loff_t *ppos) 35 35 { 36 36 struct ctl_table uts_table;
+6 -6
kernel/watchdog.c
··· 983 983 * -------------------|----------------------------------|------------------------------- 984 984 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED 985 985 */ 986 - static int proc_watchdog_common(int which, struct ctl_table *table, int write, 986 + static int proc_watchdog_common(int which, const struct ctl_table *table, int write, 987 987 void *buffer, size_t *lenp, loff_t *ppos) 988 988 { 989 989 int err, old, *param = table->data; ··· 1010 1010 /* 1011 1011 * /proc/sys/kernel/watchdog 1012 1012 */ 1013 - static int proc_watchdog(struct ctl_table *table, int write, 1013 + static int proc_watchdog(const struct ctl_table *table, int write, 1014 1014 void *buffer, size_t *lenp, loff_t *ppos) 1015 1015 { 1016 1016 return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED | ··· 1021 1021 /* 1022 1022 * /proc/sys/kernel/nmi_watchdog 1023 1023 */ 1024 - static int proc_nmi_watchdog(struct ctl_table *table, int write, 1024 + static int proc_nmi_watchdog(const struct ctl_table *table, int write, 1025 1025 void *buffer, size_t *lenp, loff_t *ppos) 1026 1026 { 1027 1027 if (!watchdog_hardlockup_available && write) ··· 1034 1034 /* 1035 1035 * /proc/sys/kernel/soft_watchdog 1036 1036 */ 1037 - static int proc_soft_watchdog(struct ctl_table *table, int write, 1037 + static int proc_soft_watchdog(const struct ctl_table *table, int write, 1038 1038 void *buffer, size_t *lenp, loff_t *ppos) 1039 1039 { 1040 1040 return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED, ··· 1045 1045 /* 1046 1046 * /proc/sys/kernel/watchdog_thresh 1047 1047 */ 1048 - static int proc_watchdog_thresh(struct ctl_table *table, int write, 1048 + static int proc_watchdog_thresh(const struct ctl_table *table, int write, 1049 1049 void *buffer, size_t *lenp, loff_t *ppos) 1050 1050 { 1051 1051 int err, old; ··· 1068 1068 * user to specify a mask that will include cpus that have not yet 1069 1069 * been brought online, if desired. 
1070 1070 */ 1071 - static int proc_watchdog_cpumask(struct ctl_table *table, int write, 1071 + static int proc_watchdog_cpumask(const struct ctl_table *table, int write, 1072 1072 void *buffer, size_t *lenp, loff_t *ppos) 1073 1073 { 1074 1074 int err;
+3 -3
mm/compaction.c
··· 2962 2962 return 0; 2963 2963 } 2964 2964 2965 - static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 2965 + static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write, 2966 2966 void *buffer, size_t *length, loff_t *ppos) 2967 2967 { 2968 2968 int rc, nid; ··· 2992 2992 * This is the entry point for compacting all nodes via 2993 2993 * /proc/sys/vm/compact_memory 2994 2994 */ 2995 - static int sysctl_compaction_handler(struct ctl_table *table, int write, 2995 + static int sysctl_compaction_handler(const struct ctl_table *table, int write, 2996 2996 void *buffer, size_t *length, loff_t *ppos) 2997 2997 { 2998 2998 int ret; ··· 3303 3303 return 0; 3304 3304 } 3305 3305 3306 - static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, 3306 + static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table, 3307 3307 int write, void *buffer, size_t *lenp, loff_t *ppos) 3308 3308 { 3309 3309 int ret, old;
+3 -3
mm/hugetlb.c
··· 4925 4925 return ret; 4926 4926 } 4927 4927 4928 - static int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4928 + static int hugetlb_sysctl_handler(const struct ctl_table *table, int write, 4929 4929 void *buffer, size_t *length, loff_t *ppos) 4930 4930 { 4931 4931 ··· 4934 4934 } 4935 4935 4936 4936 #ifdef CONFIG_NUMA 4937 - static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4937 + static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write, 4938 4938 void *buffer, size_t *length, loff_t *ppos) 4939 4939 { 4940 4940 return hugetlb_sysctl_handler_common(true, table, write, ··· 4942 4942 } 4943 4943 #endif /* CONFIG_NUMA */ 4944 4944 4945 - static int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4945 + static int hugetlb_overcommit_handler(const struct ctl_table *table, int write, 4946 4946 void *buffer, size_t *length, loff_t *ppos) 4947 4947 { 4948 4948 struct hstate *h = &default_hstate;
+5 -5
mm/page-writeback.c
··· 506 506 } 507 507 508 508 #ifdef CONFIG_SYSCTL 509 - static int dirty_background_ratio_handler(struct ctl_table *table, int write, 509 + static int dirty_background_ratio_handler(const struct ctl_table *table, int write, 510 510 void *buffer, size_t *lenp, loff_t *ppos) 511 511 { 512 512 int ret; ··· 517 517 return ret; 518 518 } 519 519 520 - static int dirty_background_bytes_handler(struct ctl_table *table, int write, 520 + static int dirty_background_bytes_handler(const struct ctl_table *table, int write, 521 521 void *buffer, size_t *lenp, loff_t *ppos) 522 522 { 523 523 int ret; ··· 535 535 return ret; 536 536 } 537 537 538 - static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer, 538 + static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer, 539 539 size_t *lenp, loff_t *ppos) 540 540 { 541 541 int old_ratio = vm_dirty_ratio; ··· 549 549 return ret; 550 550 } 551 551 552 - static int dirty_bytes_handler(struct ctl_table *table, int write, 552 + static int dirty_bytes_handler(const struct ctl_table *table, int write, 553 553 void *buffer, size_t *lenp, loff_t *ppos) 554 554 { 555 555 unsigned long old_bytes = vm_dirty_bytes; ··· 2203 2203 /* 2204 2204 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs 2205 2205 */ 2206 - static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, 2206 + static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write, 2207 2207 void *buffer, size_t *length, loff_t *ppos) 2208 2208 { 2209 2209 unsigned int old_interval = dirty_writeback_interval;
+7 -7
mm/page_alloc.c
··· 5127 5127 /* 5128 5128 * sysctl handler for numa_zonelist_order 5129 5129 */ 5130 - static int numa_zonelist_order_handler(struct ctl_table *table, int write, 5130 + static int numa_zonelist_order_handler(const struct ctl_table *table, int write, 5131 5131 void *buffer, size_t *length, loff_t *ppos) 5132 5132 { 5133 5133 if (write) ··· 6091 6091 * that we can call two helper functions whenever min_free_kbytes 6092 6092 * changes. 6093 6093 */ 6094 - static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 6094 + static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write, 6095 6095 void *buffer, size_t *length, loff_t *ppos) 6096 6096 { 6097 6097 int rc; ··· 6107 6107 return 0; 6108 6108 } 6109 6109 6110 - static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 6110 + static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write, 6111 6111 void *buffer, size_t *length, loff_t *ppos) 6112 6112 { 6113 6113 int rc; ··· 6137 6137 } 6138 6138 6139 6139 6140 - static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6140 + static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write, 6141 6141 void *buffer, size_t *length, loff_t *ppos) 6142 6142 { 6143 6143 int rc; ··· 6164 6164 sysctl_min_slab_ratio) / 100; 6165 6165 } 6166 6166 6167 - static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6167 + static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write, 6168 6168 void *buffer, size_t *length, loff_t *ppos) 6169 6169 { 6170 6170 int rc; ··· 6188 6188 * minimum watermarks. The lowmem reserve ratio can only make sense 6189 6189 * if in function of the boot time zone sizes. 
6190 6190 */ 6191 - static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, 6191 + static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table, 6192 6192 int write, void *buffer, size_t *length, loff_t *ppos) 6193 6193 { 6194 6194 int i; ··· 6209 6209 * cpu. It is the fraction of total pages in each zone that a hot per cpu 6210 6210 * pagelist can have before it gets flushed back to buddy allocator. 6211 6211 */ 6212 - static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 6212 + static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table, 6213 6213 int write, void *buffer, size_t *length, loff_t *ppos) 6214 6214 { 6215 6215 struct zone *zone;
+3 -3
mm/util.c
··· 868 868 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ 869 869 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ 870 870 871 - int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, 871 + int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer, 872 872 size_t *lenp, loff_t *ppos) 873 873 { 874 874 int ret; ··· 884 884 percpu_counter_sync(&vm_committed_as); 885 885 } 886 886 887 - int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, 887 + int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer, 888 888 size_t *lenp, loff_t *ppos) 889 889 { 890 890 struct ctl_table t; ··· 920 920 return ret; 921 921 } 922 922 923 - int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, 923 + int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer, 924 924 size_t *lenp, loff_t *ppos) 925 925 { 926 926 int ret;
+2 -2
mm/vmstat.c
··· 74 74 75 75 static DEFINE_MUTEX(vm_numa_stat_lock); 76 76 77 - int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, 77 + int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, 78 78 void *buffer, size_t *length, loff_t *ppos) 79 79 { 80 80 int ret, oldval; ··· 1888 1888 refresh_cpu_vm_stats(true); 1889 1889 } 1890 1890 1891 - int vmstat_refresh(struct ctl_table *table, int write, 1891 + int vmstat_refresh(const struct ctl_table *table, int write, 1892 1892 void *buffer, size_t *lenp, loff_t *ppos) 1893 1893 { 1894 1894 long val;
+1 -1
net/bridge/br_netfilter_hooks.c
··· 1189 1189 1190 1190 #ifdef CONFIG_SYSCTL 1191 1191 static 1192 - int brnf_sysctl_call_tables(struct ctl_table *ctl, int write, 1192 + int brnf_sysctl_call_tables(const struct ctl_table *ctl, int write, 1193 1193 void *buffer, size_t *lenp, loff_t *ppos) 1194 1194 { 1195 1195 int ret;
+9 -9
net/core/neighbour.c
··· 3543 3543 #ifdef CONFIG_SYSCTL 3544 3544 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3545 3545 3546 - static int proc_unres_qlen(struct ctl_table *ctl, int write, 3546 + static int proc_unres_qlen(const struct ctl_table *ctl, int write, 3547 3547 void *buffer, size_t *lenp, loff_t *ppos) 3548 3548 { 3549 3549 int size, ret; ··· 3595 3595 neigh_copy_dflt_parms(net, p, index); 3596 3596 } 3597 3597 3598 - static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3598 + static int neigh_proc_dointvec_zero_intmax(const struct ctl_table *ctl, int write, 3599 3599 void *buffer, size_t *lenp, 3600 3600 loff_t *ppos) 3601 3601 { ··· 3610 3610 return ret; 3611 3611 } 3612 3612 3613 - static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write, 3613 + static int neigh_proc_dointvec_ms_jiffies_positive(const struct ctl_table *ctl, int write, 3614 3614 void *buffer, size_t *lenp, loff_t *ppos) 3615 3615 { 3616 3616 struct ctl_table tmp = *ctl; ··· 3626 3626 return ret; 3627 3627 } 3628 3628 3629 - int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3629 + int neigh_proc_dointvec(const struct ctl_table *ctl, int write, void *buffer, 3630 3630 size_t *lenp, loff_t *ppos) 3631 3631 { 3632 3632 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); ··· 3636 3636 } 3637 3637 EXPORT_SYMBOL(neigh_proc_dointvec); 3638 3638 3639 - int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3639 + int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write, void *buffer, 3640 3640 size_t *lenp, loff_t *ppos) 3641 3641 { 3642 3642 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); ··· 3646 3646 } 3647 3647 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3648 3648 3649 - static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3649 + static int neigh_proc_dointvec_userhz_jiffies(const struct ctl_table *ctl, int write, 3650 3650 void 
*buffer, size_t *lenp, 3651 3651 loff_t *ppos) 3652 3652 { ··· 3656 3656 return ret; 3657 3657 } 3658 3658 3659 - int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3659 + int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write, 3660 3660 void *buffer, size_t *lenp, loff_t *ppos) 3661 3661 { 3662 3662 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); ··· 3666 3666 } 3667 3667 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3668 3668 3669 - static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3669 + static int neigh_proc_dointvec_unres_qlen(const struct ctl_table *ctl, int write, 3670 3670 void *buffer, size_t *lenp, 3671 3671 loff_t *ppos) 3672 3672 { ··· 3676 3676 return ret; 3677 3677 } 3678 3678 3679 - static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3679 + static int neigh_proc_base_reachable_time(const struct ctl_table *ctl, int write, 3680 3680 void *buffer, size_t *lenp, 3681 3681 loff_t *ppos) 3682 3682 {
+10 -10
net/core/sysctl_net_core.c
··· 95 95 return rps_default_mask; 96 96 } 97 97 98 - static int rps_default_mask_sysctl(struct ctl_table *table, int write, 98 + static int rps_default_mask_sysctl(const struct ctl_table *table, int write, 99 99 void *buffer, size_t *lenp, loff_t *ppos) 100 100 { 101 101 struct net *net = (struct net *)table->data; ··· 126 126 return err; 127 127 } 128 128 129 - static int rps_sock_flow_sysctl(struct ctl_table *table, int write, 129 + static int rps_sock_flow_sysctl(const struct ctl_table *table, int write, 130 130 void *buffer, size_t *lenp, loff_t *ppos) 131 131 { 132 132 unsigned int orig_size, size; ··· 198 198 #ifdef CONFIG_NET_FLOW_LIMIT 199 199 static DEFINE_MUTEX(flow_limit_update_mutex); 200 200 201 - static int flow_limit_cpu_sysctl(struct ctl_table *table, int write, 201 + static int flow_limit_cpu_sysctl(const struct ctl_table *table, int write, 202 202 void *buffer, size_t *lenp, loff_t *ppos) 203 203 { 204 204 struct sd_flow_limit *cur; ··· 255 255 return ret; 256 256 } 257 257 258 - static int flow_limit_table_len_sysctl(struct ctl_table *table, int write, 258 + static int flow_limit_table_len_sysctl(const struct ctl_table *table, int write, 259 259 void *buffer, size_t *lenp, loff_t *ppos) 260 260 { 261 261 unsigned int old, *ptr; ··· 277 277 #endif /* CONFIG_NET_FLOW_LIMIT */ 278 278 279 279 #ifdef CONFIG_NET_SCHED 280 - static int set_default_qdisc(struct ctl_table *table, int write, 280 + static int set_default_qdisc(const struct ctl_table *table, int write, 281 281 void *buffer, size_t *lenp, loff_t *ppos) 282 282 { 283 283 char id[IFNAMSIZ]; ··· 296 296 } 297 297 #endif 298 298 299 - static int proc_do_dev_weight(struct ctl_table *table, int write, 299 + static int proc_do_dev_weight(const struct ctl_table *table, int write, 300 300 void *buffer, size_t *lenp, loff_t *ppos) 301 301 { 302 302 static DEFINE_MUTEX(dev_weight_mutex); ··· 314 314 return ret; 315 315 } 316 316 317 - static int proc_do_rss_key(struct ctl_table *table, int write, 317 
+ static int proc_do_rss_key(const struct ctl_table *table, int write, 318 318 void *buffer, size_t *lenp, loff_t *ppos) 319 319 { 320 320 struct ctl_table fake_table; ··· 327 327 } 328 328 329 329 #ifdef CONFIG_BPF_JIT 330 - static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, 330 + static int proc_dointvec_minmax_bpf_enable(const struct ctl_table *table, int write, 331 331 void *buffer, size_t *lenp, 332 332 loff_t *ppos) 333 333 { ··· 360 360 361 361 # ifdef CONFIG_HAVE_EBPF_JIT 362 362 static int 363 - proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write, 363 + proc_dointvec_minmax_bpf_restricted(const struct ctl_table *table, int write, 364 364 void *buffer, size_t *lenp, loff_t *ppos) 365 365 { 366 366 if (!capable(CAP_SYS_ADMIN)) ··· 371 371 # endif /* CONFIG_HAVE_EBPF_JIT */ 372 372 373 373 static int 374 - proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, 374 + proc_dolongvec_minmax_bpf_restricted(const struct ctl_table *table, int write, 375 375 void *buffer, size_t *lenp, loff_t *ppos) 376 376 { 377 377 if (!capable(CAP_SYS_ADMIN))
+3 -3
net/ipv4/devinet.c
··· 2390 2390 } 2391 2391 } 2392 2392 2393 - static int devinet_conf_proc(struct ctl_table *ctl, int write, 2393 + static int devinet_conf_proc(const struct ctl_table *ctl, int write, 2394 2394 void *buffer, size_t *lenp, loff_t *ppos) 2395 2395 { 2396 2396 int old_value = *(int *)ctl->data; ··· 2442 2442 return ret; 2443 2443 } 2444 2444 2445 - static int devinet_sysctl_forward(struct ctl_table *ctl, int write, 2445 + static int devinet_sysctl_forward(const struct ctl_table *ctl, int write, 2446 2446 void *buffer, size_t *lenp, loff_t *ppos) 2447 2447 { 2448 2448 int *valp = ctl->data; ··· 2489 2489 return ret; 2490 2490 } 2491 2491 2492 - static int ipv4_doint_and_flush(struct ctl_table *ctl, int write, 2492 + static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write, 2493 2493 void *buffer, size_t *lenp, loff_t *ppos) 2494 2494 { 2495 2495 int *valp = ctl->data;
+1 -1
net/ipv4/route.c
··· 3388 3388 static int ip_rt_gc_elasticity __read_mostly = 8; 3389 3389 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; 3390 3390 3391 - static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write, 3391 + static int ipv4_sysctl_rtcache_flush(const struct ctl_table *__ctl, int write, 3392 3392 void *buffer, size_t *lenp, loff_t *ppos) 3393 3393 { 3394 3394 struct net *net = (struct net *)__ctl->extra1;
+15 -15
net/ipv4/sysctl_net_ipv4.c
··· 62 62 } 63 63 64 64 /* Validate changes from /proc interface. */ 65 - static int ipv4_local_port_range(struct ctl_table *table, int write, 65 + static int ipv4_local_port_range(const struct ctl_table *table, int write, 66 66 void *buffer, size_t *lenp, loff_t *ppos) 67 67 { 68 68 struct net *net = table->data; ··· 96 96 } 97 97 98 98 /* Validate changes from /proc interface. */ 99 - static int ipv4_privileged_ports(struct ctl_table *table, int write, 99 + static int ipv4_privileged_ports(const struct ctl_table *table, int write, 100 100 void *buffer, size_t *lenp, loff_t *ppos) 101 101 { 102 102 struct net *net = container_of(table->data, struct net, ··· 159 159 } 160 160 161 161 /* Validate changes from /proc interface. */ 162 - static int ipv4_ping_group_range(struct ctl_table *table, int write, 162 + static int ipv4_ping_group_range(const struct ctl_table *table, int write, 163 163 void *buffer, size_t *lenp, loff_t *ppos) 164 164 { 165 165 struct user_namespace *user_ns = current_user_ns(); ··· 194 194 return ret; 195 195 } 196 196 197 - static int ipv4_fwd_update_priority(struct ctl_table *table, int write, 197 + static int ipv4_fwd_update_priority(const struct ctl_table *table, int write, 198 198 void *buffer, size_t *lenp, loff_t *ppos) 199 199 { 200 200 struct net *net; ··· 210 210 return ret; 211 211 } 212 212 213 - static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, 213 + static int proc_tcp_congestion_control(const struct ctl_table *ctl, int write, 214 214 void *buffer, size_t *lenp, loff_t *ppos) 215 215 { 216 216 struct net *net = container_of(ctl->data, struct net, ··· 230 230 return ret; 231 231 } 232 232 233 - static int proc_tcp_available_congestion_control(struct ctl_table *ctl, 233 + static int proc_tcp_available_congestion_control(const struct ctl_table *ctl, 234 234 int write, void *buffer, 235 235 size_t *lenp, loff_t *ppos) 236 236 { ··· 246 246 return ret; 247 247 } 248 248 249 - static int 
proc_allowed_congestion_control(struct ctl_table *ctl, 249 + static int proc_allowed_congestion_control(const struct ctl_table *ctl, 250 250 int write, void *buffer, 251 251 size_t *lenp, loff_t *ppos) 252 252 { ··· 283 283 return ret; 284 284 } 285 285 286 - static int proc_tcp_fastopen_key(struct ctl_table *table, int write, 286 + static int proc_tcp_fastopen_key(const struct ctl_table *table, int write, 287 287 void *buffer, size_t *lenp, loff_t *ppos) 288 288 { 289 289 struct net *net = container_of(table->data, struct net, ··· 354 354 return ret; 355 355 } 356 356 357 - static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table, 357 + static int proc_tfo_blackhole_detect_timeout(const struct ctl_table *table, 358 358 int write, void *buffer, 359 359 size_t *lenp, loff_t *ppos) 360 360 { ··· 369 369 return ret; 370 370 } 371 371 372 - static int proc_tcp_available_ulp(struct ctl_table *ctl, 372 + static int proc_tcp_available_ulp(const struct ctl_table *ctl, 373 373 int write, void *buffer, size_t *lenp, 374 374 loff_t *ppos) 375 375 { ··· 386 386 return ret; 387 387 } 388 388 389 - static int proc_tcp_ehash_entries(struct ctl_table *table, int write, 389 + static int proc_tcp_ehash_entries(const struct ctl_table *table, int write, 390 390 void *buffer, size_t *lenp, loff_t *ppos) 391 391 { 392 392 struct net *net = container_of(table->data, struct net, ··· 410 410 return proc_dointvec(&tbl, write, buffer, lenp, ppos); 411 411 } 412 412 413 - static int proc_udp_hash_entries(struct ctl_table *table, int write, 413 + static int proc_udp_hash_entries(const struct ctl_table *table, int write, 414 414 void *buffer, size_t *lenp, loff_t *ppos) 415 415 { 416 416 struct net *net = container_of(table->data, struct net, ··· 434 434 } 435 435 436 436 #ifdef CONFIG_IP_ROUTE_MULTIPATH 437 - static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write, 437 + static int proc_fib_multipath_hash_policy(const struct ctl_table *table, int write, 438 438 
void *buffer, size_t *lenp, 439 439 loff_t *ppos) 440 440 { ··· 449 449 return ret; 450 450 } 451 451 452 - static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write, 452 + static int proc_fib_multipath_hash_fields(const struct ctl_table *table, int write, 453 453 void *buffer, size_t *lenp, 454 454 loff_t *ppos) 455 455 { ··· 484 484 WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new); 485 485 } 486 486 487 - static int proc_fib_multipath_hash_seed(struct ctl_table *table, int write, 487 + static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write, 488 488 void *buffer, size_t *lenp, 489 489 loff_t *ppos) 490 490 {
+8 -8
net/ipv6/addrconf.c
··· 6309 6309 6310 6310 #ifdef CONFIG_SYSCTL 6311 6311 6312 - static int addrconf_sysctl_forward(struct ctl_table *ctl, int write, 6312 + static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write, 6313 6313 void *buffer, size_t *lenp, loff_t *ppos) 6314 6314 { 6315 6315 int *valp = ctl->data; ··· 6334 6334 return ret; 6335 6335 } 6336 6336 6337 - static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, 6337 + static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write, 6338 6338 void *buffer, size_t *lenp, loff_t *ppos) 6339 6339 { 6340 6340 struct inet6_dev *idev = ctl->extra1; ··· 6405 6405 return 0; 6406 6406 } 6407 6407 6408 - static int addrconf_sysctl_disable(struct ctl_table *ctl, int write, 6408 + static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write, 6409 6409 void *buffer, size_t *lenp, loff_t *ppos) 6410 6410 { 6411 6411 int *valp = ctl->data; ··· 6430 6430 return ret; 6431 6431 } 6432 6432 6433 - static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write, 6433 + static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write, 6434 6434 void *buffer, size_t *lenp, loff_t *ppos) 6435 6435 { 6436 6436 int *valp = ctl->data; ··· 6471 6471 return ret; 6472 6472 } 6473 6473 6474 - static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write, 6474 + static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write, 6475 6475 void *buffer, size_t *lenp, 6476 6476 loff_t *ppos) 6477 6477 { ··· 6534 6534 return ret; 6535 6535 } 6536 6536 6537 - static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write, 6537 + static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write, 6538 6538 void *buffer, size_t *lenp, 6539 6539 loff_t *ppos) 6540 6540 { ··· 6602 6602 } 6603 6603 6604 6604 static 6605 - int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, 6605 + int addrconf_sysctl_ignore_routes_with_linkdown(const 
struct ctl_table *ctl, 6606 6606 int write, void *buffer, 6607 6607 size_t *lenp, 6608 6608 loff_t *ppos) ··· 6702 6702 return 0; 6703 6703 } 6704 6704 6705 - static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write, 6705 + static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write, 6706 6706 void *buffer, size_t *lenp, loff_t *ppos) 6707 6707 { 6708 6708 int *valp = ctl->data;
+1 -1
net/ipv6/ndisc.c
··· 1951 1951 } 1952 1952 } 1953 1953 1954 - int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer, 1954 + int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write, void *buffer, 1955 1955 size_t *lenp, loff_t *ppos) 1956 1956 { 1957 1957 struct net_device *dev = ctl->extra1;
+1 -1
net/ipv6/route.c
··· 6334 6334 6335 6335 #ifdef CONFIG_SYSCTL 6336 6336 6337 - static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, 6337 + static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write, 6338 6338 void *buffer, size_t *lenp, loff_t *ppos) 6339 6339 { 6340 6340 struct net *net;
+2 -2
net/ipv6/sysctl_net_ipv6.c
··· 30 30 static u32 ioam6_id_max = IOAM6_DEFAULT_ID; 31 31 static u64 ioam6_id_wide_max = IOAM6_DEFAULT_ID_WIDE; 32 32 33 - static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write, 33 + static int proc_rt6_multipath_hash_policy(const struct ctl_table *table, int write, 34 34 void *buffer, size_t *lenp, loff_t *ppos) 35 35 { 36 36 struct net *net; ··· 46 46 } 47 47 48 48 static int 49 - proc_rt6_multipath_hash_fields(struct ctl_table *table, int write, void *buffer, 49 + proc_rt6_multipath_hash_fields(const struct ctl_table *table, int write, void *buffer, 50 50 size_t *lenp, loff_t *ppos) 51 51 { 52 52 struct net *net;
+2 -2
net/mpls/af_mpls.c
··· 1347 1347 #define MPLS_PERDEV_SYSCTL_OFFSET(field) \ 1348 1348 (&((struct mpls_dev *)0)->field) 1349 1349 1350 - static int mpls_conf_proc(struct ctl_table *ctl, int write, 1350 + static int mpls_conf_proc(const struct ctl_table *ctl, int write, 1351 1351 void *buffer, size_t *lenp, loff_t *ppos) 1352 1352 { 1353 1353 int oval = *(int *)ctl->data; ··· 2600 2600 return -ENOMEM; 2601 2601 } 2602 2602 2603 - static int mpls_platform_labels(struct ctl_table *table, int write, 2603 + static int mpls_platform_labels(const struct ctl_table *table, int write, 2604 2604 void *buffer, size_t *lenp, loff_t *ppos) 2605 2605 { 2606 2606 struct net *net = table->data;
+2 -2
net/mptcp/ctrl.c
··· 113 113 return ret; 114 114 } 115 115 116 - static int proc_scheduler(struct ctl_table *ctl, int write, 116 + static int proc_scheduler(const struct ctl_table *ctl, int write, 117 117 void *buffer, size_t *lenp, loff_t *ppos) 118 118 { 119 119 const struct net *net = current->nsproxy->net_ns; ··· 133 133 return ret; 134 134 } 135 135 136 - static int proc_available_schedulers(struct ctl_table *ctl, 136 + static int proc_available_schedulers(const struct ctl_table *ctl, 137 137 int write, void *buffer, 138 138 size_t *lenp, loff_t *ppos) 139 139 {
+6 -6
net/netfilter/ipvs/ip_vs_ctl.c
··· 1846 1846 #ifdef CONFIG_SYSCTL 1847 1847 1848 1848 static int 1849 - proc_do_defense_mode(struct ctl_table *table, int write, 1849 + proc_do_defense_mode(const struct ctl_table *table, int write, 1850 1850 void *buffer, size_t *lenp, loff_t *ppos) 1851 1851 { 1852 1852 struct netns_ipvs *ipvs = table->extra2; ··· 1873 1873 } 1874 1874 1875 1875 static int 1876 - proc_do_sync_threshold(struct ctl_table *table, int write, 1876 + proc_do_sync_threshold(const struct ctl_table *table, int write, 1877 1877 void *buffer, size_t *lenp, loff_t *ppos) 1878 1878 { 1879 1879 struct netns_ipvs *ipvs = table->extra2; ··· 1901 1901 } 1902 1902 1903 1903 static int 1904 - proc_do_sync_ports(struct ctl_table *table, int write, 1904 + proc_do_sync_ports(const struct ctl_table *table, int write, 1905 1905 void *buffer, size_t *lenp, loff_t *ppos) 1906 1906 { 1907 1907 int *valp = table->data; ··· 1984 1984 return ret; 1985 1985 } 1986 1986 1987 - static int ipvs_proc_est_cpulist(struct ctl_table *table, int write, 1987 + static int ipvs_proc_est_cpulist(const struct ctl_table *table, int write, 1988 1988 void *buffer, size_t *lenp, loff_t *ppos) 1989 1989 { 1990 1990 int ret; ··· 2011 2011 return ret; 2012 2012 } 2013 2013 2014 - static int ipvs_proc_est_nice(struct ctl_table *table, int write, 2014 + static int ipvs_proc_est_nice(const struct ctl_table *table, int write, 2015 2015 void *buffer, size_t *lenp, loff_t *ppos) 2016 2016 { 2017 2017 struct netns_ipvs *ipvs = table->extra2; ··· 2041 2041 return ret; 2042 2042 } 2043 2043 2044 - static int ipvs_proc_run_estimation(struct ctl_table *table, int write, 2044 + static int ipvs_proc_run_estimation(const struct ctl_table *table, int write, 2045 2045 void *buffer, size_t *lenp, loff_t *ppos) 2046 2046 { 2047 2047 struct netns_ipvs *ipvs = table->extra2;
+1 -1
net/netfilter/nf_conntrack_standalone.c
··· 524 524 static unsigned int nf_conntrack_htable_size_user __read_mostly; 525 525 526 526 static int 527 - nf_conntrack_hash_sysctl(struct ctl_table *table, int write, 527 + nf_conntrack_hash_sysctl(const struct ctl_table *table, int write, 528 528 void *buffer, size_t *lenp, loff_t *ppos) 529 529 { 530 530 int ret;
+1 -1
net/netfilter/nf_hooks_lwtunnel.c
··· 28 28 } 29 29 30 30 #ifdef CONFIG_SYSCTL 31 - int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write, 31 + int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write, 32 32 void *buffer, size_t *lenp, loff_t *ppos) 33 33 { 34 34 int proc_nf_hooks_lwtunnel_enabled = 0;
+1 -1
net/netfilter/nf_log.c
··· 408 408 }, 409 409 }; 410 410 411 - static int nf_log_proc_dostring(struct ctl_table *table, int write, 411 + static int nf_log_proc_dostring(const struct ctl_table *table, int write, 412 412 void *buffer, size_t *lenp, loff_t *ppos) 413 413 { 414 414 const struct nf_logger *logger;
+1 -1
net/phonet/sysctl.c
··· 48 48 } while (read_seqretry(&local_port_range_lock, seq)); 49 49 } 50 50 51 - static int proc_local_port_range(struct ctl_table *table, int write, 51 + static int proc_local_port_range(const struct ctl_table *table, int write, 52 52 void *buffer, size_t *lenp, loff_t *ppos) 53 53 { 54 54 int ret;
+2 -2
net/rds/tcp.c
··· 61 61 62 62 static struct kmem_cache *rds_tcp_conn_slab; 63 63 64 - static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, 64 + static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write, 65 65 void *buffer, size_t *lenp, loff_t *fpos); 66 66 67 67 static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; ··· 682 682 spin_unlock_irq(&rds_tcp_conn_lock); 683 683 } 684 684 685 - static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, 685 + static int rds_tcp_skbuf_handler(const struct ctl_table *ctl, int write, 686 686 void *buffer, size_t *lenp, loff_t *fpos) 687 687 { 688 688 struct net *net = current->nsproxy->net_ns;
+14 -14
net/sctp/sysctl.c
··· 43 43 (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) 44 44 ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; 45 45 46 - static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, 46 + static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write, 47 47 void *buffer, size_t *lenp, loff_t *ppos); 48 - static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, 48 + static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write, 49 49 void *buffer, size_t *lenp, loff_t *ppos); 50 - static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer, 50 + static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write, void *buffer, 51 51 size_t *lenp, loff_t *ppos); 52 - static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer, 52 + static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, void *buffer, 53 53 size_t *lenp, loff_t *ppos); 54 - static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, 54 + static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write, 55 55 void *buffer, size_t *lenp, loff_t *ppos); 56 - static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 56 + static int proc_sctp_do_auth(const struct ctl_table *ctl, int write, 57 57 void *buffer, size_t *lenp, loff_t *ppos); 58 - static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write, 58 + static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write, 59 59 void *buffer, size_t *lenp, loff_t *ppos); 60 60 61 61 static struct ctl_table sctp_table[] = { ··· 384 384 }, 385 385 }; 386 386 387 - static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, 387 + static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write, 388 388 void *buffer, size_t *lenp, loff_t *ppos) 389 389 { 390 390 struct net *net = current->nsproxy->net_ns; ··· 429 429 return ret; 430 430 } 431 431 432 - static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, 432 + 
static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write, 433 433 void *buffer, size_t *lenp, loff_t *ppos) 434 434 { 435 435 struct net *net = current->nsproxy->net_ns; ··· 457 457 return ret; 458 458 } 459 459 460 - static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, 460 + static int proc_sctp_do_rto_max(const struct ctl_table *ctl, int write, 461 461 void *buffer, size_t *lenp, loff_t *ppos) 462 462 { 463 463 struct net *net = current->nsproxy->net_ns; ··· 485 485 return ret; 486 486 } 487 487 488 - static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, 488 + static int proc_sctp_do_alpha_beta(const struct ctl_table *ctl, int write, 489 489 void *buffer, size_t *lenp, loff_t *ppos) 490 490 { 491 491 if (write) ··· 495 495 return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); 496 496 } 497 497 498 - static int proc_sctp_do_auth(struct ctl_table *ctl, int write, 498 + static int proc_sctp_do_auth(const struct ctl_table *ctl, int write, 499 499 void *buffer, size_t *lenp, loff_t *ppos) 500 500 { 501 501 struct net *net = current->nsproxy->net_ns; ··· 524 524 return ret; 525 525 } 526 526 527 - static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, 527 + static int proc_sctp_do_udp_port(const struct ctl_table *ctl, int write, 528 528 void *buffer, size_t *lenp, loff_t *ppos) 529 529 { 530 530 struct net *net = current->nsproxy->net_ns; ··· 565 565 return ret; 566 566 } 567 567 568 - static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write, 568 + static int proc_sctp_do_probe_interval(const struct ctl_table *ctl, int write, 569 569 void *buffer, size_t *lenp, loff_t *ppos) 570 570 { 571 571 struct net *net = current->nsproxy->net_ns;
+2 -2
net/sunrpc/sysctl.c
··· 40 40 41 41 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 42 42 43 - static int proc_do_xprt(struct ctl_table *table, int write, 43 + static int proc_do_xprt(const struct ctl_table *table, int write, 44 44 void *buffer, size_t *lenp, loff_t *ppos) 45 45 { 46 46 char tmpbuf[256]; ··· 62 62 } 63 63 64 64 static int 65 - proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp, 65 + proc_dodebug(const struct ctl_table *table, int write, void *buffer, size_t *lenp, 66 66 loff_t *ppos) 67 67 { 68 68 char tmpbuf[20], *s = NULL;
+1 -1
net/sunrpc/xprtrdma/svc_rdma.c
··· 74 74 SVCRDMA_COUNTER_BUFSIZ = sizeof(unsigned long long), 75 75 }; 76 76 77 - static int svcrdma_counter_handler(struct ctl_table *table, int write, 77 + static int svcrdma_counter_handler(const struct ctl_table *table, int write, 78 78 void *buffer, size_t *lenp, loff_t *ppos) 79 79 { 80 80 struct percpu_counter *stat = (struct percpu_counter *)table->data;
+1 -1
security/apparmor/lsm.c
··· 2029 2029 } 2030 2030 2031 2031 #ifdef CONFIG_SYSCTL 2032 - static int apparmor_dointvec(struct ctl_table *table, int write, 2032 + static int apparmor_dointvec(const struct ctl_table *table, int write, 2033 2033 void *buffer, size_t *lenp, loff_t *ppos) 2034 2034 { 2035 2035 if (!aa_current_policy_admin_capable(NULL))
+1 -1
security/min_addr.c
··· 29 29 * sysctl handler which just sets dac_mmap_min_addr = the new value and then 30 30 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly 31 31 */ 32 - int mmap_min_addr_handler(struct ctl_table *table, int write, 32 + int mmap_min_addr_handler(const struct ctl_table *table, int write, 33 33 void *buffer, size_t *lenp, loff_t *ppos) 34 34 { 35 35 int ret;
+1 -1
security/yama/yama_lsm.c
··· 436 436 }; 437 437 438 438 #ifdef CONFIG_SYSCTL 439 - static int yama_dointvec_minmax(struct ctl_table *table, int write, 439 + static int yama_dointvec_minmax(const struct ctl_table *table, int write, 440 440 void *buffer, size_t *lenp, loff_t *ppos) 441 441 { 442 442 struct ctl_table table_copy;