pyrox.dev / nixpkgs
linux-copperhead: LTS based on regular 4.14
Tim Steinbach · 7 years ago
a444dcad (diff against 7eb169a2) · +2880 -8 · 4 changed files
pkgs/os-specific/linux/kernel/copperhead-4-14.patch
pkgs/os-specific/linux/kernel/linux-4.14.nix
pkgs/os-specific/linux/kernel/patches.nix
pkgs/top-level/all-packages.nix
pkgs/os-specific/linux/kernel/copperhead-4-14.patch · +2864 (new file)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0380a45ecf4b..39956a3ef645 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -490,16 +490,6 @@
nosocket -- Disable socket memory accounting.
nokmem -- Disable kernel memory accounting.

- checkreqprot [SELINUX] Set initial checkreqprot flag value.
- Format: { "0" | "1" }
- See security/selinux/Kconfig help text.
- 0 -- check protection applied by kernel (includes
- any implied execute protection).
- 1 -- check protection requested by application.
- Default value is set via a kernel config option.
- Value can be changed at runtime via
- /selinux/checkreqprot.
-
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
clk_ignore_unused
@@ -2899,6 +2889,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.

+ extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
pcbit= [HW,ISDN]

pcd. [PARIDE]
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 694968c7523c..002d86416ef8 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -91,6 +91,7 @@ show up in /proc/sys/kernel:
- sysctl_writes_strict
- tainted
- threads-max
+- tiocsti_restrict
- unknown_nmi_panic
- watchdog
- watchdog_thresh
@@ -999,6 +1000,26 @@ available RAM pages threads-max is reduced accordingly.

==============================================================

+tiocsti_restrict:
+
+This toggle indicates whether unprivileged users are prevented
+from using the TIOCSTI ioctl to inject commands into other processes
+which share a tty session.
+
+When tiocsti_restrict is set to (0) there are no restrictions(accept
+the default restriction of only being able to injection commands into
+one's own tty). When tiocsti_restrict is set to (1), users must
+have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
+
+When user namespaces are in use, the check for the capability
+CAP_SYS_ADMIN is done against the user namespace that originally
+opened the tty.
+
+The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
+default value of tiocsti_restrict.
+
+==============================================================
+
unknown_nmi_panic:

The value in this file affects behavior of handling NMI. When the
diff --git a/Makefile b/Makefile
index 787cf6605209..e4fda5330730 100644
--- a/Makefile
+++ b/Makefile
@@ -710,6 +710,9 @@ endif
KBUILD_CFLAGS += $(stackp-flag)

ifeq ($(cc-name),clang)
+ifdef CONFIG_LOCAL_INIT
+KBUILD_CFLAGS += -fsanitize=local-init
+endif
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
diff --git a/arch/Kconfig b/arch/Kconfig
index 400b9e1b2f27..4637096f7902 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -440,6 +440,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
is some slowdown of the boot process (about 0.5%) and fork and
irq processing.

+ When extra_latent_entropy is passed on the kernel command line,
+ entropy will be extracted from up to the first 4GB of RAM while the
+ runtime memory allocator is being initialized. This costs even more
+ slowdown of the boot process.
+
Note that entropy extracted this way is not cryptographically
secure!

@@ -533,7 +538,7 @@ config CC_STACKPROTECTOR
choice
prompt "Stack Protector buffer overflow detection"
depends on HAVE_CC_STACKPROTECTOR
- default CC_STACKPROTECTOR_NONE
+ default CC_STACKPROTECTOR_STRONG
help
This option turns on the "stack-protector" GCC feature. This
feature puts, at the beginning of functions, a canary value on
@@ -735,7 +740,7 @@ config ARCH_MMAP_RND_BITS
int "Number of bits to use for ASLR of mmap base address" if EXPERT
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
- default ARCH_MMAP_RND_BITS_MIN
+ default ARCH_MMAP_RND_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_BITS
help
This value can be used to select the number of bits to use to
@@ -769,7 +774,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
- default ARCH_MMAP_RND_COMPAT_BITS_MIN
+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
help
This value can be used to select the number of bits to use to
@@ -952,6 +957,7 @@ config ARCH_HAS_REFCOUNT

config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
+ default y
help
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2d5f7aca156d..aa4839a74c6a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -918,6 +918,7 @@ endif

config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ default y
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -1044,6 +1045,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
+ default y
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index cc6bd559af85..01d5442d4722 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select ARM64_PTDUMP_CORE
+ default y
---help---
Generate a warning if any W+X mappings are found at boot.

diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 34480e9af2e7..26304242250c 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 33be513ef24c..6f0c0e3ef0dd 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,10 +114,10 @@

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE 0x100000000UL

#ifndef __ASSEMBLY__

@@ -158,10 +158,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* 1GB of VA */
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
- 0x7ff >> (PAGE_SHIFT - 12) : \
- 0x3ffff >> (PAGE_SHIFT - 12))
+ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
+ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#else
-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#endif

#ifdef __AARCH64EB__
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9e773732520c..91359f45b5fc 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -419,9 +419,9 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
- return randomize_page(mm->brk, SZ_32M);
+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
else
- return randomize_page(mm->brk, SZ_1G);
+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}

/*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7483cd514c32..835a86c45fb0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1153,8 +1153,7 @@ config VM86
default X86_LEGACY_VM86

config X86_16BIT
- bool "Enable support for 16-bit segments" if EXPERT
- default y
+ bool "Enable support for 16-bit segments"
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
@@ -2228,7 +2227,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
- default LEGACY_VSYSCALL_EMULATE
+ default LEGACY_VSYSCALL_NONE
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
@@ -2318,8 +2317,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.

config MODIFY_LDT_SYSCALL
- bool "Enable the LDT (local descriptor table)" if EXPERT
- default y
+ bool "Enable the LDT (local descriptor table)"
---help---
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 6293a8768a91..add82e0f1df3 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select X86_PTDUMP_CORE
+ default y
---help---
Generate a warning if any W+X mappings are found at boot.

diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index e32fc1f274d8..d08acc76502a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1911310959f8..bba8dbbc07a8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -203,55 +203,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}

#ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
- unsigned long addr, end;
- unsigned offset;
-
- /*
- * Round up the start address. It can start out unaligned as a result
- * of stack start randomization.
- */
- start = PAGE_ALIGN(start);
-
- /* Round the lowest possible end address up to a PMD boundary. */
- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
- if (end >= TASK_SIZE_MAX)
- end = TASK_SIZE_MAX;
- end -= len;
-
- if (end > start) {
- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
- addr = start + (offset << PAGE_SHIFT);
- } else {
- addr = start;
- }
-
- /*
- * Forcibly align the final address in case we have a hardware
- * issue that requires alignment for performance reasons.
- */
- addr = align_vdso_addr(addr);
-
- return addr;
-}
-
static int map_vdso_randomized(const struct vdso_image *image)
{
- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
- return map_vdso(image, addr);
+ return map_vdso(image, 0);
}
#endif

diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 3a091cea36c5..0931c05a3348 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -249,11 +249,11 @@ extern int force_personality32;

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- (DEFAULT_MAP_WINDOW / 3 * 2))
+ 0x100000000UL)

/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -312,8 +312,8 @@ extern unsigned long get_mmap_base(int is_legacy);

#ifdef CONFIG_X86_32

-#define __STACK_RND_MASK(is32bit) (0x7ff)
-#define STACK_RND_MASK (0x7ff)
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)

#define ARCH_DLINFO ARCH_DLINFO_IA32

@@ -322,7 +322,11 @@ extern unsigned long get_mmap_base(int is_legacy);
#else /* CONFIG_X86_32 */

/* 1GB for 64bit, 8MB for 32bit */
-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
+#ifdef CONFIG_COMPAT
+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
+#else
+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
+#endif
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())

#define ARCH_DLINFO \
@@ -380,5 +384,4 @@ struct va_alignment {
} ____cacheline_aligned;

extern struct va_alignment va_align;
-extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 704f31315dde..bb82b6344a7b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -253,6 +253,7 @@ static inline void cr4_set_bits(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
if ((cr4 | mask) != cr4) {
cr4 |= mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
@@ -266,6 +267,7 @@ static inline void cr4_clear_bits(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
if ((cr4 & ~mask) != cr4) {
cr4 &= ~mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
@@ -278,6 +280,7 @@ static inline void cr4_toggle_bits(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
cr4 ^= mask;
this_cpu_write(cpu_tlbstate.cr4, cr4);
__write_cr4(cr4);
@@ -386,6 +389,7 @@ static inline void __native_flush_tlb_global(void)
raw_local_irq_save(flags);

cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ BUG_ON(cr4 != __read_cr4());
/* toggle PGE */
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 48e98964ecad..a94dc690612f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1637,7 +1637,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();

- x86_configure_nx();
x2apic_setup();

/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 988a98f34c66..dc36d2d9078a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
+#include <asm/elf.h>
+#include <linux/sizes.h>

/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -719,7 +721,10 @@ unsigned long arch_align_stack(unsigned long sp)

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
- return randomize_page(mm->brk, 0x02000000);
+ if (mmap_is_ia32())
+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
+ else
+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}

/*
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index a63fe77b3217..e1085e76043e 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -54,13 +54,6 @@ static unsigned long get_align_bits(void)
return va_align.bits & get_align_mask();
}

-unsigned long align_vdso_addr(unsigned long addr)
-{
- unsigned long align_mask = get_align_mask();
- addr = (addr + align_mask) & ~align_mask;
- return addr | get_align_bits();
-}
-
static int __init control_va_addr_alignment(char *str)
{
/* guard against enabling this on other CPU families */
@@ -122,10 +115,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
}

*begin = get_mmap_base(1);
- if (in_compat_syscall())
- *end = task_size_32bit();
- else
- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
+ *end = get_mmap_base(0);
}

unsigned long
@@ -206,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ info.low_limit = get_mmap_base(1);
info.high_limit = get_mmap_base(0);

/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3141e67ec24c..e93173193f60 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -558,7 +558,7 @@ static void __init pagetable_init(void)
permanent_kmaps_init(pgd_base);
}

-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
@@ -865,7 +865,7 @@ int arch_remove_memory(u64 start, u64 size)
#endif
#endif

-int kernel_set_to_readonly __read_mostly;
+int kernel_set_to_readonly __ro_after_init;

void set_kernel_text_rw(void)
{
@@ -917,12 +917,11 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;

+ kernel_set_to_readonly = 1;
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);

- kernel_set_to_readonly = 1;
-
#ifdef CONFIG_CPA_DEBUG
printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
start, start+size);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 642357aff216..8bbf93ce3cd2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -65,7 +65,7 @@
* around without checking the pgd every time.
*/

-pteval_t __supported_pte_mask __read_mostly = ~0;
+pteval_t __supported_pte_mask __ro_after_init = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;
@@ -1185,7 +1185,7 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}

-int kernel_set_to_readonly;
+int kernel_set_to_readonly __ro_after_init;

void set_kernel_text_rw(void)
{
@@ -1234,9 +1234,8 @@ void mark_rodata_ro(void)

printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
kernel_set_to_readonly = 1;
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);

/*
* The rodata/data/bss/brk section (but not the kernel text!)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 01e2b353a2b9..9aeddca4a29f 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
* Softirq action handler - move entries to local list and loop over them
* while passing them to the queue registered handler.
*/
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(void)
{
struct list_head *cpu_list, local_list;

diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 473f150d6b22..65a65f9824ed 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5141,7 +5141,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;

- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
ap = qc->ap;

qc->flags = 0;
@@ -5158,7 +5158,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;

- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c28dca0c613d..d4813f0d25ca 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -9,7 +9,6 @@ source "drivers/tty/Kconfig"

config DEVMEM
bool "/dev/mem virtual device support"
- default y
help
Say Y here if you want to support the /dev/mem device.
The /dev/mem device is used to access areas of physical
@@ -568,7 +567,6 @@ config TELCLOCK
config DEVPORT
bool "/dev/port character device"
depends on ISA || PCI
- default y
help
Say Y here if you want to support the /dev/port device. The /dev/port
device is similar to /dev/mem, but for I/O ports.
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index e105532bfba8..e07d52bb9b62 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
return cx24116_read_status(fe, status);
}

-static int cx24116_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index d37cb7762bd6..97e0feff0ede 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
return cx24117_read_status(fe, status);
}

-static int cx24117_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 7f11dcc94d85..01da670760ba 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1491,7 +1491,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
return cx24120_read_status(fe, status);
}

-static int cx24120_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 1d59d1d3bd82..41cd0e9ea199 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1005,7 +1005,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
return retval;
}

-static int cx24123_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index f6ebbb47b9b2..3e0d8cbd76da 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
return DVBFE_ALGO_SEARCH_ERROR;
}

-static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_CUSTOM;
}
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e8ac8c3e2ec0..e0f4ba8302d1 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2055,7 +2055,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
kfree(state);
}

-static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index 274544a3ae0e..9ef9b9bc1bd2 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe,
return rc;
}

-static int s921_get_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 7166d2279465..fa682f9fdc4b 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1657,7 +1657,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
return 0;
}

-static int dst_get_tuning_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
{
return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
}
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index f75f69556be7..d913a6050e8c 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -98,7 +98,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}

-static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index 63fda79a75c0..4115c3ccd4a8 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -88,7 +88,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}

-static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
+static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_HW;
}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 981b3ef71e47..9883da1da383 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -78,7 +78,7 @@ static irqreturn_t jp_handle_irq_event(unsigned int irq,
return 0;
}

-static void jp_tasklet_action(struct softirq_action *a)
+static void jp_tasklet_action(void)
{
lkdtm_handler();
jprobe_return();
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b811442c5ce6..4f62a63cbcb1 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -122,7 +122,6 @@ config UNIX98_PTYS

config LEGACY_PTYS
bool "Legacy (BSD) PTY support"
- default y
---help---
A pseudo terminal (PTY) is a software device consisting of two
halves: a master and a slave. The slave device behaves identical to
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 562d31073f9a..2184b9b5485f 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -171,6 +171,7 @@ static void free_tty_struct(struct tty_struct *tty)
put_device(tty->dev);
kfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
+ put_user_ns(tty->owner_user_ns);
kfree(tty);
}

@@ -2154,11 +2155,19 @@ static int tty_fasync(int fd, struct file *filp, int on)
* FIXME: may race normal receive processing
*/

+int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
+
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
struct tty_ldisc *ld;

+ if (tiocsti_restrict &&
+ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
+ dev_warn_ratelimited(tty->dev,
+ "Denied TIOCSTI ioctl for non-privileged process\n");
+ return -EPERM;
+ }
if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ch, p))
@@ -2841,6 +2850,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
tty->index = idx;
tty_line_name(driver, idx, tty->name);
tty->dev = tty_get_device(tty);
+ tty->owner_user_ns = get_user_ns(current_user_ns());

return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 442be7f312f6..788557d5c454 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -38,6 +38,8 @@
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01

+extern int deny_new_usb;
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -4806,6 +4808,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
+
+ if (deny_new_usb) {
+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
+ goto done;
+ }
+
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
diff --git a/fs/exec.c b/fs/exec.c
index 0da4d748b4e6..69fcee853363 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/random.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -321,6 +322,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
arch_bprm_mm_init(mm, vma);
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
+ if (randomize_va_space)
+ bprm->p ^= get_random_int() & ~PAGE_MASK;
return 0;
err:
up_write(&mm->mmap_sem);
diff --git a/fs/namei.c b/fs/namei.c
index 0b46b858cd42..3ae8e72341da 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -902,8 +902,8 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}

-int sysctl_protected_symlinks __read_mostly = 0;
-int sysctl_protected_hardlinks __read_mostly = 0;
+int sysctl_protected_symlinks __read_mostly = 1;
+int sysctl_protected_hardlinks __read_mostly = 1;

/**
* may_follow_link - Check symlink following for unsafe situations
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 5f93cfacb3d1..cea0d7d3b23e 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -195,4 +195,3 @@ config NFS_DEBUG
bool
depends on NFS_FS && SUNRPC_DEBUG
select CRC32
- default y
diff --git a/fs/pipe.c b/fs/pipe.c
index 8ef7d7bef775..b82f305ec13d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -38,7 +38,7 @@ unsigned int pipe_max_size = 1048576;
/*
* Minimum pipe size, as required by POSIX
*/
-unsigned int pipe_min_size = PAGE_SIZE;
+unsigned int pipe_min_size __read_only = PAGE_SIZE;

/* Maximum allocatable pages per user. Hard limit is unset by default, soft
* matches default values.
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 1ade1206bb89..60b0f76dec47 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -39,7 +39,6 @@ config PROC_KCORE
config PROC_VMCORE
bool "/proc/vmcore support"
depends on PROC_FS && CRASH_DUMP
- default y
help
Exports the dump image of crashed kernel in ELF format.

diff --git a/fs/stat.c b/fs/stat.c
index 873785dae022..d3c2ada8b9c7 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
stat->gid = inode->i_gid;
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
+ stat->atime = inode->i_ctime;
+ stat->mtime = inode->i_ctime;
+ } else {
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ }
stat->ctime = inode->i_ctime;
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
stat->result_mask |= STATX_BASIC_STATS;
request_mask &= STATX_ALL;
query_flags &= KSTAT_QUERY_FLAGS;
- if (inode->i_op->getattr)
- return inode->i_op->getattr(path, stat, request_mask,
- query_flags);
+ if (inode->i_op->getattr) {
+ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
+ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
+ stat->atime = stat->ctime;
+ stat->mtime = stat->ctime;
+ }
+ return retval;
+ }

generic_fillattr(inode, stat);
return 0;
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..e7157c18c62c 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,6 +31,8 @@
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif

+#define __read_only __ro_after_init
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
diff --git a/include/linux/capability.h b/include/linux/capability.h
index f640dcbc880c..2b4f5d651f19 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
+extern bool capable_noaudit(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
#else
@@ -232,6 +233,10 @@ static inline bool capable(int cap)
{
return true;
}
+static inline bool capable_noaudit(int cap)
+{
+ return true;
+}
static inline bool ns_capable(struct user_namespace *ns, int cap)
{
return true;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc613f20e5a6..7606596d6c2e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3392,4 +3392,15 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);

+extern int device_sidechannel_restrict;
+
+static inline bool is_sidechannel_device(const struct inode *inode)
+{
+ umode_t mode;
+ if (!device_sidechannel_restrict)
+ return false;
+ mode = inode->i_mode;
+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
+}
+
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bdaf22582f6e..326ff15d4637 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file)
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;

+ if (is_sidechannel_device(inode))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;

@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file)
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;

+ if (is_sidechannel_device(inode))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b041f94678de..a5e0175c79e0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -518,9 +518,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));

#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 776f90f3a1cd..3f5c47000059 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page)
kunmap_atomic(kaddr);
}

+static inline void verify_zero_highpage(struct page *page)
+{
+ void *kaddr = kmap_atomic(page);
+ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
+ kunmap_atomic(kaddr);
+}
+
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..ee487ea4f48f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -485,7 +485,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];

struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};

asmlinkage void do_softirq(void);
@@ -500,7 +500,7 @@ static inline void do_softirq_own_stack(void)
}
#endif

-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void __init open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..c992d130b94d 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -46,7 +46,7 @@ struct kobj_ns_type_operations {
void (*drop_ns)(void *);
};

-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f23215854c80..98df98c44cc0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -525,7 +525,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif

-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
static inline void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, NUMA_NO_NODE);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 296bbe49d5d1..b26652c9a98d 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);

@@ -137,8 +137,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif

-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e22f24ded6a..b7fecdfa6de5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1165,6 +1165,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);

+static inline bool perf_paranoid_any(void)
+{
+ return sysctl_perf_event_paranoid > 2;
+}
+
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ae5ed6492d54..fd0786124504 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -146,8 +146,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check __krealloc(const void *, size_t, gfp_t);
-void * __must_check krealloc(const void *, size_t, gfp_t);
+void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
+void * __must_check krealloc(const void *, size_t, gfp_t) __attribute((alloc_size(2)));
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
@@ -324,7 +324,7 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */

-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

@@ -348,7 +348,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}

#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -473,7 +473,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* for general use, and so are not documented here. For a full list of
* potential flags, always refer to linux/gfp.h.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
if (size > KMALLOC_MAX_CACHE_SIZE)
@@ -513,7 +513,7 @@ static __always_inline int kmalloc_size(int n)
return 0;
}

-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 39fa09bcde23..0b7a48cd883b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -120,6 +120,11 @@ struct kmem_cache {
unsigned long random;
#endif

+#ifdef CONFIG_SLAB_CANARY
+ unsigned long random_active;
+ unsigned long random_inactive;
+#endif
+
#ifdef CONFIG_NUMA
/*
* Defragmentation by allocating from a remote node.
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..b9ecb42c762d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -234,10 +234,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob
void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");

+#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
+#define __string_size(p) __builtin_object_size(p, 1)
+#else
+#define __string_size(p) __builtin_object_size(p, 0)
+#endif
+
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
if (__builtin_constant_p(size) && p_size < size)
__write_overflow();
if (p_size < size)
@@ -247,7 +253,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)

__FORTIFY_INLINE char *strcat(char *p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
if (p_size == (size_t)-1)
return __builtin_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
{
__kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);

/* Work around gcc excess stack consumption issue */
if (p_size == (size_t)-1 ||
@@ -273,7 +279,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __string_size(p);
__kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
if (p_size <= ret && maxlen != ret)
fortify_panic(__func__);
@@ -285,8 +291,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
{
size_t ret;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __real_strlcpy(p, q, size);
ret = strlen(q);
@@ -306,8 +312,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
{
size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strncat(p, q, count);
p_len = strlen(p);
@@ -420,8 +426,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
/* defined after fortified strlen and memcpy to reuse them */
__FORTIFY_INLINE char *strcpy(char *p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __string_size(p);
+ size_t q_size = __string_size(q);
if (p_size == (size_t)-1 && q_size == (size_t)-1)
return __builtin_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
diff --git a/include/linux/tty.h b/include/linux/tty.h
1307
1307
+
index 1dd587ba6d88..9a9a04fb641d 100644
1308
1308
+
--- a/include/linux/tty.h
1309
1309
+
+++ b/include/linux/tty.h
1310
1310
+
@@ -13,6 +13,7 @@
1311
1311
+
#include <uapi/linux/tty.h>
1312
1312
+
#include <linux/rwsem.h>
1313
1313
+
#include <linux/llist.h>
1314
1314
+
+#include <linux/user_namespace.h>
1315
1315
+
1316
1316
+
1317
1317
+
/*
1318
1318
+
@@ -335,6 +336,7 @@ struct tty_struct {
1319
1319
+
/* If the tty has a pending do_SAK, queue it here - akpm */
1320
1320
+
struct work_struct SAK_work;
1321
1321
+
struct tty_port *port;
1322
1322
+
+ struct user_namespace *owner_user_ns;
1323
1323
+
} __randomize_layout;
1324
1324
+
1325
1325
+
/* Each of a tty's open files has private_data pointing to tty_file_private */
1326
1326
+
@@ -344,6 +346,8 @@ struct tty_file_private {
1327
1327
+
struct list_head list;
1328
1328
+
};
1329
1329
+
1330
1330
+
+extern int tiocsti_restrict;
1331
1331
+
+
1332
1332
+
/* tty magic number */
1333
1333
+
#define TTY_MAGIC 0x5401
1334
1334
+
1335
1335
+
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
1336
1336
+
index 1e5d8c392f15..66d0e49c9987 100644
1337
1337
+
--- a/include/linux/vmalloc.h
1338
1338
+
+++ b/include/linux/vmalloc.h
1339
1339
+
@@ -68,19 +68,19 @@ static inline void vmalloc_init(void)
1340
1340
+
}
1341
1341
+
#endif
1342
1342
+
1343
1343
+
-extern void *vmalloc(unsigned long size);
1344
1344
+
-extern void *vzalloc(unsigned long size);
1345
1345
+
-extern void *vmalloc_user(unsigned long size);
1346
1346
+
-extern void *vmalloc_node(unsigned long size, int node);
1347
1347
+
-extern void *vzalloc_node(unsigned long size, int node);
1348
1348
+
-extern void *vmalloc_exec(unsigned long size);
1349
1349
+
-extern void *vmalloc_32(unsigned long size);
1350
1350
+
-extern void *vmalloc_32_user(unsigned long size);
1351
1351
+
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
1352
1352
+
+extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
1353
1353
+
+extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
1354
1354
+
+extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
1355
1355
+
+extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
1356
1356
+
+extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
1357
1357
+
+extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
1358
1358
+
+extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
1359
1359
+
+extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
1360
1360
+
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
1361
1361
+
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
1362
1362
+
unsigned long start, unsigned long end, gfp_t gfp_mask,
1363
1363
+
pgprot_t prot, unsigned long vm_flags, int node,
1364
1364
+
- const void *caller);
1365
1365
+
+ const void *caller) __attribute__((alloc_size(1)));
1366
1366
+
#ifndef CONFIG_MMU
1367
1367
+
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
1368
1368
+
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
1369
1369
+
diff --git a/init/Kconfig b/init/Kconfig
1370
1370
+
index 46075327c165..0c78750bc76d 100644
1371
1371
+
--- a/init/Kconfig
1372
1372
+
+++ b/init/Kconfig
1373
1373
+
@@ -309,6 +309,7 @@ config USELIB
1374
1374
+
config AUDIT
1375
1375
+
bool "Auditing support"
1376
1376
+
depends on NET
1377
1377
+
+ default y
1378
1378
+
help
1379
1379
+
Enable auditing infrastructure that can be used with another
1380
1380
+
kernel subsystem, such as SELinux (which requires this for
1381
1381
+
@@ -1052,6 +1053,12 @@ config CC_OPTIMIZE_FOR_SIZE
1382
1382
+
1383
1383
+
endchoice
1384
1384
+
1385
1385
+
+config LOCAL_INIT
1386
1386
+
+ bool "Zero uninitialized locals"
1387
1387
+
+ help
1388
1388
+
+ Zero-fill uninitialized local variables, other than variable-length
1389
1389
+
+ arrays. Requires compiler support.
1390
1390
+
+
1391
1391
+
config SYSCTL
1392
1392
+
bool
1393
1393
+
1394
1394
+
@@ -1361,8 +1368,7 @@ config SHMEM
1395
1395
+
which may be appropriate on small systems without swap.
1396
1396
+
1397
1397
+
config AIO
1398
1398
+
- bool "Enable AIO support" if EXPERT
1399
1399
+
- default y
1400
1400
+
+ bool "Enable AIO support"
1401
1401
+
help
1402
1402
+
This option enables POSIX asynchronous I/O which may by used
1403
1403
+
by some high performance threaded applications. Disabling
1404
1404
+
@@ -1491,7 +1497,7 @@ config VM_EVENT_COUNTERS
1405
1405
+
1406
1406
+
config SLUB_DEBUG
1407
1407
+
default y
1408
1408
+
- bool "Enable SLUB debugging support" if EXPERT
1409
1409
+
+ bool "Enable SLUB debugging support"
1410
1410
+
depends on SLUB && SYSFS
1411
1411
+
help
1412
1412
+
SLUB has extensive debug support features. Disabling these can
1413
1413
+
@@ -1515,7 +1521,6 @@ config SLUB_MEMCG_SYSFS_ON
1414
1414
+
1415
1415
+
config COMPAT_BRK
1416
1416
+
bool "Disable heap randomization"
1417
1417
+
- default y
1418
1418
+
help
1419
1419
+
Randomizing heap placement makes heap exploits harder, but it
1420
1420
+
also breaks ancient binaries (including anything libc5 based).
1421
1421
+
@@ -1562,7 +1567,6 @@ endchoice
1422
1422
+
1423
1423
+
config SLAB_MERGE_DEFAULT
1424
1424
+
bool "Allow slab caches to be merged"
1425
1425
+
- default y
1426
1426
+
help
1427
1427
+
For reduced kernel memory fragmentation, slab caches can be
1428
1428
+
merged when they share the same size and other characteristics.
1429
1429
+
@@ -1575,9 +1579,9 @@ config SLAB_MERGE_DEFAULT
1430
1430
+
command line.
1431
1431
+
1432
1432
+
config SLAB_FREELIST_RANDOM
1433
1433
+
- default n
1434
1434
+
depends on SLAB || SLUB
1435
1435
+
bool "SLAB freelist randomization"
1436
1436
+
+ default y
1437
1437
+
help
1438
1438
+
Randomizes the freelist order used on creating new pages. This
1439
1439
+
security feature reduces the predictability of the kernel slab
1440
1440
+
@@ -1586,12 +1590,56 @@ config SLAB_FREELIST_RANDOM
1441
1441
+
config SLAB_FREELIST_HARDENED
1442
1442
+
bool "Harden slab freelist metadata"
1443
1443
+
depends on SLUB
1444
1444
+
+ default y
1445
1445
+
help
1446
1446
+
Many kernel heap attacks try to target slab cache metadata and
1447
1447
+
other infrastructure. This options makes minor performance
1448
1448
+
sacrifies to harden the kernel slab allocator against common
1449
1449
+
freelist exploit methods.
1450
1450
+
1451
1451
+
+config SLAB_HARDENED
1452
1452
+
+ default y
1453
1453
+
+ depends on SLUB
1454
1454
+
+ bool "Hardened SLAB infrastructure"
1455
1455
+
+ help
1456
1456
+
+ Make minor performance sacrifices to harden the kernel slab
1457
1457
+
+ allocator.
1458
1458
+
+
1459
1459
+
+config SLAB_CANARY
1460
1460
+
+ depends on SLUB
1461
1461
+
+ depends on !SLAB_MERGE_DEFAULT
1462
1462
+
+ bool "SLAB canaries"
1463
1463
+
+ default y
1464
1464
+
+ help
1465
1465
+
+ Place canaries at the end of kernel slab allocations, sacrificing
1466
1466
+
+ some performance and memory usage for security.
1467
1467
+
+
1468
1468
+
+ Canaries can detect some forms of heap corruption when allocations
1469
1469
+
+ are freed and as part of the HARDENED_USERCOPY feature. It provides
1470
1470
+
+ basic use-after-free detection for HARDENED_USERCOPY.
1471
1471
+
+
1472
1472
+
+ Canaries absorb small overflows (rendering them harmless), mitigate
1473
1473
+
+ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
1474
1474
+
+ byte and provide basic double-free detection.
1475
1475
+
+
1476
1476
+
+config SLAB_SANITIZE
1477
1477
+
+ bool "Sanitize SLAB allocations"
1478
1478
+
+ depends on SLUB
1479
1479
+
+ default y
1480
1480
+
+ help
1481
1481
+
+ Zero fill slab allocations on free, reducing the lifetime of
1482
1482
+
+ sensitive data and helping to mitigate use-after-free bugs.
1483
1483
+
+
1484
1484
+
+ For slabs with debug poisoning enabling, this has no impact.
1485
1485
+
+
1486
1486
+
+config SLAB_SANITIZE_VERIFY
1487
1487
+
+ depends on SLAB_SANITIZE && PAGE_SANITIZE
1488
1488
+
+ default y
1489
1489
+
+ bool "Verify sanitized SLAB allocations"
1490
1490
+
+ help
1491
1491
+
+ Verify that newly allocated slab allocations are zeroed to detect
1492
1492
+
+ write-after-free bugs.
1493
1493
+
+
1494
1494
+
config SLUB_CPU_PARTIAL
1495
1495
+
default y
1496
1496
+
depends on SLUB && SMP
1497
1497
+
diff --git a/kernel/audit.c b/kernel/audit.c
1498
1498
+
index 5b34d3114af4..e57930192ce1 100644
1499
1499
+
--- a/kernel/audit.c
1500
1500
+
+++ b/kernel/audit.c
1501
1501
+
@@ -1573,6 +1573,9 @@ static int __init audit_enable(char *str)
1502
1502
+
audit_default = !!simple_strtol(str, NULL, 0);
1503
1503
+
if (!audit_default)
1504
1504
+
audit_initialized = AUDIT_DISABLED;
1505
1505
+
+ else
1506
1506
+
+ audit_initialized = AUDIT_UNINITIALIZED;
1507
1507
+
+
1508
1508
+
audit_enabled = audit_default;
1509
1509
+
audit_ever_enabled = !!audit_enabled;
1510
1510
+
1511
1511
+
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1512
1512
+
index d203a5d6b726..2a6c3e2c57a6 100644
1513
1513
+
--- a/kernel/bpf/core.c
1514
1514
+
+++ b/kernel/bpf/core.c
1515
1515
+
@@ -539,7 +539,7 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
1516
1516
+
bpf_prog_unlock_free(fp);
1517
1517
+
}
1518
1518
+
1519
1519
+
-int bpf_jit_harden __read_mostly;
1520
1520
+
+int bpf_jit_harden __read_mostly = 2;
1521
1521
+
1522
1522
+
static int bpf_jit_blind_insn(const struct bpf_insn *from,
1523
1523
+
const struct bpf_insn *aux,
1524
1524
+
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
1525
1525
+
index 4e933219fec6..0f37db32a2b1 100644
1526
1526
+
--- a/kernel/bpf/syscall.c
1527
1527
+
+++ b/kernel/bpf/syscall.c
1528
1528
+
@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
1529
1529
+
static DEFINE_IDR(map_idr);
1530
1530
+
static DEFINE_SPINLOCK(map_idr_lock);
1531
1531
+
1532
1532
+
-int sysctl_unprivileged_bpf_disabled __read_mostly;
1533
1533
+
+int sysctl_unprivileged_bpf_disabled __read_mostly = 1;
1534
1534
+
1535
1535
+
static const struct bpf_map_ops * const bpf_map_types[] = {
1536
1536
+
#define BPF_PROG_TYPE(_id, _ops)
1537
1537
+
diff --git a/kernel/capability.c b/kernel/capability.c
1538
1538
+
index 1e1c0236f55b..452062fe45ce 100644
1539
1539
+
--- a/kernel/capability.c
1540
1540
+
+++ b/kernel/capability.c
1541
1541
+
@@ -431,6 +431,12 @@ bool capable(int cap)
1542
1542
+
return ns_capable(&init_user_ns, cap);
1543
1543
+
}
1544
1544
+
EXPORT_SYMBOL(capable);
1545
1545
+
+
1546
1546
+
+bool capable_noaudit(int cap)
1547
1547
+
+{
1548
1548
+
+ return ns_capable_noaudit(&init_user_ns, cap);
1549
1549
+
+}
1550
1550
+
+EXPORT_SYMBOL(capable_noaudit);
1551
1551
+
#endif /* CONFIG_MULTIUSER */
1552
1552
+
1553
1553
+
/**
1554
1554
+
diff --git a/kernel/events/core.c b/kernel/events/core.c
1555
1555
+
index cb8274d7824f..c1b3d232b0a4 100644
1556
1556
+
--- a/kernel/events/core.c
1557
1557
+
+++ b/kernel/events/core.c
1558
1558
+
@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
1559
1559
+
* 0 - disallow raw tracepoint access for unpriv
1560
1560
+
* 1 - disallow cpu events for unpriv
1561
1561
+
* 2 - disallow kernel profiling for unpriv
1562
1562
+
+ * 3 - disallow all unpriv perf event use
1563
1563
+
*/
1564
1564
+
+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
1565
1565
+
+int sysctl_perf_event_paranoid __read_mostly = 3;
1566
1566
+
+#else
1567
1567
+
int sysctl_perf_event_paranoid __read_mostly = 2;
1568
1568
+
+#endif
1569
1569
+
1570
1570
+
/* Minimum for 512 kiB + 1 user control page */
1571
1571
+
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
1572
1572
+
@@ -9941,6 +9946,9 @@ SYSCALL_DEFINE5(perf_event_open,
1573
1573
+
if (flags & ~PERF_FLAG_ALL)
1574
1574
+
return -EINVAL;
1575
1575
+
1576
1576
+
+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
1577
1577
+
+ return -EACCES;
1578
1578
+
+
1579
1579
+
err = perf_copy_attr(attr_uptr, &attr);
1580
1580
+
if (err)
1581
1581
+
return err;
1582
1582
+
diff --git a/kernel/fork.c b/kernel/fork.c
1583
1583
+
index 98c91bd341b4..dbb9540ee61c 100644
1584
1584
+
--- a/kernel/fork.c
1585
1585
+
+++ b/kernel/fork.c
1586
1586
+
@@ -102,6 +102,11 @@
1587
1587
+
1588
1588
+
#define CREATE_TRACE_POINTS
1589
1589
+
#include <trace/events/task.h>
1590
1590
+
+#ifdef CONFIG_USER_NS
1591
1591
+
+extern int unprivileged_userns_clone;
1592
1592
+
+#else
1593
1593
+
+#define unprivileged_userns_clone 0
1594
1594
+
+#endif
1595
1595
+
1596
1596
+
/*
1597
1597
+
* Minimum number of threads to boot the kernel
1598
1598
+
@@ -1554,6 +1559,10 @@ static __latent_entropy struct task_struct *copy_process(
1599
1599
+
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1600
1600
+
return ERR_PTR(-EINVAL);
1601
1601
+
1602
1602
+
+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
1603
1603
+
+ if (!capable(CAP_SYS_ADMIN))
1604
1604
+
+ return ERR_PTR(-EPERM);
1605
1605
+
+
1606
1606
+
/*
1607
1607
+
* Thread groups must share signals as well, and detached threads
1608
1608
+
* can only be started up within the thread group.
1609
1609
+
@@ -2347,6 +2356,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1610
1610
+
if (unshare_flags & CLONE_NEWNS)
1611
1611
+
unshare_flags |= CLONE_FS;
1612
1612
+
1613
1613
+
+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
1614
1614
+
+ err = -EPERM;
1615
1615
+
+ if (!capable(CAP_SYS_ADMIN))
1616
1616
+
+ goto bad_unshare_out;
1617
1617
+
+ }
1618
1618
+
+
1619
1619
+
err = check_unshare_flags(unshare_flags);
1620
1620
+
if (err)
1621
1621
+
goto bad_unshare_out;
1622
1622
+
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
1623
1623
+
index 0972a8e09d08..00dde7aad47a 100644
1624
1624
+
--- a/kernel/power/snapshot.c
1625
1625
+
+++ b/kernel/power/snapshot.c
1626
1626
+
@@ -1136,7 +1136,7 @@ void free_basic_memory_bitmaps(void)
1627
1627
+
1628
1628
+
void clear_free_pages(void)
1629
1629
+
{
1630
1630
+
-#ifdef CONFIG_PAGE_POISONING_ZERO
1631
1631
+
+#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
1632
1632
+
struct memory_bitmap *bm = free_pages_map;
1633
1633
+
unsigned long pfn;
1634
1634
+
1635
1635
+
@@ -1153,7 +1153,7 @@ void clear_free_pages(void)
1636
1636
+
}
1637
1637
+
memory_bm_position_reset(bm);
1638
1638
+
pr_info("PM: free pages cleared after restore\n");
1639
1639
+
-#endif /* PAGE_POISONING_ZERO */
1640
1640
+
+#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
1641
1641
+
}
1642
1642
+
1643
1643
+
/**
1644
1644
+
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
1645
1645
+
index a64eee0db39e..4d7de378fe4c 100644
1646
1646
+
--- a/kernel/rcu/tiny.c
1647
1647
+
+++ b/kernel/rcu/tiny.c
1648
1648
+
@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
1649
1649
+
}
1650
1650
+
}
1651
1651
+
1652
1652
+
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
1653
1653
+
+static __latent_entropy void rcu_process_callbacks(void)
1654
1654
+
{
1655
1655
+
__rcu_process_callbacks(&rcu_sched_ctrlblk);
1656
1656
+
__rcu_process_callbacks(&rcu_bh_ctrlblk);
1657
1657
+
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
1658
1658
+
index 3e3650e94ae6..7ecd7a5d04b3 100644
1659
1659
+
--- a/kernel/rcu/tree.c
1660
1660
+
+++ b/kernel/rcu/tree.c
1661
1661
+
@@ -2918,7 +2918,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
1662
1662
+
/*
1663
1663
+
* Do RCU core processing for the current CPU.
1664
1664
+
*/
1665
1665
+
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
1666
1666
+
+static __latent_entropy void rcu_process_callbacks(void)
1667
1667
+
{
1668
1668
+
struct rcu_state *rsp;
1669
1669
+
1670
1670
+
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1671
1671
+
index 5c09ddf8c832..f5db6ece105a 100644
1672
1672
+
--- a/kernel/sched/fair.c
1673
1673
+
+++ b/kernel/sched/fair.c
1674
1674
+
@@ -8986,7 +8986,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
1675
1675
+
* run_rebalance_domains is triggered when needed from the scheduler tick.
1676
1676
+
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
1677
1677
+
*/
1678
1678
+
-static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
1679
1679
+
+static __latent_entropy void run_rebalance_domains(void)
1680
1680
+
{
1681
1681
+
struct rq *this_rq = this_rq();
1682
1682
+
enum cpu_idle_type idle = this_rq->idle_balance ?
1683
1683
+
diff --git a/kernel/softirq.c b/kernel/softirq.c
1684
1684
+
index e89c3b0cff6d..0d3ebd520931 100644
1685
1685
+
--- a/kernel/softirq.c
1686
1686
+
+++ b/kernel/softirq.c
1687
1687
+
@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
1688
1688
+
EXPORT_SYMBOL(irq_stat);
1689
1689
+
#endif
1690
1690
+
1691
1691
+
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
1692
1692
+
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
1693
1693
+
1694
1694
+
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
1695
1695
+
1696
1696
+
@@ -281,7 +281,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
1697
1697
+
kstat_incr_softirqs_this_cpu(vec_nr);
1698
1698
+
1699
1699
+
trace_softirq_entry(vec_nr);
1700
1700
+
- h->action(h);
1701
1701
+
+ h->action();
1702
1702
+
trace_softirq_exit(vec_nr);
1703
1703
+
if (unlikely(prev_count != preempt_count())) {
1704
1704
+
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
1705
1705
+
@@ -444,7 +444,7 @@ void __raise_softirq_irqoff(unsigned int nr)
1706
1706
+
or_softirq_pending(1UL << nr);
1707
1707
+
}
1708
1708
+
1709
1709
+
-void open_softirq(int nr, void (*action)(struct softirq_action *))
1710
1710
+
+void __init open_softirq(int nr, void (*action)(void))
1711
1711
+
{
1712
1712
+
softirq_vec[nr].action = action;
1713
1713
+
}
1714
1714
+
@@ -486,7 +486,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
1715
1715
+
}
1716
1716
+
EXPORT_SYMBOL(__tasklet_hi_schedule);
1717
1717
+
1718
1718
+
-static __latent_entropy void tasklet_action(struct softirq_action *a)
1719
1719
+
+static __latent_entropy void tasklet_action(void)
1720
1720
+
{
1721
1721
+
struct tasklet_struct *list;
1722
1722
+
1723
1723
+
@@ -522,7 +522,7 @@ static __latent_entropy void tasklet_action(struct softirq_action *a)
1724
1724
+
}
1725
1725
+
}
1726
1726
+
1727
1727
+
-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
1728
1728
+
+static __latent_entropy void tasklet_hi_action(void)
1729
1729
+
{
1730
1730
+
struct tasklet_struct *list;
1731
1731
+
1732
1732
+
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1733
1733
+
index 069550540a39..822783a174aa 100644
1734
1734
+
--- a/kernel/sysctl.c
1735
1735
+
+++ b/kernel/sysctl.c
1736
1736
+
@@ -66,6 +66,7 @@
1737
1737
+
#include <linux/kexec.h>
1738
1738
+
#include <linux/bpf.h>
1739
1739
+
#include <linux/mount.h>
1740
1740
+
+#include <linux/tty.h>
1741
1741
+
1742
1742
+
#include <linux/uaccess.h>
1743
1743
+
#include <asm/processor.h>
1744
1744
+
@@ -98,12 +99,19 @@
1745
1745
+
#if defined(CONFIG_SYSCTL)
1746
1746
+
1747
1747
+
/* External variables not in a header file. */
1748
1748
+
+#if IS_ENABLED(CONFIG_USB)
1749
1749
+
+int deny_new_usb __read_mostly = 0;
1750
1750
+
+EXPORT_SYMBOL(deny_new_usb);
1751
1751
+
+#endif
1752
1752
+
extern int suid_dumpable;
1753
1753
+
#ifdef CONFIG_COREDUMP
1754
1754
+
extern int core_uses_pid;
1755
1755
+
extern char core_pattern[];
1756
1756
+
extern unsigned int core_pipe_limit;
1757
1757
+
#endif
1758
1758
+
+#ifdef CONFIG_USER_NS
1759
1759
+
+extern int unprivileged_userns_clone;
1760
1760
+
+#endif
1761
1761
+
extern int pid_max;
1762
1762
+
extern int pid_max_min, pid_max_max;
1763
1763
+
extern int percpu_pagelist_fraction;
1764
1764
+
@@ -115,40 +123,43 @@ extern int sysctl_nr_trim_pages;
1765
1765
+
1766
1766
+
/* Constants used for minimum and maximum */
1767
1767
+
#ifdef CONFIG_LOCKUP_DETECTOR
1768
1768
+
-static int sixty = 60;
1769
1769
+
+static int sixty __read_only = 60;
1770
1770
+
#endif
1771
1771
+
1772
1772
+
-static int __maybe_unused neg_one = -1;
1773
1773
+
+static int __maybe_unused neg_one __read_only = -1;
1774
1774
+
1775
1775
+
static int zero;
1776
1776
+
-static int __maybe_unused one = 1;
1777
1777
+
-static int __maybe_unused two = 2;
1778
1778
+
-static int __maybe_unused four = 4;
1779
1779
+
-static unsigned long one_ul = 1;
1780
1780
+
-static int one_hundred = 100;
1781
1781
+
-static int one_thousand = 1000;
1782
1782
+
+static int __maybe_unused one __read_only = 1;
1783
1783
+
+static int __maybe_unused two __read_only = 2;
1784
1784
+
+static int __maybe_unused four __read_only = 4;
1785
1785
+
+static unsigned long one_ul __read_only = 1;
1786
1786
+
+static int one_hundred __read_only = 100;
1787
1787
+
+static int one_thousand __read_only = 1000;
1788
1788
+
#ifdef CONFIG_PRINTK
1789
1789
+
-static int ten_thousand = 10000;
1790
1790
+
+static int ten_thousand __read_only = 10000;
1791
1791
+
#endif
1792
1792
+
#ifdef CONFIG_PERF_EVENTS
1793
1793
+
-static int six_hundred_forty_kb = 640 * 1024;
1794
1794
+
+static int six_hundred_forty_kb __read_only = 640 * 1024;
1795
1795
+
#endif
1796
1796
+
1797
1797
+
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
1798
1798
+
-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
1799
1799
+
+static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
1800
1800
+
1801
1801
+
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
1802
1802
+
-static int maxolduid = 65535;
1803
1803
+
-static int minolduid;
1804
1804
+
+static int maxolduid __read_only = 65535;
1805
1805
+
+static int minolduid __read_only;
1806
1806
+
1807
1807
+
-static int ngroups_max = NGROUPS_MAX;
1808
1808
+
+static int ngroups_max __read_only = NGROUPS_MAX;
1809
1809
+
static const int cap_last_cap = CAP_LAST_CAP;
1810
1810
+
1811
1811
+
/*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
1812
1812
+
#ifdef CONFIG_DETECT_HUNG_TASK
1813
1813
+
-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
1814
1814
+
+static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
1815
1815
+
#endif
1816
1816
+
1817
1817
+
+int device_sidechannel_restrict __read_mostly = 1;
1818
1818
+
+EXPORT_SYMBOL(device_sidechannel_restrict);
1819
1819
+
+
1820
1820
+
#ifdef CONFIG_INOTIFY_USER
1821
1821
+
#include <linux/inotify.h>
1822
1822
+
#endif
1823
1823
+
@@ -286,19 +297,19 @@ static struct ctl_table sysctl_base_table[] = {
1824
1824
+
};
1825
1825
+
1826
1826
+
#ifdef CONFIG_SCHED_DEBUG
1827
1827
+
-static int min_sched_granularity_ns = 100000; /* 100 usecs */
1828
1828
+
-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
1829
1829
+
-static int min_wakeup_granularity_ns; /* 0 usecs */
1830
1830
+
-static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
1831
1831
+
+static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
1832
1832
+
+static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
1833
1833
+
+static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
1834
1834
+
+static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
1835
1835
+
#ifdef CONFIG_SMP
1836
1836
+
-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
1837
1837
+
-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
1838
1838
+
+static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
1839
1839
+
+static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
1840
1840
+
#endif /* CONFIG_SMP */
1841
1841
+
#endif /* CONFIG_SCHED_DEBUG */
1842
1842
+
1843
1843
+
#ifdef CONFIG_COMPACTION
1844
1844
+
-static int min_extfrag_threshold;
1845
1845
+
-static int max_extfrag_threshold = 1000;
1846
1846
+
+static int min_extfrag_threshold __read_only;
1847
1847
+
+static int max_extfrag_threshold __read_only = 1000;
1848
1848
+
#endif
1849
1849
+
1850
1850
+
static struct ctl_table kern_table[] = {
1851
1851
+
@@ -512,6 +523,15 @@ static struct ctl_table kern_table[] = {
1852
1852
+
.proc_handler = proc_dointvec,
1853
1853
+
},
1854
1854
+
#endif
1855
1855
+
+#ifdef CONFIG_USER_NS
1856
1856
+
+ {
1857
1857
+
+ .procname = "unprivileged_userns_clone",
1858
1858
+
+ .data = &unprivileged_userns_clone,
1859
1859
+
+ .maxlen = sizeof(int),
1860
1860
+
+ .mode = 0644,
1861
1861
+
+ .proc_handler = proc_dointvec,
1862
1862
+
+ },
1863
1863
+
+#endif
1864
1864
+
#ifdef CONFIG_PROC_SYSCTL
1865
1865
+
{
1866
1866
+
.procname = "tainted",
1867
1867
+
@@ -853,6 +873,37 @@ static struct ctl_table kern_table[] = {
1868
1868
+
.extra1 = &zero,
1869
1869
+
.extra2 = &two,
1870
1870
+
},
1871
1871
+
+#endif
1872
1872
+
+#if defined CONFIG_TTY
1873
1873
+
+ {
1874
1874
+
+ .procname = "tiocsti_restrict",
1875
1875
+
+ .data = &tiocsti_restrict,
1876
1876
+
+ .maxlen = sizeof(int),
1877
1877
+
+ .mode = 0644,
1878
1878
+
+ .proc_handler = proc_dointvec_minmax_sysadmin,
1879
1879
+
+ .extra1 = &zero,
1880
1880
+
+ .extra2 = &one,
1881
1881
+
+ },
1882
1882
+
+#endif
1883
1883
+
+ {
1884
1884
+
+ .procname = "device_sidechannel_restrict",
1885
1885
+
+ .data = &device_sidechannel_restrict,
1886
1886
+
+ .maxlen = sizeof(int),
1887
1887
+
+ .mode = 0644,
1888
1888
+
+ .proc_handler = proc_dointvec_minmax_sysadmin,
1889
1889
+
+ .extra1 = &zero,
1890
1890
+
+ .extra2 = &one,
1891
1891
+
+ },
1892
1892
+
+#if IS_ENABLED(CONFIG_USB)
1893
1893
+
+ {
1894
1894
+
+ .procname = "deny_new_usb",
1895
1895
+
+ .data = &deny_new_usb,
1896
1896
+
+ .maxlen = sizeof(int),
1897
1897
+
+ .mode = 0644,
1898
1898
+
+ .proc_handler = proc_dointvec_minmax_sysadmin,
1899
1899
+
+ .extra1 = &zero,
1900
1900
+
+ .extra2 = &one,
1901
1901
+
+ },
1902
1902
+
#endif
1903
1903
+
{
1904
1904
+
.procname = "ngroups_max",
1905
1905
+
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1906
1906
+
index 9fe525f410bf..6a85b0e1292e 100644
1907
1907
+
--- a/kernel/time/timer.c
1908
1908
+
+++ b/kernel/time/timer.c
1909
1909
+
@@ -1624,7 +1624,7 @@ static inline void __run_timers(struct timer_base *base)
1910
1910
+
/*
1911
1911
+
* This function runs timers and the timer-tq in bottom half context.
1912
1912
+
*/
1913
1913
+
-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1914
1914
+
+static __latent_entropy void run_timer_softirq(void)
1915
1915
+
{
1916
1916
+
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1917
1917
+
1918
1918
+
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1919
1919
+
index c490f1e4313b..dd03bd39d7bf 100644
1920
1920
+
--- a/kernel/user_namespace.c
1921
1921
+
+++ b/kernel/user_namespace.c
1922
1922
+
@@ -24,6 +24,9 @@
1923
1923
+
#include <linux/projid.h>
1924
1924
+
#include <linux/fs_struct.h>
1925
1925
+
1926
1926
+
+/* sysctl */
1927
1927
+
+int unprivileged_userns_clone;
1928
1928
+
+
1929
1929
+
static struct kmem_cache *user_ns_cachep __read_mostly;
1930
1930
+
static DEFINE_MUTEX(userns_state_mutex);
1931
1931
+
1932
1932
+
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
1933
1933
+
index 62d0e25c054c..3953072277eb 100644
1934
1934
+
--- a/lib/Kconfig.debug
1935
1935
+
+++ b/lib/Kconfig.debug
1936
1936
+
@@ -937,6 +937,7 @@ endmenu # "Debug lockups and hangs"
1937
1937
+
1938
1938
+
config PANIC_ON_OOPS
1939
1939
+
bool "Panic on Oops"
1940
1940
+
+ default y
1941
1941
+
help
1942
1942
+
Say Y here to enable the kernel to panic when it oopses. This
1943
1943
+
has the same effect as setting oops=panic on the kernel command
1944
1944
+
@@ -946,7 +947,7 @@ config PANIC_ON_OOPS
1945
1945
+
anything erroneous after an oops which could result in data
1946
1946
+
corruption or other issues.
1947
1947
+
1948
1948
+
- Say N if unsure.
1949
1949
+
+ Say Y if unsure.
1950
1950
+
1951
1951
+
config PANIC_ON_OOPS_VALUE
1952
1952
+
int
1953
1953
+
@@ -1319,6 +1320,7 @@ config DEBUG_BUGVERBOSE
1954
1954
+
config DEBUG_LIST
1955
1955
+
bool "Debug linked list manipulation"
1956
1956
+
depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
1957
1957
+
+ default y
1958
1958
+
help
1959
1959
+
Enable this to turn on extended checks in the linked-list
1960
1960
+
walking routines.
1961
1961
+
@@ -1932,6 +1934,7 @@ config MEMTEST
1962
1962
+
config BUG_ON_DATA_CORRUPTION
1963
1963
+
bool "Trigger a BUG when data corruption is detected"
1964
1964
+
select DEBUG_LIST
1965
1965
+
+ default y
1966
1966
+
help
1967
1967
+
Select this option if the kernel should BUG when it encounters
1968
1968
+
data corruption in kernel memory structures when they get checked
1969
1969
+
@@ -1952,7 +1955,7 @@ config STRICT_DEVMEM
1970
1970
+
bool "Filter access to /dev/mem"
1971
1971
+
depends on MMU && DEVMEM
1972
1972
+
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
1973
1973
+
- default y if TILE || PPC
1974
1974
+
+ default y
1975
1975
+
---help---
1976
1976
+
If this option is disabled, you allow userspace (root) access to all
1977
1977
+
of memory, including kernel and userspace memory. Accidental
1978
1978
+
@@ -1971,6 +1974,7 @@ config STRICT_DEVMEM
1979
1979
+
config IO_STRICT_DEVMEM
1980
1980
+
bool "Filter I/O access to /dev/mem"
1981
1981
+
depends on STRICT_DEVMEM
1982
1982
+
+ default y
1983
1983
+
---help---
1984
1984
+
If this option is disabled, you allow userspace (root) access to all
1985
1985
+
io-memory regardless of whether a driver is actively using that
1986
1986
+
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
1987
1987
+
index 86a709954f5a..6f15787fcb1b 100644
1988
1988
+
--- a/lib/irq_poll.c
1989
1989
+
+++ b/lib/irq_poll.c
1990
1990
+
@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
1991
1991
+
}
1992
1992
+
EXPORT_SYMBOL(irq_poll_complete);
1993
1993
+
1994
1994
+
-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
1995
1995
+
+static void __latent_entropy irq_poll_softirq(void)
1996
1996
+
{
1997
1997
+
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
1998
1998
+
int rearm = 0, budget = irq_poll_budget;
1999
1999
+
diff --git a/lib/kobject.c b/lib/kobject.c
2000
2000
+
index 34f847252c02..4fda329de614 100644
2001
2001
+
--- a/lib/kobject.c
2002
2002
+
+++ b/lib/kobject.c
2003
2003
+
@@ -956,9 +956,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
2004
2004
+
2005
2005
+
2006
2006
+
static DEFINE_SPINLOCK(kobj_ns_type_lock);
2007
2007
+
-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
2008
2008
+
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
2009
2009
+
2010
2010
+
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
2011
2011
+
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
2012
2012
+
{
2013
2013
+
enum kobj_ns_type type = ops->type;
2014
2014
+
int error;
2015
2015
+
diff --git a/lib/nlattr.c b/lib/nlattr.c
2016
2016
+
index 3d8295c85505..3fa3b3409d69 100644
2017
2017
+
--- a/lib/nlattr.c
2018
2018
+
+++ b/lib/nlattr.c
2019
2019
+
@@ -341,6 +341,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
2020
2020
+
{
2021
2021
+
int minlen = min_t(int, count, nla_len(src));
2022
2022
+
2023
2023
+
+ BUG_ON(minlen < 0);
2024
2024
+
+
2025
2025
+
memcpy(dest, nla_data(src), minlen);
2026
2026
+
if (count > minlen)
2027
2027
+
memset(dest + minlen, 0, count - minlen);
2028
2028
+
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
2029
2029
+
index 86c3385b9eb3..c482070e379b 100644
2030
2030
+
--- a/lib/vsprintf.c
2031
2031
+
+++ b/lib/vsprintf.c
2032
2032
+
@@ -1591,7 +1591,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
2033
2033
+
return widen_string(buf, buf - buf_start, end, spec);
2034
2034
+
}
2035
2035
+
2036
2036
+
-int kptr_restrict __read_mostly;
2037
2037
+
+int kptr_restrict __read_mostly = 2;
2038
2038
+
2039
2039
+
/*
2040
2040
+
* Show a '%p' thing. A kernel extension is that the '%p' is followed
2041
2041
+
diff --git a/mm/Kconfig b/mm/Kconfig
2042
2042
+
index 59efbd3337e0..c070e14ec83d 100644
2043
2043
+
--- a/mm/Kconfig
2044
2044
+
+++ b/mm/Kconfig
2045
2045
+
@@ -319,7 +319,8 @@ config KSM
2046
2046
+
config DEFAULT_MMAP_MIN_ADDR
2047
2047
+
int "Low address space to protect from user allocation"
2048
2048
+
depends on MMU
2049
2049
+
- default 4096
2050
2050
+
+ default 32768 if ARM || (ARM64 && COMPAT)
2051
2051
+
+ default 65536
2052
2052
+
help
2053
2053
+
This is the portion of low virtual memory which should be protected
2054
2054
+
from userspace allocation. Keeping a user from writing to low pages
2055
2055
+
diff --git a/mm/mmap.c b/mm/mmap.c
2056
2056
+
index 11f96fad5271..632e7f9a710e 100644
2057
2057
+
--- a/mm/mmap.c
2058
2058
+
+++ b/mm/mmap.c
2059
2059
+
@@ -220,6 +220,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
2060
2060
+
2061
2061
+
newbrk = PAGE_ALIGN(brk);
2062
2062
+
oldbrk = PAGE_ALIGN(mm->brk);
2063
2063
+
+ /* properly handle unaligned min_brk as an empty heap */
2064
2064
+
+ if (min_brk & ~PAGE_MASK) {
2065
2065
+
+ if (brk == min_brk)
2066
2066
+
+ newbrk -= PAGE_SIZE;
2067
2067
+
+ if (mm->brk == min_brk)
2068
2068
+
+ oldbrk -= PAGE_SIZE;
2069
2069
+
+ }
2070
2070
+
if (oldbrk == newbrk)
2071
2071
+
goto set_brk;
2072
2072
+
2073
2073
+
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2074
2074
+
index 1d7693c35424..8963a3b4d37c 100644
2075
2075
+
--- a/mm/page_alloc.c
2076
2076
+
+++ b/mm/page_alloc.c
2077
2077
+
@@ -67,6 +67,7 @@
2078
2078
+
#include <linux/ftrace.h>
2079
2079
+
#include <linux/lockdep.h>
2080
2080
+
#include <linux/nmi.h>
2081
2081
+
+#include <linux/random.h>
2082
2082
+
2083
2083
+
#include <asm/sections.h>
2084
2084
+
#include <asm/tlbflush.h>
2085
2085
+
@@ -98,6 +99,15 @@ int _node_numa_mem_[MAX_NUMNODES];
2086
2086
+
DEFINE_MUTEX(pcpu_drain_mutex);
2087
2087
+
DEFINE_PER_CPU(struct work_struct, pcpu_drain);
2088
2088
+
2089
2089
+
+bool __meminitdata extra_latent_entropy;
2090
2090
+
+
2091
2091
+
+static int __init setup_extra_latent_entropy(char *str)
2092
2092
+
+{
2093
2093
+
+ extra_latent_entropy = true;
2094
2094
+
+ return 0;
2095
2095
+
+}
2096
2096
+
+early_param("extra_latent_entropy", setup_extra_latent_entropy);
2097
2097
+
+
2098
2098
+
#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
2099
2099
+
volatile unsigned long latent_entropy __latent_entropy;
2100
2100
+
EXPORT_SYMBOL(latent_entropy);
2101
2101
+
@@ -1063,6 +1073,13 @@ static __always_inline bool free_pages_prepare(struct page *page,
2102
2102
+
debug_check_no_obj_freed(page_address(page),
2103
2103
+
PAGE_SIZE << order);
2104
2104
+
}
2105
2105
+
+
2106
2106
+
+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
2107
2107
+
+ int i;
2108
2108
+
+ for (i = 0; i < (1 << order); i++)
2109
2109
+
+ clear_highpage(page + i);
2110
2110
+
+ }
2111
2111
+
+
2112
2112
+
arch_free_page(page, order);
2113
2113
+
kernel_poison_pages(page, 1 << order, 0);
2114
2114
+
kernel_map_pages(page, 1 << order, 0);
2115
2115
+
@@ -1278,6 +1295,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
2116
2116
+
__ClearPageReserved(p);
2117
2117
+
set_page_count(p, 0);
2118
2118
+
2119
2119
+
+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
2120
2120
+
+ unsigned long hash = 0;
2121
2121
+
+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
2122
2122
+
+ const unsigned long *data = lowmem_page_address(page);
2123
2123
+
+
2124
2124
+
+ for (index = 0; index < end; index++)
2125
2125
+
+ hash ^= hash + data[index];
2126
2126
+
+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
2127
2127
+
+ latent_entropy ^= hash;
2128
2128
+
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
2129
2129
+
+#else
2130
2130
+
+ add_device_randomness((const void *)&hash, sizeof(hash));
2131
2131
+
+#endif
2132
2132
+
+ }
2133
2133
+
+
2134
2134
+
page_zone(page)->managed_pages += nr_pages;
2135
2135
+
set_page_refcounted(page);
2136
2136
+
__free_pages(page, order);
2137
2137
+
@@ -1718,8 +1750,8 @@ static inline int check_new_page(struct page *page)
2138
2138
+
2139
2139
+
static inline bool free_pages_prezeroed(void)
2140
2140
+
{
2141
2141
+
- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
2142
2142
+
- page_poisoning_enabled();
2143
2143
+
+ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
2144
2144
+
+ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
2145
2145
+
}
2146
2146
+
2147
2147
+
#ifdef CONFIG_DEBUG_VM
2148
2148
+
@@ -1776,6 +1808,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
2149
2149
+
2150
2150
+
post_alloc_hook(page, order, gfp_flags);
2151
2151
+
2152
2152
+
+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
2153
2153
+
+ for (i = 0; i < (1 << order); i++)
2154
2154
+
+ verify_zero_highpage(page + i);
2155
2155
+
+ }
2156
2156
+
+
2157
2157
+
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
2158
2158
+
for (i = 0; i < (1 << order); i++)
2159
2159
+
clear_highpage(page + i);
2160
2160
+
diff --git a/mm/slab.h b/mm/slab.h
2161
2161
+
index 485d9fbb8802..436461588804 100644
2162
2162
+
--- a/mm/slab.h
2163
2163
+
+++ b/mm/slab.h
2164
2164
+
@@ -311,7 +311,11 @@ static inline bool is_root_cache(struct kmem_cache *s)
2165
2165
+
static inline bool slab_equal_or_root(struct kmem_cache *s,
2166
2166
+
struct kmem_cache *p)
2167
2167
+
{
2168
2168
+
+#ifdef CONFIG_SLAB_HARDENED
2169
2169
+
+ return p == s;
2170
2170
+
+#else
2171
2171
+
return true;
2172
2172
+
+#endif
2173
2173
+
}
2174
2174
+
2175
2175
+
static inline const char *cache_name(struct kmem_cache *s)
2176
2176
+
@@ -363,18 +367,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
2177
2177
+
* to not do even the assignment. In that case, slab_equal_or_root
2178
2178
+
* will also be a constant.
2179
2179
+
*/
2180
2180
+
- if (!memcg_kmem_enabled() &&
2181
2181
+
+ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
2182
2182
+
+ !memcg_kmem_enabled() &&
2183
2183
+
!unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
2184
2184
+
return s;
2185
2185
+
2186
2186
+
page = virt_to_head_page(x);
2187
2187
+
+#ifdef CONFIG_SLAB_HARDENED
2188
2188
+
+ BUG_ON(!PageSlab(page));
2189
2189
+
+#endif
2190
2190
+
cachep = page->slab_cache;
2191
2191
+
if (slab_equal_or_root(cachep, s))
2192
2192
+
return cachep;
2193
2193
+
2194
2194
+
pr_err("%s: Wrong slab cache. %s but object is from %s\n",
2195
2195
+
__func__, s->name, cachep->name);
2196
2196
+
+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
2197
2197
+
+ BUG_ON(1);
2198
2198
+
+#else
2199
2199
+
WARN_ON_ONCE(1);
2200
2200
+
+#endif
2201
2201
+
return s;
2202
2202
+
}
2203
2203
+
2204
2204
+
@@ -399,7 +411,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
2205
2205
+
* back there or track user information then we can
2206
2206
+
* only use the space before that information.
2207
2207
+
*/
2208
2208
+
- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
2209
2209
+
+ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
2210
2210
+
return s->inuse;
2211
2211
+
/*
2212
2212
+
* Else we can use all the padding etc for the allocation
2213
2213
+
diff --git a/mm/slab_common.c b/mm/slab_common.c
2214
2214
+
index 65212caa1f2a..d8bf8a75f445 100644
2215
2215
+
--- a/mm/slab_common.c
2216
2216
+
+++ b/mm/slab_common.c
2217
2217
+
@@ -26,10 +26,10 @@
2218
2218
+
2219
2219
+
#include "slab.h"
2220
2220
+
2221
2221
+
-enum slab_state slab_state;
2222
2222
+
+enum slab_state slab_state __ro_after_init;
2223
2223
+
LIST_HEAD(slab_caches);
2224
2224
+
DEFINE_MUTEX(slab_mutex);
2225
2225
+
-struct kmem_cache *kmem_cache;
2226
2226
+
+struct kmem_cache *kmem_cache __ro_after_init;
2227
2227
+
2228
2228
+
static LIST_HEAD(slab_caches_to_rcu_destroy);
2229
2229
+
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
2230
2230
+
@@ -49,7 +49,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
2231
2231
+
/*
2232
2232
+
* Merge control. If this is set then no merging of slab caches will occur.
2233
2233
+
*/
2234
2234
+
-static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
2235
2235
+
+static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
2236
2236
+
2237
2237
+
static int __init setup_slab_nomerge(char *str)
2238
2238
+
{
2239
2239
+
@@ -927,7 +927,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
2240
2240
+
* of two cache sizes there. The size of larger slabs can be determined using
2241
2241
+
* fls.
2242
2242
+
*/
2243
2243
+
-static s8 size_index[24] = {
2244
2244
+
+static s8 size_index[24] __ro_after_init = {
2245
2245
+
3, /* 8 */
2246
2246
+
4, /* 16 */
2247
2247
+
5, /* 24 */
2248
2248
+
diff --git a/mm/slub.c b/mm/slub.c
2249
2249
+
index 41c01690d116..591dd60d37f3 100644
2250
2250
+
--- a/mm/slub.c
2251
2251
+
+++ b/mm/slub.c
2252
2252
+
@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
2253
2253
+
#endif
2254
2254
+
}
2255
2255
+
2256
2256
+
+static inline bool has_sanitize(struct kmem_cache *s)
2257
2257
+
+{
2258
2258
+
+ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
2259
2259
+
+}
2260
2260
+
+
2261
2261
+
+static inline bool has_sanitize_verify(struct kmem_cache *s)
2262
2262
+
+{
2263
2263
+
+ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
2264
2264
+
+}
2265
2265
+
+
2266
2266
+
void *fixup_red_left(struct kmem_cache *s, void *p)
2267
2267
+
{
2268
2268
+
if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
2269
2269
+
@@ -297,6 +307,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
2270
2270
+
*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
2271
2271
+
}
2272
2272
+
2273
2273
+
+#ifdef CONFIG_SLAB_CANARY
2274
2274
+
+static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
2275
2275
+
+{
2276
2276
+
+ if (s->offset)
2277
2277
+
+ return object + s->offset + sizeof(void *);
2278
2278
+
+ return object + s->inuse;
2279
2279
+
+}
2280
2280
+
+
2281
2281
+
+static inline unsigned long get_canary_value(const void *canary, unsigned long value)
2282
2282
+
+{
2283
2283
+
+ return (value ^ (unsigned long)canary) & CANARY_MASK;
2284
2284
+
+}
2285
2285
+
+
2286
2286
+
+static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
2287
2287
+
+{
2288
2288
+
+ unsigned long *canary = get_canary(s, object);
2289
2289
+
+ *canary = get_canary_value(canary, value);
2290
2290
+
+}
2291
2291
+
+
2292
2292
+
+static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
2293
2293
+
+{
2294
2294
+
+ unsigned long *canary = get_canary(s, object);
2295
2295
+
+ BUG_ON(*canary != get_canary_value(canary, value));
2296
2296
+
+}
2297
2297
+
+#else
2298
2298
+
+#define set_canary(s, object, value)
2299
2299
+
+#define check_canary(s, object, value)
2300
2300
+
+#endif
2301
2301
+
+
2302
2302
+
/* Loop over all objects in a slab */
2303
2303
+
#define for_each_object(__p, __s, __addr, __objects) \
2304
2304
+
for (__p = fixup_red_left(__s, __addr); \
2305
2305
+
@@ -484,13 +523,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
2306
2306
+
* Debug settings:
2307
2307
+
*/
2308
2308
+
#if defined(CONFIG_SLUB_DEBUG_ON)
2309
2309
+
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
2310
2310
+
+static int slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
2311
2311
+
#else
2312
2312
+
-static int slub_debug;
2313
2313
+
+static int slub_debug __ro_after_init;
2314
2314
+
#endif
2315
2315
+
2316
2316
+
-static char *slub_debug_slabs;
2317
2317
+
-static int disable_higher_order_debug;
2318
2318
+
+static char *slub_debug_slabs __ro_after_init;
2319
2319
+
+static int disable_higher_order_debug __ro_after_init;
2320
2320
+
2321
2321
+
/*
2322
2322
+
* slub is about to manipulate internal object metadata. This memory lies
2323
2323
+
@@ -550,6 +589,9 @@ static struct track *get_track(struct kmem_cache *s, void *object,
2324
2324
+
else
2325
2325
+
p = object + s->inuse;
2326
2326
+
2327
2327
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2328
2328
+
+ p = (void *)p + sizeof(void *);
2329
2329
+
+
2330
2330
+
return p + alloc;
2331
2331
+
}
2332
2332
+
2333
2333
+
@@ -688,6 +730,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
2334
2334
+
else
2335
2335
+
off = s->inuse;
2336
2336
+
2337
2337
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2338
2338
+
+ off += sizeof(void *);
2339
2339
+
+
2340
2340
+
if (s->flags & SLAB_STORE_USER)
2341
2341
+
off += 2 * sizeof(struct track);
2342
2342
+
2343
2343
+
@@ -817,6 +862,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
2344
2344
+
/* Freepointer is placed after the object. */
2345
2345
+
off += sizeof(void *);
2346
2346
+
2347
2347
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2348
2348
+
+ off += sizeof(void *);
2349
2349
+
+
2350
2350
+
if (s->flags & SLAB_STORE_USER)
2351
2351
+
/* We also have user information there */
2352
2352
+
off += 2 * sizeof(struct track);
2353
2353
+
@@ -1416,8 +1464,9 @@ static void setup_object(struct kmem_cache *s, struct page *page,
2354
2354
+
void *object)
2355
2355
+
{
2356
2356
+
setup_object_debug(s, page, object);
2357
2357
+
+ set_canary(s, object, s->random_inactive);
2358
2358
+
kasan_init_slab_obj(s, object);
2359
2359
+
- if (unlikely(s->ctor)) {
2360
2360
+
+ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
2361
2361
+
kasan_unpoison_object_data(s, object);
2362
2362
+
s->ctor(object);
2363
2363
+
kasan_poison_object_data(s, object);
2364
2364
+
@@ -2717,9 +2766,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2365
2365
+
stat(s, ALLOC_FASTPATH);
2366
2366
+
}
2367
2367
+
2368
2368
+
- if (unlikely(gfpflags & __GFP_ZERO) && object)
2369
2369
+
+ if (has_sanitize_verify(s) && object) {
2370
2370
+
+ size_t offset = s->offset ? 0 : sizeof(void *);
2371
2371
+
+ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
2372
2372
+
+ if (s->ctor)
2373
2373
+
+ s->ctor(object);
2374
2374
+
+ if (unlikely(gfpflags & __GFP_ZERO) && offset)
2375
2375
+
+ memset(object, 0, sizeof(void *));
2376
2376
+
+ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
2377
2377
+
memset(object, 0, s->object_size);
2378
2378
+
2379
2379
+
+ if (object) {
2380
2380
+
+ check_canary(s, object, s->random_inactive);
2381
2381
+
+ set_canary(s, object, s->random_active);
2382
2382
+
+ }
2383
2383
+
+
2384
2384
+
slab_post_alloc_hook(s, gfpflags, 1, &object);
2385
2385
+
2386
2386
+
return object;
2387
2387
+
@@ -2926,6 +2987,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
2388
2388
+
void *tail_obj = tail ? : head;
2389
2389
+
struct kmem_cache_cpu *c;
2390
2390
+
unsigned long tid;
2391
2391
+
+ bool sanitize = has_sanitize(s);
2392
2392
+
+
2393
2393
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
2394
2394
+
+ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
2395
2395
+
+ void *x = head;
2396
2396
+
+
2397
2397
+
+ while (1) {
2398
2398
+
+ check_canary(s, x, s->random_active);
2399
2399
+
+ set_canary(s, x, s->random_inactive);
2400
2400
+
+
2401
2401
+
+ if (sanitize) {
2402
2402
+
+ memset(x + offset, 0, s->object_size - offset);
2403
2403
+
+ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
2404
2404
+
+ s->ctor(x);
2405
2405
+
+ }
2406
2406
+
+ if (x == tail_obj)
2407
2407
+
+ break;
2408
2408
+
+ x = get_freepointer(s, x);
2409
2409
+
+ }
2410
2410
+
+ }
2411
2411
+
+
2412
2412
+
redo:
2413
2413
+
/*
2414
2414
+
* Determine the currently cpus per cpu slab.
2415
2415
+
@@ -3104,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2416
2416
+
void **p)
2417
2417
+
{
2418
2418
+
struct kmem_cache_cpu *c;
2419
2419
+
- int i;
2420
2420
+
+ int i, k;
2421
2421
+
2422
2422
+
/* memcg and kmem_cache debug support */
2423
2423
+
s = slab_pre_alloc_hook(s, flags);
2424
2424
+
@@ -3141,13 +3223,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2425
2425
+
local_irq_enable();
2426
2426
+
2427
2427
+
/* Clear memory outside IRQ disabled fastpath loop */
2428
2428
+
- if (unlikely(flags & __GFP_ZERO)) {
2429
2429
+
+ if (has_sanitize_verify(s)) {
2430
2430
+
+ int j;
2431
2431
+
+
2432
2432
+
+ for (j = 0; j < i; j++) {
2433
2433
+
+ size_t offset = s->offset ? 0 : sizeof(void *);
2434
2434
+
+ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
2435
2435
+
+ if (s->ctor)
2436
2436
+
+ s->ctor(p[j]);
2437
2437
+
+ if (unlikely(flags & __GFP_ZERO) && offset)
2438
2438
+
+ memset(p[j], 0, sizeof(void *));
2439
2439
+
+ }
2440
2440
+
+ } else if (unlikely(flags & __GFP_ZERO)) {
2441
2441
+
int j;
2442
2442
+
2443
2443
+
for (j = 0; j < i; j++)
2444
2444
+
memset(p[j], 0, s->object_size);
2445
2445
+
}
2446
2446
+
2447
2447
+
+ for (k = 0; k < i; k++) {
2448
2448
+
+ check_canary(s, p[k], s->random_inactive);
2449
2449
+
+ set_canary(s, p[k], s->random_active);
2450
2450
+
+ }
2451
2451
+
+
2452
2452
+
/* memcg and kmem_cache debug support */
2453
2453
+
slab_post_alloc_hook(s, flags, size, p);
2454
2454
+
return i;
2455
2455
+
@@ -3179,9 +3277,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
2456
2456
+
* and increases the number of allocations possible without having to
2457
2457
+
* take the list_lock.
2458
2458
+
*/
2459
2459
+
-static int slub_min_order;
2460
2460
+
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2461
2461
+
-static int slub_min_objects;
2462
2462
+
+static int slub_min_order __ro_after_init;
2463
2463
+
+static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
2464
2464
+
+static int slub_min_objects __ro_after_init;
2465
2465
+
2466
2466
+
/*
2467
2467
+
* Calculate the order of allocation given an slab object size.
2468
2468
+
@@ -3351,6 +3449,7 @@ static void early_kmem_cache_node_alloc(int node)
2469
2469
+
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2470
2470
+
init_tracking(kmem_cache_node, n);
2471
2471
+
#endif
2472
2472
+
+ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
2473
2473
+
kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
2474
2474
+
GFP_KERNEL);
2475
2475
+
init_kmem_cache_node(n);
2476
2476
+
@@ -3507,6 +3606,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
2477
2477
+
size += sizeof(void *);
2478
2478
+
}
2479
2479
+
2480
2480
+
+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2481
2481
+
+ size += sizeof(void *);
2482
2482
+
+
2483
2483
+
#ifdef CONFIG_SLUB_DEBUG
2484
2484
+
if (flags & SLAB_STORE_USER)
2485
2485
+
/*
2486
2486
+
@@ -3577,6 +3679,10 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
2487
2487
+
#ifdef CONFIG_SLAB_FREELIST_HARDENED
2488
2488
+
s->random = get_random_long();
2489
2489
+
#endif
2490
2490
+
+#ifdef CONFIG_SLAB_CANARY
2491
2491
+
+ s->random_active = get_random_long();
2492
2492
+
+ s->random_inactive = get_random_long();
2493
2493
+
+#endif
2494
2494
+
2495
2495
+
if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
2496
2496
+
 	s->reserved = sizeof(struct rcu_head);
@@ -3841,6 +3947,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 		offset -= s->red_left_pad;
 	}
 
+	check_canary(s, (void *)ptr - offset, s->random_active);
+
 	/* Allow address range falling entirely within object size. */
 	if (offset <= object_size && n <= object_size - offset)
 		return NULL;
@@ -3859,7 +3967,11 @@ static size_t __ksize(const void *object)
 	page = virt_to_head_page(object);
 
 	if (unlikely(!PageSlab(page))) {
+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
+		BUG_ON(!PageCompound(page));
+#else
 		WARN_ON(!PageCompound(page));
+#endif
 		return PAGE_SIZE << compound_order(page);
 	}
 
@@ -4724,7 +4836,7 @@ enum slab_stat_type {
 #define SO_TOTAL	(1 << SL_TOTAL)
 
 #ifdef CONFIG_MEMCG
-static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
+static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
 
 static int __init setup_slub_memcg_sysfs(char *str)
 {
diff --git a/mm/swap.c b/mm/swap.c
index a77d68f2c1b6..d1f1d75f4d1f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page)
 	if (!PageHuge(page))
 		__page_cache_release(page);
 	dtor = get_compound_page_dtor(page);
+	if (!PageHuge(page))
+		BUG_ON(dtor != free_compound_page
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       && dtor != free_transhuge_page
+#endif
+		      );
+
 	(*dtor)(page);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ca771f2f25b..6da2c9c3e6a5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4095,7 +4095,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static __latent_entropy void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
@@ -5609,7 +5609,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	return work;
 }
 
-static __latent_entropy void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies +
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index f48fe6fc7e8c..d78c52835c08 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -261,6 +261,7 @@ config IP_PIMSM_V2
 
 config SYN_COOKIES
 	bool "IP: TCP syncookie support"
+	default y
 	---help---
 	  Normal TCP/IP networking is open to an attack known as "SYN
 	  flooding". This denial-of-service attack prevents legitimate remote
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 54deaa1066cf..211f97bd5ee3 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -37,6 +37,7 @@ static int vmlinux_section_warnings = 1;
 static int warn_unresolved = 0;
 /* How a symbol is exported */
 static int sec_mismatch_count = 0;
+static int writable_fptr_count = 0;
 static int sec_mismatch_verbose = 1;
 static int sec_mismatch_fatal = 0;
 /* ignore missing files */
@@ -965,6 +966,7 @@ enum mismatch {
 	ANY_EXIT_TO_ANY_INIT,
 	EXPORT_TO_INIT_EXIT,
 	EXTABLE_TO_NON_TEXT,
+	DATA_TO_TEXT
 };
 
 /**
@@ -1091,6 +1093,12 @@ static const struct sectioncheck sectioncheck[] = {
 	.good_tosec = {ALL_TEXT_SECTIONS , NULL},
 	.mismatch = EXTABLE_TO_NON_TEXT,
 	.handler = extable_mismatch_handler,
+},
+/* Do not reference code from writable data */
+{
+	.fromsec = { DATA_SECTIONS, NULL },
+	.bad_tosec = { ALL_TEXT_SECTIONS, NULL },
+	.mismatch = DATA_TO_TEXT
 }
 };
 
@@ -1240,10 +1248,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
 			continue;
 		if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
 			continue;
-		if (sym->st_value == addr)
-			return sym;
 		/* Find a symbol nearby - addr are maybe negative */
 		d = sym->st_value - addr;
+		if (d == 0)
+			return sym;
 		if (d < 0)
 			d = addr - sym->st_value;
 		if (d < distance) {
@@ -1402,7 +1410,11 @@ static void report_sec_mismatch(const char *modname,
 	char *prl_from;
 	char *prl_to;
 
-	sec_mismatch_count++;
+	if (mismatch->mismatch == DATA_TO_TEXT)
+		writable_fptr_count++;
+	else
+		sec_mismatch_count++;
+
 	if (!sec_mismatch_verbose)
 		return;
 
@@ -1526,6 +1538,14 @@ static void report_sec_mismatch(const char *modname,
 		fatal("There's a special handler for this mismatch type, "
 		      "we should never get here.");
 		break;
+	case DATA_TO_TEXT:
+#if 0
+		fprintf(stderr,
+		        "The %s %s:%s references\n"
+		        "the %s %s:%s%s\n",
+		        from, fromsec, fromsym, to, tosec, tosym, to_p);
+#endif
+		break;
 	}
 	fprintf(stderr, "\n");
 }
@@ -2539,6 +2559,14 @@ int main(int argc, char **argv)
 		}
 	}
 	free(buf.p);
+	if (writable_fptr_count) {
+		if (!sec_mismatch_verbose) {
+			warn("modpost: Found %d writable function pointer(s).\n"
+			     "To see full details build your kernel with:\n"
+			     "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
+			     writable_fptr_count);
+		}
+	}
 
 	return err;
 }
diff --git a/security/Kconfig b/security/Kconfig
index 87f2a6f842fd..7bdbb7edf5bf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -8,7 +8,7 @@ source security/keys/Kconfig
 
 config SECURITY_DMESG_RESTRICT
 	bool "Restrict unprivileged access to the kernel syslog"
-	default n
+	default y
 	help
 	  This enforces restrictions on unprivileged users reading the kernel
 	  syslog via dmesg(8).
@@ -18,10 +18,34 @@ config SECURITY_DMESG_RESTRICT
 
 	  If you are unsure how to answer this question, answer N.
 
+config SECURITY_PERF_EVENTS_RESTRICT
+	bool "Restrict unprivileged use of performance events"
+	depends on PERF_EVENTS
+	default y
+	help
+	  If you say Y here, the kernel.perf_event_paranoid sysctl
+	  will be set to 3 by default, and no unprivileged use of the
+	  perf_event_open syscall will be permitted unless it is
+	  changed.
+
+config SECURITY_TIOCSTI_RESTRICT
+	bool "Restrict unprivileged use of tiocsti command injection"
+	default y
+	help
+	  This enforces restrictions on unprivileged users injecting commands
+	  into other processes which share a tty session using the TIOCSTI
+	  ioctl. This option makes TIOCSTI use require CAP_SYS_ADMIN.
+
+	  If this option is not selected, no restrictions will be enforced
+	  unless the tiocsti_restrict sysctl is explicitly set to (1).
+
+	  If you are unsure how to answer this question, answer N.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
 	depends on MULTIUSER
+	default y
 	help
 	  This allows you to choose different security modules to be
 	  configured into your kernel.
@@ -48,6 +72,7 @@ config SECURITYFS
 config SECURITY_NETWORK
 	bool "Socket and Networking Security Hooks"
 	depends on SECURITY
+	default y
 	help
 	  This enables the socket and networking security hooks.
 	  If enabled, a security module can use these hooks to
@@ -155,6 +180,7 @@ config HARDENED_USERCOPY
 	depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
 	select BUG
 	imply STRICT_DEVMEM
+	default y
 	help
 	  This option checks for obviously wrong memory regions when
 	  copying memory to/from the kernel (via copy_to_user() and
@@ -178,10 +204,36 @@ config HARDENED_USERCOPY_PAGESPAN
 config FORTIFY_SOURCE
 	bool "Harden common str/mem functions against buffer overflows"
 	depends on ARCH_HAS_FORTIFY_SOURCE
+	default y
 	help
 	  Detect overflows of buffers in common string and memory functions
 	  where the compiler can determine and validate the buffer sizes.
 
+config FORTIFY_SOURCE_STRICT_STRING
+	bool "Harden common functions against buffer overflows"
+	depends on FORTIFY_SOURCE
+	depends on EXPERT
+	help
+	  Perform stricter overflow checks catching overflows within objects
+	  for common C string functions rather than only between objects.
+
+	  This is not yet intended for production use, only bug finding.
+
+config PAGE_SANITIZE
+	bool "Sanitize pages"
+	default y
+	help
+	  Zero fill page allocations on free, reducing the lifetime of
+	  sensitive data and helping to mitigate use-after-free bugs.
+
+config PAGE_SANITIZE_VERIFY
+	bool "Verify sanitized pages"
+	depends on PAGE_SANITIZE
+	default y
+	help
+	  Verify that newly allocated pages are zeroed to detect
+	  write-after-free bugs.
+
 config STATIC_USERMODEHELPER
 	bool "Force all usermode helper calls through a single binary"
 	help
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a690eb40..6539694b0fd3 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -2,7 +2,7 @@ config SECURITY_SELINUX
 	bool "NSA SELinux Support"
 	depends on SECURITY_NETWORK && AUDIT && NET && INET
 	select NETWORK_SECMARK
-	default n
+	default y
 	help
 	  This selects NSA Security-Enhanced Linux (SELinux).
 	  You will also need a policy configuration and a labeled filesystem.
@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS
 	  This option collects access vector cache statistics to
 	  /selinux/avc/cache_stats, which may be monitored via
 	  tools such as avcstat.
-
-config SECURITY_SELINUX_CHECKREQPROT_VALUE
-	int "NSA SELinux checkreqprot default value"
-	depends on SECURITY_SELINUX
-	range 0 1
-	default 0
-	help
-	  This option sets the default value for the 'checkreqprot' flag
-	  that determines whether SELinux checks the protection requested
-	  by the application or the protection that will be applied by the
-	  kernel (including any implied execute for read-implies-exec) for
-	  mmap and mprotect calls. If this option is set to 0 (zero),
-	  SELinux will default to checking the protection that will be applied
-	  by the kernel. If this option is set to 1 (one), SELinux will
-	  default to checking the protection requested by the application.
-	  The checkreqprot flag may be changed from the default via the
-	  'checkreqprot=' boot parameter. It may also be changed at runtime
-	  via /selinux/checkreqprot if authorized by policy.
-
-	  If you are unsure how to answer this question, answer 0.
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 1649cd18eb0b..067f35559aa7 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -150,6 +150,6 @@ struct pkey_security_struct {
 	u32 sid;	/* SID of pkey */
 };
 
-extern unsigned int selinux_checkreqprot;
+extern const unsigned int selinux_checkreqprot;
 
 #endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 00eed842c491..8f7b8d7e6f91 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -41,16 +41,7 @@
 #include "objsec.h"
 #include "conditional.h"
 
-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
-
-static int __init checkreqprot_setup(char *str)
-{
-	unsigned long checkreqprot;
-	if (!kstrtoul(str, 0, &checkreqprot))
-		selinux_checkreqprot = checkreqprot ? 1 : 0;
-	return 1;
-}
-__setup("checkreqprot=", checkreqprot_setup);
+const unsigned int selinux_checkreqprot;
 
 static DEFINE_MUTEX(sel_mutex);
 
@@ -610,10 +601,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
 		return PTR_ERR(page);
 
 	length = -EINVAL;
-	if (sscanf(page, "%u", &new_value) != 1)
+	if (sscanf(page, "%u", &new_value) != 1 || new_value)
 		goto out;
 
-	selinux_checkreqprot = new_value ? 1 : 0;
 	length = count;
 out:
 	kfree(page);
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
index 96b27405558a..485c1b85c325 100644
--- a/security/yama/Kconfig
+++ b/security/yama/Kconfig
@@ -1,7 +1,7 @@
 config SECURITY_YAMA
 	bool "Yama support"
	depends on SECURITY
-	default n
+	default y
 	help
 	  This selects Yama, which extends DAC support with additional
 	  system-wide security settings beyond regular Linux discretionary
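Several of the defaults the patch flips above remain runtime-tunable sysctls, so a host running this kernel can still pin them declaratively. A minimal sketch for a NixOS machine, assuming the stock boot.kernel.sysctl option; kernel.tiocsti_restrict and paranoid level 3 exist only with this patch applied, the other names are mainline, and the values are illustrative:

{ ... }:
{
  # Hypothetical configuration fragment -- not part of this commit.
  boot.kernel.sysctl = {
    "kernel.tiocsti_restrict" = 1;     # TIOCSTI requires CAP_SYS_ADMIN
    "kernel.perf_event_paranoid" = 3;  # matches SECURITY_PERF_EVENTS_RESTRICT
    "kernel.dmesg_restrict" = 1;       # matches SECURITY_DMESG_RESTRICT
  };
}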
+4 -1  pkgs/os-specific/linux/kernel/linux-4.14.nix
···
-{ stdenv, buildPackages, hostPlatform, fetchurl, perl, buildLinux, ... } @ args:
+{ stdenv, buildPackages, hostPlatform, fetchurl, perl, buildLinux, modDirVersionArg ? null, ... } @ args:
 
 with stdenv.lib;
 
 buildLinux (args // rec {
   version = "4.14.48";
+
+  # modDirVersion needs to be x.y.z, will automatically add .0 if needed
+  modDirVersion = if (modDirVersionArg == null) then concatStrings (intersperse "." (take 3 (splitString "." "${version}.0"))) else modDirVersionArg;
 
   # branchVersion needs to be x.y
   extraMeta.branch = concatStrings (intersperse "." (take 2 (splitString "." version)));
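The modDirVersion fallback is plain string surgery with stdenv.lib: append ".0", split on dots, keep the first three components, and rejoin. It is easy to check interactively; a sketch, assuming nix repl '<nixpkgs>' so that lib is in scope:

nix-repl> with lib; concatStrings (intersperse "." (take 3 (splitString "." "4.14.48.0")))
"4.14.48"
nix-repl> with lib; concatStrings (intersperse "." (take 3 (splitString "." "4.14.0")))
"4.14.0"

A three-component version passes through unchanged, while a two-component one picks up the ".0" the module directory layout expects. The new modDirVersionArg parameter lets a caller replace the computed value outright, which the copperhead override below relies on.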
+5  pkgs/os-specific/linux/kernel/patches.nix
···
     patch = ./tag-hardened.patch;
   };
 
+  copperhead_4_14 = rec {
+    name = "copperhead-4.14";
+    patch = ./copperhead-4-14.patch;
+  };
+
   copperhead_4_16 = rec {
     name = "copperhead-4.16";
     patch = ./copperhead-4-16.patch;
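Entries in patches.nix are plain attribute sets of the shape { name; patch; } that the kernel build applies in order, so local patches can be added the same way. A hedged sketch using an overlay; the my_local_fix attribute and patch file are hypothetical:

self: super: {
  # Hypothetical overlay -- not part of this commit.
  kernelPatches = super.kernelPatches // {
    my_local_fix = {
      name = "my-local-fix";         # shows up in derivation names
      patch = ./my-local-fix.patch;  # applied during the kernel build
    };
  };
}

(The rec in the entry above mirrors the neighboring entries; nothing in the set is self-referential, so it is inert here.)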
+7 -7  pkgs/top-level/all-packages.nix
···
     ];
   };
 
-  linux_copperhead_lts = callPackage ../os-specific/linux/kernel/linux-copperhead-lts.nix {
-    kernelPatches = with kernelPatches; [
-      bridge_stp_helper
-      modinst_arg_list_too_long
-      tag_hardened
-    ];
-  };
+  linux_copperhead_lts = (linux_4_14.override {
+    kernelPatches = linux_4_14.kernelPatches ++ [
+      kernelPatches.copperhead_4_14
+      kernelPatches.tag_hardened
+    ];
+    modDirVersionArg = linux_4_14.modDirVersion + "-hardened";
+  });
 
   linux_copperhead_stable = (linux_4_16.override {
     kernelPatches = linux_4_16.kernelPatches ++ [
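With the override in place, linux_copperhead_lts is an ordinary kernel derivation: linux_4_14 plus its standard patches, the copperhead patch, and the -hardened tag. A usage sketch for a NixOS host, assuming the standard linuxPackagesFor wrapper (illustrative, not part of this commit):

{ pkgs, ... }:
{
  # linuxPackagesFor pairs the kernel with matching out-of-tree modules,
  # which install under modDirVersion "4.14.48-hardened".
  boot.kernelPackages = pkgs.linuxPackagesFor pkgs.linux_copperhead_lts;
}

Passing modDirVersionArg here matters because the tag_hardened patch tags the kernel release as -hardened; without the matching modDirVersion, module installation would look under the wrong /lib/modules directory.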