1diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2index 9824d049367e..6ff72316ff0a 100644
3--- a/Documentation/admin-guide/kernel-parameters.txt
4+++ b/Documentation/admin-guide/kernel-parameters.txt
5@@ -496,16 +496,6 @@
6 nosocket -- Disable socket memory accounting.
7 nokmem -- Disable kernel memory accounting.
8
9- checkreqprot [SELINUX] Set initial checkreqprot flag value.
10- Format: { "0" | "1" }
11- See security/selinux/Kconfig help text.
12- 0 -- check protection applied by kernel (includes
13- any implied execute protection).
14- 1 -- check protection requested by application.
15- Default value is set via a kernel config option.
16- Value can be changed at runtime via
17- /selinux/checkreqprot.
18-
19 cio_ignore= [S390]
20 See Documentation/s390/CommonIO for details.
21 clk_ignore_unused
22@@ -2946,6 +2936,11 @@
23 the specified number of seconds. This is to be used if
24 your oopses keep scrolling off the screen.
25
26+ extra_latent_entropy
27+ Enable a very simple form of latent entropy extraction
28+ from the first 4GB of memory as the bootmem allocator
29+ passes the memory pages to the buddy allocator.
30+
31 pcbit= [HW,ISDN]
32
33 pcd. [PARIDE]
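
For context, extra_latent_entropy is consumed during early boot and has no runtime toggle; the following userspace sketch (illustrative only, not part of the patch) simply checks whether it was passed on the running kernel's command line.

/* Illustrative check for the extra_latent_entropy boot parameter. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmdline[4096] = "";
	FILE *f = fopen("/proc/cmdline", "r");

	if (!f)
		return 1;
	if (!fgets(cmdline, sizeof(cmdline), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("extra_latent_entropy %s\n",
	       strstr(cmdline, "extra_latent_entropy") ? "enabled" : "not set");
	return 0;
}
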
34diff --git a/Makefile b/Makefile
35index ded9e8480d74..2e948bb78142 100644
36--- a/Makefile
37+++ b/Makefile
38@@ -734,6 +734,9 @@ endif
39 endif
40
41 ifeq ($(cc-name),clang)
42+ifdef CONFIG_LOCAL_INIT
43+KBUILD_CFLAGS += -fsanitize=local-init
44+endif
45 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
46 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
47 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
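
The flag above backs the new CONFIG_LOCAL_INIT option (added in init/Kconfig later in this patch) and asks clang to zero-fill uninitialized locals. As a hedged illustration of the bug class this targets, consider the classic stack infoleak pattern below; the struct and values are hypothetical, not taken from the kernel.

/* Without -fsanitize=local-init the never-assigned members of 'req' carry
 * stale stack bytes when the whole struct is copied out; with it, the
 * compiler zero-fills the object before first use. */
#include <stdio.h>
#include <string.h>

struct request {
	int type;
	int flags;	/* never assigned below */
	char pad[32];	/* never assigned below */
};

int main(void)
{
	struct request req;
	unsigned char out[sizeof(req)];

	req.type = 1;
	memcpy(out, &req, sizeof(req));	/* stand-in for a copy_to_user() */
	/* Reading the indeterminate bytes like this is exactly the leak. */
	printf("first stale byte: %02x\n", out[4]);
	return 0;
}
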
48diff --git a/arch/Kconfig b/arch/Kconfig
49index 76c0b54443b1..63a2d30f807f 100644
50--- a/arch/Kconfig
51+++ b/arch/Kconfig
52@@ -454,6 +454,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
53 is some slowdown of the boot process (about 0.5%) and fork and
54 irq processing.
55
56+ When extra_latent_entropy is passed on the kernel command line,
57+ entropy will be extracted from up to the first 4GB of RAM while the
+	  runtime memory allocator is being initialized. This further slows
+	  down the boot process.
60+
61 Note that entropy extracted this way is not cryptographically
62 secure!
63
64@@ -747,7 +752,7 @@ config ARCH_MMAP_RND_BITS
65 int "Number of bits to use for ASLR of mmap base address" if EXPERT
66 range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
67 default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
68- default ARCH_MMAP_RND_BITS_MIN
69+ default ARCH_MMAP_RND_BITS_MAX
70 depends on HAVE_ARCH_MMAP_RND_BITS
71 help
72 This value can be used to select the number of bits to use to
73@@ -781,7 +786,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
74 int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
75 range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
76 default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
77- default ARCH_MMAP_RND_COMPAT_BITS_MIN
78+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
79 depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
80 help
81 This value can be used to select the number of bits to use to
82@@ -968,6 +973,7 @@ config ARCH_HAS_REFCOUNT
83
84 config REFCOUNT_FULL
85 bool "Perform full reference count validation at the expense of speed"
86+ default y
87 help
88 Enabling this switches the refcounting infrastructure from a fast
89 unchecked atomic_t implementation to a fully state checked
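
REFCOUNT_FULL (now default y) turns refcount_t misuse such as increment-from-zero or underflow into saturation plus a WARN instead of a silent wrap. A minimal sketch of the get/put pattern it protects, using a hypothetical object type:

#include <linux/refcount.h>
#include <linux/slab.h>

struct session {
	refcount_t refs;
	/* ... payload ... */
};

static struct session *session_create(gfp_t gfp)
{
	struct session *s = kzalloc(sizeof(*s), gfp);

	if (s)
		refcount_set(&s->refs, 1);
	return s;
}

static struct session *session_get(struct session *s)
{
	refcount_inc(&s->refs);	/* with REFCOUNT_FULL: WARN + saturate on 0 */
	return s;
}

static void session_put(struct session *s)
{
	if (refcount_dec_and_test(&s->refs))
		kfree(s);
}
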
90diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
91index be665760f2bd..5fb9026c9762 100644
92--- a/arch/arm64/Kconfig
93+++ b/arch/arm64/Kconfig
94@@ -988,6 +988,7 @@ endif
95
96 config ARM64_SW_TTBR0_PAN
97 bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
98+ default y
99 help
100 Enabling this option prevents the kernel from accessing
101 user-space memory directly by pointing TTBR0_EL1 to a reserved
102@@ -1141,6 +1142,7 @@ config RANDOMIZE_BASE
103 bool "Randomize the address of the kernel image"
104 select ARM64_MODULE_PLTS if MODULES
105 select RELOCATABLE
106+ default y
107 help
108 Randomizes the virtual address at which the kernel image is
109 loaded, as a security feature that deters exploit attempts
110diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
111index cc6bd559af85..01d5442d4722 100644
112--- a/arch/arm64/Kconfig.debug
113+++ b/arch/arm64/Kconfig.debug
114@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
115 config DEBUG_WX
116 bool "Warn on W+X mappings at boot"
117 select ARM64_PTDUMP_CORE
118+ default y
119 ---help---
120 Generate a warning if any W+X mappings are found at boot.
121
122diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
123index 634b373785c4..5b255a6db6a7 100644
124--- a/arch/arm64/configs/defconfig
125+++ b/arch/arm64/configs/defconfig
126@@ -1,4 +1,3 @@
127-CONFIG_SYSVIPC=y
128 CONFIG_POSIX_MQUEUE=y
129 CONFIG_AUDIT=y
130 CONFIG_NO_HZ_IDLE=y
131diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
132index fac1c4de7898..34786ca166e4 100644
133--- a/arch/arm64/include/asm/elf.h
134+++ b/arch/arm64/include/asm/elf.h
135@@ -114,10 +114,10 @@
136
137 /*
138 * This is the base location for PIE (ET_DYN with INTERP) loads. On
139- * 64-bit, this is above 4GB to leave the entire 32-bit address
140+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
141 * space open for things that want to use the area for 32-bit pointers.
142 */
143-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
144+#define ELF_ET_DYN_BASE 0x100000000UL
145
146 #ifndef __ASSEMBLY__
147
148@@ -158,10 +158,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
149 /* 1GB of VA */
150 #ifdef CONFIG_COMPAT
151 #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
152- 0x7ff >> (PAGE_SHIFT - 12) : \
153- 0x3ffff >> (PAGE_SHIFT - 12))
154+ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
155+ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
156 #else
157-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
158+#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
159 #endif
160
161 #ifdef __AARCH64EB__
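
Deriving STACK_RND_MASK from mmap_rnd_bits ties stack ASLR to the (now maximum-by-default) mmap randomization settings. A rough userspace calculation, mirroring the macro above and assuming 4K pages, of how the randomization span scales with the bit count:

#include <stdio.h>

/* Hypothetical helper, not kernel code: randomization span in bytes. */
static unsigned long stack_rnd_span(unsigned int rnd_bits, unsigned int page_shift)
{
	unsigned long mask = ((1UL << rnd_bits) - 1) >> (page_shift - 12);

	return (mask + 1) << page_shift;
}

int main(void)
{
	/* 18 bits, 4K pages: the old hard-coded 0x3ffff mask, ~1GB of VA. */
	printf("18 bits: %lu MiB\n", stack_rnd_span(18, 12) >> 20);
	/* 24 bits, 4K pages: what a larger ARCH_MMAP_RND_BITS buys. */
	printf("24 bits: %lu MiB\n", stack_rnd_span(24, 12) >> 20);
	return 0;
}
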
162diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
163index c0da6efe5465..f308b521c114 100644
164--- a/arch/arm64/kernel/process.c
165+++ b/arch/arm64/kernel/process.c
166@@ -481,9 +481,9 @@ unsigned long arch_align_stack(unsigned long sp)
167 unsigned long arch_randomize_brk(struct mm_struct *mm)
168 {
169 if (is_compat_task())
170- return randomize_page(mm->brk, SZ_32M);
171+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
172 else
173- return randomize_page(mm->brk, SZ_1G);
174+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
175 }
176
177 /*
178diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
179index 0fa71a78ec99..d78d44944374 100644
180--- a/arch/x86/Kconfig
181+++ b/arch/x86/Kconfig
182@@ -1208,8 +1208,7 @@ config VM86
183 default X86_LEGACY_VM86
184
185 config X86_16BIT
186- bool "Enable support for 16-bit segments" if EXPERT
187- default y
188+ bool "Enable support for 16-bit segments"
189 depends on MODIFY_LDT_SYSCALL
190 ---help---
191 This option is required by programs like Wine to run 16-bit
192@@ -2299,7 +2298,7 @@ config COMPAT_VDSO
193 choice
194 prompt "vsyscall table for legacy applications"
195 depends on X86_64
196- default LEGACY_VSYSCALL_EMULATE
197+ default LEGACY_VSYSCALL_NONE
198 help
199 Legacy user code that does not know how to find the vDSO expects
200 to be able to issue three syscalls by calling fixed addresses in
201@@ -2380,8 +2379,7 @@ config CMDLINE_OVERRIDE
202 be set to 'N' under normal conditions.
203
204 config MODIFY_LDT_SYSCALL
205- bool "Enable the LDT (local descriptor table)" if EXPERT
206- default y
207+ bool "Enable the LDT (local descriptor table)"
208 ---help---
209 Linux can allow user programs to install a per-process x86
210 Local Descriptor Table (LDT) using the modify_ldt(2) system
211diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
212index 192e4d2f9efc..343c2f1b13da 100644
213--- a/arch/x86/Kconfig.debug
214+++ b/arch/x86/Kconfig.debug
215@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
216 config DEBUG_WX
217 bool "Warn on W+X mappings at boot"
218 select X86_PTDUMP_CORE
219+ default y
220 ---help---
221 Generate a warning if any W+X mappings are found at boot.
222
223diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
224index e32fc1f274d8..d08acc76502a 100644
225--- a/arch/x86/configs/x86_64_defconfig
226+++ b/arch/x86/configs/x86_64_defconfig
227@@ -1,5 +1,4 @@
228 # CONFIG_LOCALVERSION_AUTO is not set
229-CONFIG_SYSVIPC=y
230 CONFIG_POSIX_MQUEUE=y
231 CONFIG_BSD_PROCESS_ACCT=y
232 CONFIG_TASKSTATS=y
233diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
234index 5b8b556dbb12..a569f08b4478 100644
235--- a/arch/x86/entry/vdso/vma.c
236+++ b/arch/x86/entry/vdso/vma.c
237@@ -204,55 +204,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
238 }
239
240 #ifdef CONFIG_X86_64
241-/*
242- * Put the vdso above the (randomized) stack with another randomized
243- * offset. This way there is no hole in the middle of address space.
244- * To save memory make sure it is still in the same PTE as the stack
245- * top. This doesn't give that many random bits.
246- *
247- * Note that this algorithm is imperfect: the distribution of the vdso
248- * start address within a PMD is biased toward the end.
249- *
250- * Only used for the 64-bit and x32 vdsos.
251- */
252-static unsigned long vdso_addr(unsigned long start, unsigned len)
253-{
254- unsigned long addr, end;
255- unsigned offset;
256-
257- /*
258- * Round up the start address. It can start out unaligned as a result
259- * of stack start randomization.
260- */
261- start = PAGE_ALIGN(start);
262-
263- /* Round the lowest possible end address up to a PMD boundary. */
264- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
265- if (end >= TASK_SIZE_MAX)
266- end = TASK_SIZE_MAX;
267- end -= len;
268-
269- if (end > start) {
270- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
271- addr = start + (offset << PAGE_SHIFT);
272- } else {
273- addr = start;
274- }
275-
276- /*
277- * Forcibly align the final address in case we have a hardware
278- * issue that requires alignment for performance reasons.
279- */
280- addr = align_vdso_addr(addr);
281-
282- return addr;
283-}
284-
285 static int map_vdso_randomized(const struct vdso_image *image)
286 {
287- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
288-
289- return map_vdso(image, addr);
290+ return map_vdso(image, 0);
291 }
292 #endif
293
294diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
295index 0d157d2a1e2a..770c8ae97f92 100644
296--- a/arch/x86/include/asm/elf.h
297+++ b/arch/x86/include/asm/elf.h
298@@ -249,11 +249,11 @@ extern int force_personality32;
299
300 /*
301 * This is the base location for PIE (ET_DYN with INTERP) loads. On
302- * 64-bit, this is above 4GB to leave the entire 32-bit address
303+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
304 * space open for things that want to use the area for 32-bit pointers.
305 */
306 #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
307- (DEFAULT_MAP_WINDOW / 3 * 2))
308+ 0x100000000UL)
309
310 /* This yields a mask that user programs can use to figure out what
311 instruction set this CPU supports. This could be done in user space,
312@@ -313,8 +313,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
313
314 #ifdef CONFIG_X86_32
315
316-#define __STACK_RND_MASK(is32bit) (0x7ff)
317-#define STACK_RND_MASK (0x7ff)
318+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
319+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
320
321 #define ARCH_DLINFO ARCH_DLINFO_IA32
322
323@@ -323,7 +323,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
324 #else /* CONFIG_X86_32 */
325
326 /* 1GB for 64bit, 8MB for 32bit */
327-#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
328+#ifdef CONFIG_COMPAT
329+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
330+#else
331+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
332+#endif
333 #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
334
335 #define ARCH_DLINFO \
336@@ -381,5 +385,4 @@ struct va_alignment {
337 } ____cacheline_aligned;
338
339 extern struct va_alignment va_align;
340-extern unsigned long align_vdso_addr(unsigned long);
341 #endif /* _ASM_X86_ELF_H */
342diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
343index 84137c22fdfa..4f197404cab9 100644
344--- a/arch/x86/include/asm/tlbflush.h
345+++ b/arch/x86/include/asm/tlbflush.h
346@@ -261,6 +261,7 @@ static inline void cr4_set_bits(unsigned long mask)
347
348 local_irq_save(flags);
349 cr4 = this_cpu_read(cpu_tlbstate.cr4);
350+ BUG_ON(cr4 != __read_cr4());
351 if ((cr4 | mask) != cr4)
352 __cr4_set(cr4 | mask);
353 local_irq_restore(flags);
354@@ -273,6 +274,7 @@ static inline void cr4_clear_bits(unsigned long mask)
355
356 local_irq_save(flags);
357 cr4 = this_cpu_read(cpu_tlbstate.cr4);
358+ BUG_ON(cr4 != __read_cr4());
359 if ((cr4 & ~mask) != cr4)
360 __cr4_set(cr4 & ~mask);
361 local_irq_restore(flags);
362@@ -283,6 +285,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
363 unsigned long cr4;
364
365 cr4 = this_cpu_read(cpu_tlbstate.cr4);
366+ BUG_ON(cr4 != __read_cr4());
367 __cr4_set(cr4 ^ mask);
368 }
369
370@@ -389,6 +392,7 @@ static inline void __native_flush_tlb_global(void)
371 raw_local_irq_save(flags);
372
373 cr4 = this_cpu_read(cpu_tlbstate.cr4);
374+ BUG_ON(cr4 != __read_cr4());
375 /* toggle PGE */
376 native_write_cr4(cr4 ^ X86_CR4_PGE);
377 /* write old PGE again and flush TLBs */
378diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
379index 5f74f94244e1..9c9fdb5ef52e 100644
380--- a/arch/x86/kernel/cpu/common.c
381+++ b/arch/x86/kernel/cpu/common.c
382@@ -1662,7 +1662,6 @@ void cpu_init(void)
383 wrmsrl(MSR_KERNEL_GS_BASE, 0);
384 barrier();
385
386- x86_configure_nx();
387 x2apic_setup();
388
389 /*
390diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
391index 30ca2d1a9231..bde0a18cd9f9 100644
392--- a/arch/x86/kernel/process.c
393+++ b/arch/x86/kernel/process.c
394@@ -39,6 +39,8 @@
395 #include <asm/desc.h>
396 #include <asm/prctl.h>
397 #include <asm/spec-ctrl.h>
398+#include <asm/elf.h>
399+#include <linux/sizes.h>
400
401 /*
402 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
403@@ -718,7 +720,10 @@ unsigned long arch_align_stack(unsigned long sp)
404
405 unsigned long arch_randomize_brk(struct mm_struct *mm)
406 {
407- return randomize_page(mm->brk, 0x02000000);
408+ if (mmap_is_ia32())
409+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
410+ else
411+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
412 }
413
414 /*
415diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
416index 676774b9bb8d..5ad7a95382b0 100644
417--- a/arch/x86/kernel/sys_x86_64.c
418+++ b/arch/x86/kernel/sys_x86_64.c
419@@ -54,13 +54,6 @@ static unsigned long get_align_bits(void)
420 return va_align.bits & get_align_mask();
421 }
422
423-unsigned long align_vdso_addr(unsigned long addr)
424-{
425- unsigned long align_mask = get_align_mask();
426- addr = (addr + align_mask) & ~align_mask;
427- return addr | get_align_bits();
428-}
429-
430 static int __init control_va_addr_alignment(char *str)
431 {
432 /* guard against enabling this on other CPU families */
433@@ -122,10 +115,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
434 }
435
436 *begin = get_mmap_base(1);
437- if (in_compat_syscall())
438- *end = task_size_32bit();
439- else
440- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
441+ *end = get_mmap_base(0);
442 }
443
444 unsigned long
445@@ -210,7 +200,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
446
447 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
448 info.length = len;
449- info.low_limit = PAGE_SIZE;
450+ info.low_limit = get_mmap_base(1);
451 info.high_limit = get_mmap_base(0);
452
453 /*
454diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
455index 396e1f0151ac..6526b19579dc 100644
456--- a/arch/x86/mm/init_32.c
457+++ b/arch/x86/mm/init_32.c
458@@ -558,7 +558,7 @@ static void __init pagetable_init(void)
459 permanent_kmaps_init(pgd_base);
460 }
461
462-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
463+pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
464 EXPORT_SYMBOL_GPL(__supported_pte_mask);
465
466 /* user-defined highmem size */
467@@ -866,7 +866,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
468 #endif
469 #endif
470
471-int kernel_set_to_readonly __read_mostly;
472+int kernel_set_to_readonly __ro_after_init;
473
474 void set_kernel_text_rw(void)
475 {
476@@ -918,12 +918,11 @@ void mark_rodata_ro(void)
477 unsigned long start = PFN_ALIGN(_text);
478 unsigned long size = PFN_ALIGN(_etext) - start;
479
480+ kernel_set_to_readonly = 1;
481 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
482 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
483 size >> 10);
484
485- kernel_set_to_readonly = 1;
486-
487 #ifdef CONFIG_CPA_DEBUG
488 printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
489 start, start+size);
490diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
491index af11a2890235..b2d28d38c01e 100644
492--- a/arch/x86/mm/init_64.c
493+++ b/arch/x86/mm/init_64.c
494@@ -65,7 +65,7 @@
495 * around without checking the pgd every time.
496 */
497
498-pteval_t __supported_pte_mask __read_mostly = ~0;
499+pteval_t __supported_pte_mask __ro_after_init = ~0;
500 EXPORT_SYMBOL_GPL(__supported_pte_mask);
501
502 int force_personality32;
503@@ -1195,7 +1195,7 @@ void __init mem_init(void)
504 mem_init_print_info(NULL);
505 }
506
507-int kernel_set_to_readonly;
508+int kernel_set_to_readonly __ro_after_init;
509
510 void set_kernel_text_rw(void)
511 {
512@@ -1244,9 +1244,8 @@ void mark_rodata_ro(void)
513
514 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
515 (end - start) >> 10);
516- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
517-
518 kernel_set_to_readonly = 1;
519+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
520
521 /*
522 * The rodata/data/bss/brk section (but not the kernel text!)
523diff --git a/block/blk-softirq.c b/block/blk-softirq.c
524index 01e2b353a2b9..9aeddca4a29f 100644
525--- a/block/blk-softirq.c
526+++ b/block/blk-softirq.c
527@@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
528 * Softirq action handler - move entries to local list and loop over them
529 * while passing them to the queue registered handler.
530 */
531-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
532+static __latent_entropy void blk_done_softirq(void)
533 {
534 struct list_head *cpu_list, local_list;
535
536diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
537index 0df21f046fc6..6f8d9bf71bba 100644
538--- a/drivers/ata/libata-core.c
539+++ b/drivers/ata/libata-core.c
540@@ -5151,7 +5151,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
541 struct ata_port *ap;
542 unsigned int tag;
543
544- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
545+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
546 ap = qc->ap;
547
548 qc->flags = 0;
549@@ -5168,7 +5168,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
550 struct ata_port *ap;
551 struct ata_link *link;
552
553- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
554+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
555 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
556 ap = qc->ap;
557 link = qc->dev->link;
558diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
559index c28dca0c613d..d4813f0d25ca 100644
560--- a/drivers/char/Kconfig
561+++ b/drivers/char/Kconfig
562@@ -9,7 +9,6 @@ source "drivers/tty/Kconfig"
563
564 config DEVMEM
565 bool "/dev/mem virtual device support"
566- default y
567 help
568 Say Y here if you want to support the /dev/mem device.
569 The /dev/mem device is used to access areas of physical
570@@ -568,7 +567,6 @@ config TELCLOCK
571 config DEVPORT
572 bool "/dev/port character device"
573 depends on ISA || PCI
574- default y
575 help
576 Say Y here if you want to support the /dev/port device. The /dev/port
577 device is similar to /dev/mem, but for I/O ports.
578diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
579index b811442c5ce6..4f62a63cbcb1 100644
580--- a/drivers/tty/Kconfig
581+++ b/drivers/tty/Kconfig
582@@ -122,7 +122,6 @@ config UNIX98_PTYS
583
584 config LEGACY_PTYS
585 bool "Legacy (BSD) PTY support"
586- default y
587 ---help---
588 A pseudo terminal (PTY) is a software device consisting of two
589 halves: a master and a slave. The slave device behaves identical to
590diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
591index 83c58a20d16f..32ef2e3a8482 100644
592--- a/drivers/usb/core/hub.c
593+++ b/drivers/usb/core/hub.c
594@@ -41,6 +41,8 @@
595 #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
596 #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
597
598+extern int deny_new_usb;
599+
600 /* Protect struct usb_device->state and ->children members
601 * Note: Both are also protected by ->dev.sem, except that ->state can
602 * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
603@@ -4847,6 +4849,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
604 goto done;
605 return;
606 }
607+
608+ if (deny_new_usb) {
609+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
610+ goto done;
611+ }
612+
613 if (hub_is_superspeed(hub->hdev))
614 unit_load = 150;
615 else
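
The deny_new_usb flag is exported as a sysctl later in this patch (kernel/sysctl.c), so it should appear as /proc/sys/kernel/deny_new_usb. A small sketch of toggling it from userspace, assuming a patched kernel and root privileges:

#include <stdio.h>

static int set_deny_new_usb(int val)
{
	FILE *f = fopen("/proc/sys/kernel/deny_new_usb", "w");

	if (!f)
		return -1;	/* unpatched kernel, or insufficient privileges */
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	/* Lock out newly inserted USB devices, e.g. before suspending a laptop. */
	return set_deny_new_usb(1) ? 1 : 0;
}
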
616diff --git a/fs/exec.c b/fs/exec.c
617index 7eb8d21bcab9..171f31b3bf05 100644
618--- a/fs/exec.c
619+++ b/fs/exec.c
620@@ -62,6 +62,7 @@
621 #include <linux/oom.h>
622 #include <linux/compat.h>
623 #include <linux/vmalloc.h>
624+#include <linux/random.h>
625
626 #include <linux/uaccess.h>
627 #include <asm/mmu_context.h>
628@@ -321,6 +322,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
629 arch_bprm_mm_init(mm, vma);
630 up_write(&mm->mmap_sem);
631 bprm->p = vma->vm_end - sizeof(void *);
632+ if (randomize_va_space)
633+ bprm->p ^= get_random_int() & ~PAGE_MASK;
634 return 0;
635 err:
636 up_write(&mm->mmap_sem);
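
With randomize_va_space enabled, the XOR with get_random_int() & ~PAGE_MASK shifts the start of the argument/environment area by a random sub-page amount on each execve(). A hedged way to observe this from userspace (the offset also depends on the size of the environment, so compare runs with an identical environment):

#include <stdio.h>
#include <stdint.h>

int main(int argc, char *argv[])
{
	uintptr_t p = (uintptr_t)argv[0];

	(void)argc;
	printf("argv[0] offset within its page: 0x%03lx\n",
	       (unsigned long)(p & 0xfff));
	return 0;
}
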
637diff --git a/fs/namei.c b/fs/namei.c
638index b61d6aa9279d..255c2dc36189 100644
639--- a/fs/namei.c
640+++ b/fs/namei.c
641@@ -883,8 +883,8 @@ static inline void put_link(struct nameidata *nd)
642 path_put(&last->link);
643 }
644
645-int sysctl_protected_symlinks __read_mostly = 0;
646-int sysctl_protected_hardlinks __read_mostly = 0;
647+int sysctl_protected_symlinks __read_mostly = 1;
648+int sysctl_protected_hardlinks __read_mostly = 1;
649
650 /**
651 * may_follow_link - Check symlink following for unsafe situations
652diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
653index 5f93cfacb3d1..cea0d7d3b23e 100644
654--- a/fs/nfs/Kconfig
655+++ b/fs/nfs/Kconfig
656@@ -195,4 +195,3 @@ config NFS_DEBUG
657 bool
658 depends on NFS_FS && SUNRPC_DEBUG
659 select CRC32
660- default y
661diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
662index 1ade1206bb89..60b0f76dec47 100644
663--- a/fs/proc/Kconfig
664+++ b/fs/proc/Kconfig
665@@ -39,7 +39,6 @@ config PROC_KCORE
666 config PROC_VMCORE
667 bool "/proc/vmcore support"
668 depends on PROC_FS && CRASH_DUMP
669- default y
670 help
671 Exports the dump image of crashed kernel in ELF format.
672
673diff --git a/fs/stat.c b/fs/stat.c
674index 873785dae022..d3c2ada8b9c7 100644
675--- a/fs/stat.c
676+++ b/fs/stat.c
677@@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
678 stat->gid = inode->i_gid;
679 stat->rdev = inode->i_rdev;
680 stat->size = i_size_read(inode);
681- stat->atime = inode->i_atime;
682- stat->mtime = inode->i_mtime;
683+ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
684+ stat->atime = inode->i_ctime;
685+ stat->mtime = inode->i_ctime;
686+ } else {
687+ stat->atime = inode->i_atime;
688+ stat->mtime = inode->i_mtime;
689+ }
690 stat->ctime = inode->i_ctime;
691 stat->blksize = i_blocksize(inode);
692 stat->blocks = inode->i_blocks;
693@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
694 stat->result_mask |= STATX_BASIC_STATS;
695 request_mask &= STATX_ALL;
696 query_flags &= KSTAT_QUERY_FLAGS;
697- if (inode->i_op->getattr)
698- return inode->i_op->getattr(path, stat, request_mask,
699- query_flags);
700+ if (inode->i_op->getattr) {
701+ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
702+ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
703+ stat->atime = stat->ctime;
704+ stat->mtime = stat->ctime;
705+ }
706+ return retval;
707+ }
708
709 generic_fillattr(inode, stat);
710 return 0;
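
On a kernel with this change and device_sidechannel_restrict enabled (the sysctl added later in this patch), unprivileged stat() calls on world-accessible device nodes report ctime in place of atime and mtime. An illustrative check, assuming /dev/null is mode 0666 as usual:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/null", &st))
		return 1;
	printf("atime==ctime: %d, mtime==ctime: %d\n",
	       st.st_atime == st.st_ctime, st.st_mtime == st.st_ctime);
	return 0;
}
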
711diff --git a/include/linux/cache.h b/include/linux/cache.h
712index 750621e41d1c..e7157c18c62c 100644
713--- a/include/linux/cache.h
714+++ b/include/linux/cache.h
715@@ -31,6 +31,8 @@
716 #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
717 #endif
718
719+#define __read_only __ro_after_init
720+
721 #ifndef ____cacheline_aligned
722 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
723 #endif
724diff --git a/include/linux/capability.h b/include/linux/capability.h
725index f640dcbc880c..2b4f5d651f19 100644
726--- a/include/linux/capability.h
727+++ b/include/linux/capability.h
728@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
729 extern bool has_ns_capability_noaudit(struct task_struct *t,
730 struct user_namespace *ns, int cap);
731 extern bool capable(int cap);
732+extern bool capable_noaudit(int cap);
733 extern bool ns_capable(struct user_namespace *ns, int cap);
734 extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
735 #else
736@@ -232,6 +233,10 @@ static inline bool capable(int cap)
737 {
738 return true;
739 }
740+static inline bool capable_noaudit(int cap)
741+{
742+ return true;
743+}
744 static inline bool ns_capable(struct user_namespace *ns, int cap)
745 {
746 return true;
747diff --git a/include/linux/fs.h b/include/linux/fs.h
748index c6baf767619e..31904f3d38a6 100644
749--- a/include/linux/fs.h
750+++ b/include/linux/fs.h
751@@ -3407,4 +3407,15 @@ static inline bool dir_relax_shared(struct inode *inode)
752 extern bool path_noexec(const struct path *path);
753 extern void inode_nohighmem(struct inode *inode);
754
755+extern int device_sidechannel_restrict;
756+
757+static inline bool is_sidechannel_device(const struct inode *inode)
758+{
759+ umode_t mode;
760+ if (!device_sidechannel_restrict)
761+ return false;
762+ mode = inode->i_mode;
763+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
764+}
765+
766 #endif /* _LINUX_FS_H */
767diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
768index bdaf22582f6e..326ff15d4637 100644
769--- a/include/linux/fsnotify.h
770+++ b/include/linux/fsnotify.h
771@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file)
772 struct inode *inode = path->dentry->d_inode;
773 __u32 mask = FS_ACCESS;
774
775+ if (is_sidechannel_device(inode))
776+ return;
777+
778 if (S_ISDIR(inode->i_mode))
779 mask |= FS_ISDIR;
780
781@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file)
782 struct inode *inode = path->dentry->d_inode;
783 __u32 mask = FS_MODIFY;
784
785+ if (is_sidechannel_device(inode))
786+ return;
787+
788 if (S_ISDIR(inode->i_mode))
789 mask |= FS_ISDIR;
790
791diff --git a/include/linux/gfp.h b/include/linux/gfp.h
792index 1a4582b44d32..4d445a8fe7f2 100644
793--- a/include/linux/gfp.h
794+++ b/include/linux/gfp.h
795@@ -513,9 +513,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
796 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
797 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
798
799-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
800+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
801 void free_pages_exact(void *virt, size_t size);
802-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
803+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
804
805 #define __get_free_page(gfp_mask) \
806 __get_free_pages((gfp_mask), 0)
807diff --git a/include/linux/highmem.h b/include/linux/highmem.h
808index 776f90f3a1cd..3f5c47000059 100644
809--- a/include/linux/highmem.h
810+++ b/include/linux/highmem.h
811@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page)
812 kunmap_atomic(kaddr);
813 }
814
815+static inline void verify_zero_highpage(struct page *page)
816+{
817+ void *kaddr = kmap_atomic(page);
818+ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
819+ kunmap_atomic(kaddr);
820+}
821+
822 static inline void zero_user_segments(struct page *page,
823 unsigned start1, unsigned end1,
824 unsigned start2, unsigned end2)
825diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
826index 69c238210325..ee487ea4f48f 100644
827--- a/include/linux/interrupt.h
828+++ b/include/linux/interrupt.h
829@@ -485,7 +485,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
830
831 struct softirq_action
832 {
833- void (*action)(struct softirq_action *);
834+ void (*action)(void);
835 };
836
837 asmlinkage void do_softirq(void);
838@@ -500,7 +500,7 @@ static inline void do_softirq_own_stack(void)
839 }
840 #endif
841
842-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
843+extern void __init open_softirq(int nr, void (*action)(void));
844 extern void softirq_init(void);
845 extern void __raise_softirq_irqoff(unsigned int nr);
846
847diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
848index 069aa2ebef90..cb9e3637a620 100644
849--- a/include/linux/kobject_ns.h
850+++ b/include/linux/kobject_ns.h
851@@ -45,7 +45,7 @@ struct kobj_ns_type_operations {
852 void (*drop_ns)(void *);
853 };
854
855-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
856+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
857 int kobj_ns_type_registered(enum kobj_ns_type type);
858 const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
859 const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
860diff --git a/include/linux/mm.h b/include/linux/mm.h
861index a4e9bdbec490..0fe7ebd0f462 100644
862--- a/include/linux/mm.h
863+++ b/include/linux/mm.h
864@@ -535,7 +535,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
865 }
866 #endif
867
868-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
869+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
870 static inline void *kvmalloc(size_t size, gfp_t flags)
871 {
872 return kvmalloc_node(size, flags, NUMA_NO_NODE);
873diff --git a/include/linux/percpu.h b/include/linux/percpu.h
874index 296bbe49d5d1..b26652c9a98d 100644
875--- a/include/linux/percpu.h
876+++ b/include/linux/percpu.h
877@@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
878 pcpu_fc_populate_pte_fn_t populate_pte_fn);
879 #endif
880
881-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
882+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
883 extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
884 extern bool is_kernel_percpu_address(unsigned long addr);
885
886@@ -137,8 +137,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
887 extern void __init setup_per_cpu_areas(void);
888 #endif
889
890-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
891-extern void __percpu *__alloc_percpu(size_t size, size_t align);
892+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
893+extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
894 extern void free_percpu(void __percpu *__pdata);
895 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
896
897diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
898index 7546822a1d74..320033b310d2 100644
899--- a/include/linux/perf_event.h
900+++ b/include/linux/perf_event.h
901@@ -1151,6 +1151,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
902 int perf_event_max_stack_handler(struct ctl_table *table, int write,
903 void __user *buffer, size_t *lenp, loff_t *ppos);
904
905+static inline bool perf_paranoid_any(void)
906+{
907+ return sysctl_perf_event_paranoid > 2;
908+}
909+
910 static inline bool perf_paranoid_tracepoint_raw(void)
911 {
912 return sysctl_perf_event_paranoid > -1;
913diff --git a/include/linux/slab.h b/include/linux/slab.h
914index 231abc8976c5..b0bf5d4a4934 100644
915--- a/include/linux/slab.h
916+++ b/include/linux/slab.h
917@@ -177,8 +177,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
918 /*
919 * Common kmalloc functions provided by all allocators
920 */
921-void * __must_check __krealloc(const void *, size_t, gfp_t);
922-void * __must_check krealloc(const void *, size_t, gfp_t);
923+void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
+void * __must_check krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
925 void kfree(const void *);
926 void kzfree(const void *);
927 size_t ksize(const void *);
928@@ -351,7 +351,7 @@ static __always_inline int kmalloc_index(size_t size)
929 }
930 #endif /* !CONFIG_SLOB */
931
932-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
933+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
934 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
935 void kmem_cache_free(struct kmem_cache *, void *);
936
937@@ -375,7 +375,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
938 }
939
940 #ifdef CONFIG_NUMA
941-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
942+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
943 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
944 #else
945 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
946@@ -497,7 +497,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
947 * for general use, and so are not documented here. For a full list of
948 * potential flags, always refer to linux/gfp.h.
949 */
950-static __always_inline void *kmalloc(size_t size, gfp_t flags)
951+static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
952 {
953 if (__builtin_constant_p(size)) {
954 if (size > KMALLOC_MAX_CACHE_SIZE)
955@@ -537,7 +537,7 @@ static __always_inline int kmalloc_size(int n)
956 return 0;
957 }
958
959-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
960+static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
961 {
962 #ifndef CONFIG_SLOB
963 if (__builtin_constant_p(size) &&
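
The alloc_size attribute lets the compiler track the size of kmalloc-style allocations, which FORTIFY_SOURCE and overflow warnings build on. A userspace sketch with a hypothetical allocator wrapper (build with optimization, e.g. -O2, so the builtin can resolve):

#include <stdio.h>
#include <stdlib.h>

/* noinline forces the compiler to rely on alloc_size rather than inlining. */
__attribute__((noinline, alloc_size(1)))
void *my_alloc(size_t n) { return malloc(n); }

int main(void)
{
	char *p = my_alloc(16);

	/* Prints 16 here; without alloc_size the compiler could not see the
	 * allocation size and would report (size_t)-1. */
	printf("%zu\n", __builtin_object_size(p, 0));
	free(p);
	return 0;
}
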
964diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
965index 8ad99c47b19c..91fea0d0db3b 100644
966--- a/include/linux/slub_def.h
967+++ b/include/linux/slub_def.h
968@@ -120,6 +120,11 @@ struct kmem_cache {
969 unsigned long random;
970 #endif
971
972+#ifdef CONFIG_SLAB_CANARY
973+ unsigned long random_active;
974+ unsigned long random_inactive;
975+#endif
976+
977 #ifdef CONFIG_NUMA
978 /*
979 * Defragmentation by allocating from a remote node.
980diff --git a/include/linux/string.h b/include/linux/string.h
981index dd39a690c841..00d16d874c0a 100644
982--- a/include/linux/string.h
983+++ b/include/linux/string.h
984@@ -235,10 +235,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob
985 void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
986 void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
987
988+#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
989+#define __string_size(p) __builtin_object_size(p, 1)
990+#else
991+#define __string_size(p) __builtin_object_size(p, 0)
992+#endif
993+
994 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
995 __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
996 {
997- size_t p_size = __builtin_object_size(p, 0);
998+ size_t p_size = __string_size(p);
999 if (__builtin_constant_p(size) && p_size < size)
1000 __write_overflow();
1001 if (p_size < size)
1002@@ -248,7 +254,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
1003
1004 __FORTIFY_INLINE char *strcat(char *p, const char *q)
1005 {
1006- size_t p_size = __builtin_object_size(p, 0);
1007+ size_t p_size = __string_size(p);
1008 if (p_size == (size_t)-1)
1009 return __builtin_strcat(p, q);
1010 if (strlcat(p, q, p_size) >= p_size)
1011@@ -259,7 +265,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
1012 __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
1013 {
1014 __kernel_size_t ret;
1015- size_t p_size = __builtin_object_size(p, 0);
1016+ size_t p_size = __string_size(p);
1017
1018 /* Work around gcc excess stack consumption issue */
1019 if (p_size == (size_t)-1 ||
1020@@ -274,7 +280,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
1021 extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
1022 __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
1023 {
1024- size_t p_size = __builtin_object_size(p, 0);
1025+ size_t p_size = __string_size(p);
1026 __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
1027 if (p_size <= ret && maxlen != ret)
1028 fortify_panic(__func__);
1029@@ -286,8 +292,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
1030 __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
1031 {
1032 size_t ret;
1033- size_t p_size = __builtin_object_size(p, 0);
1034- size_t q_size = __builtin_object_size(q, 0);
1035+ size_t p_size = __string_size(p);
1036+ size_t q_size = __string_size(q);
1037 if (p_size == (size_t)-1 && q_size == (size_t)-1)
1038 return __real_strlcpy(p, q, size);
1039 ret = strlen(q);
1040@@ -307,8 +313,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
1041 __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
1042 {
1043 size_t p_len, copy_len;
1044- size_t p_size = __builtin_object_size(p, 0);
1045- size_t q_size = __builtin_object_size(q, 0);
1046+ size_t p_size = __string_size(p);
1047+ size_t q_size = __string_size(q);
1048 if (p_size == (size_t)-1 && q_size == (size_t)-1)
1049 return __builtin_strncat(p, q, count);
1050 p_len = strlen(p);
1051@@ -421,8 +427,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
1052 /* defined after fortified strlen and memcpy to reuse them */
1053 __FORTIFY_INLINE char *strcpy(char *p, const char *q)
1054 {
1055- size_t p_size = __builtin_object_size(p, 0);
1056- size_t q_size = __builtin_object_size(q, 0);
1057+ size_t p_size = __string_size(p);
1058+ size_t q_size = __string_size(q);
1059 if (p_size == (size_t)-1 && q_size == (size_t)-1)
1060 return __builtin_strcpy(p, q);
1061 memcpy(p, q, strlen(q) + 1);
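
The switch from __builtin_object_size(p, 0) to mode 1 under CONFIG_FORTIFY_SOURCE_STRICT_STRING makes the fortified string helpers stop at the enclosing struct member rather than the whole object. A small userspace demonstration of the difference (build with -O2):

#include <stdio.h>

struct config {
	char name[8];
	int level;
};

int main(void)
{
	struct config c;

	/* Mode 0 counts everything up to the end of the struct (12 bytes
	 * here), so an overflow from 'name' into 'level' goes unnoticed;
	 * mode 1 counts only the member itself (8 bytes) and catches it. */
	printf("mode 0: %zu bytes\n", __builtin_object_size(c.name, 0));
	printf("mode 1: %zu bytes\n", __builtin_object_size(c.name, 1));
	return 0;
}
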
1062diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
1063index 1e5d8c392f15..66d0e49c9987 100644
1064--- a/include/linux/vmalloc.h
1065+++ b/include/linux/vmalloc.h
1066@@ -68,19 +68,19 @@ static inline void vmalloc_init(void)
1067 }
1068 #endif
1069
1070-extern void *vmalloc(unsigned long size);
1071-extern void *vzalloc(unsigned long size);
1072-extern void *vmalloc_user(unsigned long size);
1073-extern void *vmalloc_node(unsigned long size, int node);
1074-extern void *vzalloc_node(unsigned long size, int node);
1075-extern void *vmalloc_exec(unsigned long size);
1076-extern void *vmalloc_32(unsigned long size);
1077-extern void *vmalloc_32_user(unsigned long size);
1078-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
1079+extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
1080+extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
1081+extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
1082+extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
1083+extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
1084+extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
1085+extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
1086+extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
1087+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
1088 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
1089 unsigned long start, unsigned long end, gfp_t gfp_mask,
1090 pgprot_t prot, unsigned long vm_flags, int node,
1091- const void *caller);
1092+ const void *caller) __attribute__((alloc_size(1)));
1093 #ifndef CONFIG_MMU
1094 extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
1095 static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
1096diff --git a/init/Kconfig b/init/Kconfig
1097index e37f4b2a6445..49c20dea1afd 100644
1098--- a/init/Kconfig
1099+++ b/init/Kconfig
1100@@ -296,6 +296,7 @@ config USELIB
1101 config AUDIT
1102 bool "Auditing support"
1103 depends on NET
1104+ default y
1105 help
1106 Enable auditing infrastructure that can be used with another
1107 kernel subsystem, such as SELinux (which requires this for
1108@@ -1039,6 +1040,12 @@ config CC_OPTIMIZE_FOR_SIZE
1109
1110 endchoice
1111
1112+config LOCAL_INIT
1113+ bool "Zero uninitialized locals"
1114+ help
+	  Zero-fill uninitialized local variables, other than variable-length
+	  arrays. Requires compiler support: the Makefile enables this via
+	  clang's -fsanitize=local-init when building with clang.
1117+
1118 config SYSCTL
1119 bool
1120
1121@@ -1296,8 +1303,7 @@ config SHMEM
1122 which may be appropriate on small systems without swap.
1123
1124 config AIO
1125- bool "Enable AIO support" if EXPERT
1126- default y
1127+ bool "Enable AIO support"
1128 help
1129 This option enables POSIX asynchronous I/O which may by used
1130 by some high performance threaded applications. Disabling
1131@@ -1502,7 +1508,7 @@ config VM_EVENT_COUNTERS
1132
1133 config SLUB_DEBUG
1134 default y
1135- bool "Enable SLUB debugging support" if EXPERT
1136+ bool "Enable SLUB debugging support"
1137 depends on SLUB && SYSFS
1138 help
1139 SLUB has extensive debug support features. Disabling these can
1140@@ -1526,7 +1532,6 @@ config SLUB_MEMCG_SYSFS_ON
1141
1142 config COMPAT_BRK
1143 bool "Disable heap randomization"
1144- default y
1145 help
1146 Randomizing heap placement makes heap exploits harder, but it
1147 also breaks ancient binaries (including anything libc5 based).
1148@@ -1573,7 +1578,6 @@ endchoice
1149
1150 config SLAB_MERGE_DEFAULT
1151 bool "Allow slab caches to be merged"
1152- default y
1153 help
1154 For reduced kernel memory fragmentation, slab caches can be
1155 merged when they share the same size and other characteristics.
1156@@ -1586,9 +1590,9 @@ config SLAB_MERGE_DEFAULT
1157 command line.
1158
1159 config SLAB_FREELIST_RANDOM
1160- default n
1161 depends on SLAB || SLUB
1162 bool "SLAB freelist randomization"
1163+ default y
1164 help
1165 Randomizes the freelist order used on creating new pages. This
1166 security feature reduces the predictability of the kernel slab
1167@@ -1597,12 +1601,56 @@ config SLAB_FREELIST_RANDOM
1168 config SLAB_FREELIST_HARDENED
1169 bool "Harden slab freelist metadata"
1170 depends on SLUB
1171+ default y
1172 help
1173 Many kernel heap attacks try to target slab cache metadata and
1174 other infrastructure. This options makes minor performance
1175 sacrifies to harden the kernel slab allocator against common
1176 freelist exploit methods.
1177
1178+config SLAB_HARDENED
1179+ default y
1180+ depends on SLUB
1181+ bool "Hardened SLAB infrastructure"
1182+ help
1183+ Make minor performance sacrifices to harden the kernel slab
1184+ allocator.
1185+
1186+config SLAB_CANARY
1187+ depends on SLUB
1188+ depends on !SLAB_MERGE_DEFAULT
1189+ bool "SLAB canaries"
1190+ default y
1191+ help
1192+ Place canaries at the end of kernel slab allocations, sacrificing
1193+ some performance and memory usage for security.
1194+
+	  Canaries can detect some forms of heap corruption when allocations
+	  are freed, and they provide basic use-after-free detection for the
+	  HARDENED_USERCOPY feature.
1198+
1199+ Canaries absorb small overflows (rendering them harmless), mitigate
1200+ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
1201+ byte and provide basic double-free detection.
1202+
1203+config SLAB_SANITIZE
1204+ bool "Sanitize SLAB allocations"
1205+ depends on SLUB
1206+ default y
1207+ help
1208+ Zero fill slab allocations on free, reducing the lifetime of
1209+ sensitive data and helping to mitigate use-after-free bugs.
1210+
+	  For slabs with debug poisoning enabled, this has no impact.
1212+
1213+config SLAB_SANITIZE_VERIFY
1214+ depends on SLAB_SANITIZE && PAGE_SANITIZE
1215+ default y
1216+ bool "Verify sanitized SLAB allocations"
1217+ help
1218+ Verify that newly allocated slab allocations are zeroed to detect
1219+ write-after-free bugs.
1220+
1221 config SLUB_CPU_PARTIAL
1222 default y
1223 depends on SLUB && SMP
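
SLAB_SANITIZE zeroes objects when they are freed and SLAB_SANITIZE_VERIFY checks on allocation that they are still zero. The following is only a conceptual sketch of that pairing, not the kernel implementation:

#include <string.h>
#include <stdbool.h>
#include <stddef.h>

/* Wipe stale data at free time so it cannot leak or be reused. */
void sanitize_on_free(void *obj, size_t size)
{
	memset(obj, 0, size);
}

/* On the next allocation, any nonzero byte indicates a write-after-free. */
bool verify_on_alloc(const void *obj, size_t size)
{
	const unsigned char *p = obj;
	size_t i;

	for (i = 0; i < size; i++)
		if (p[i])
			return false;
	return true;
}
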
1224diff --git a/kernel/audit.c b/kernel/audit.c
1225index 227db99b0f19..aec52b408543 100644
1226--- a/kernel/audit.c
1227+++ b/kernel/audit.c
1228@@ -1578,6 +1578,9 @@ static int __init audit_enable(char *str)
1229
1230 if (audit_default == AUDIT_OFF)
1231 audit_initialized = AUDIT_DISABLED;
1232+ else if (!audit_ever_enabled)
1233+ audit_initialized = AUDIT_UNINITIALIZED;
1234+
1235 if (audit_set_enabled(audit_default))
1236 panic("audit: error setting audit state (%d)\n", audit_default);
1237
1238diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1239index ba03ec39efb3..47ed9081b668 100644
1240--- a/kernel/bpf/core.c
1241+++ b/kernel/bpf/core.c
1242@@ -302,7 +302,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
1243 #ifdef CONFIG_BPF_JIT
1244 /* All BPF JIT sysctl knobs here. */
1245 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
1246-int bpf_jit_harden __read_mostly;
1247+int bpf_jit_harden __read_mostly = 2;
1248 int bpf_jit_kallsyms __read_mostly;
1249
1250 static __always_inline void
1251diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
1252index 43f95d190eea..8e353f596873 100644
1253--- a/kernel/bpf/syscall.c
1254+++ b/kernel/bpf/syscall.c
1255@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
1256 static DEFINE_IDR(map_idr);
1257 static DEFINE_SPINLOCK(map_idr_lock);
1258
1259-int sysctl_unprivileged_bpf_disabled __read_mostly;
1260+int sysctl_unprivileged_bpf_disabled __read_mostly = 1;
1261
1262 static const struct bpf_map_ops * const bpf_map_types[] = {
1263 #define BPF_PROG_TYPE(_id, _ops)
1264diff --git a/kernel/capability.c b/kernel/capability.c
1265index 1e1c0236f55b..452062fe45ce 100644
1266--- a/kernel/capability.c
1267+++ b/kernel/capability.c
1268@@ -431,6 +431,12 @@ bool capable(int cap)
1269 return ns_capable(&init_user_ns, cap);
1270 }
1271 EXPORT_SYMBOL(capable);
1272+
1273+bool capable_noaudit(int cap)
1274+{
1275+ return ns_capable_noaudit(&init_user_ns, cap);
1276+}
1277+EXPORT_SYMBOL(capable_noaudit);
1278 #endif /* CONFIG_MULTIUSER */
1279
1280 /**
1281diff --git a/kernel/events/core.c b/kernel/events/core.c
1282index ca7298760c83..910ac6cc9f07 100644
1283--- a/kernel/events/core.c
1284+++ b/kernel/events/core.c
1285@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
1286 * 0 - disallow raw tracepoint access for unpriv
1287 * 1 - disallow cpu events for unpriv
1288 * 2 - disallow kernel profiling for unpriv
1289+ * 3 - disallow all unpriv perf event use
1290 */
1291+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
1292+int sysctl_perf_event_paranoid __read_mostly = 3;
1293+#else
1294 int sysctl_perf_event_paranoid __read_mostly = 2;
1295+#endif
1296
1297 /* Minimum for 512 kiB + 1 user control page */
1298 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
1299@@ -9921,6 +9926,9 @@ SYSCALL_DEFINE5(perf_event_open,
1300 if (flags & ~PERF_FLAG_ALL)
1301 return -EINVAL;
1302
1303+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
1304+ return -EACCES;
1305+
1306 err = perf_copy_attr(attr_uptr, &attr);
1307 if (err)
1308 return err;
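
With the new paranoid level, an unprivileged perf_event_open() is rejected with EACCES before the attribute structure is even copied in. A minimal reproducer (run as a non-root user on a kernel with CONFIG_SECURITY_PERF_EVENTS_RESTRICT or kernel.perf_event_paranoid set to 3):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* Self-monitoring of a software event is normally allowed even at
	 * paranoid level 2; at level 3 it fails with "Permission denied". */
	if (syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0) < 0)
		perror("perf_event_open");
	return 0;
}
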
1309diff --git a/kernel/fork.c b/kernel/fork.c
1310index e5d9d405ae4e..c4be89c51f25 100644
1311--- a/kernel/fork.c
1312+++ b/kernel/fork.c
1313@@ -103,6 +103,11 @@
1314
1315 #define CREATE_TRACE_POINTS
1316 #include <trace/events/task.h>
1317+#ifdef CONFIG_USER_NS
1318+extern int unprivileged_userns_clone;
1319+#else
1320+#define unprivileged_userns_clone 0
1321+#endif
1322
1323 /*
1324 * Minimum number of threads to boot the kernel
1325@@ -1591,6 +1596,10 @@ static __latent_entropy struct task_struct *copy_process(
1326 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1327 return ERR_PTR(-EINVAL);
1328
1329+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
1330+ if (!capable(CAP_SYS_ADMIN))
1331+ return ERR_PTR(-EPERM);
1332+
1333 /*
1334 * Thread groups must share signals as well, and detached threads
1335 * can only be started up within the thread group.
1336@@ -2385,6 +2394,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1337 if (unshare_flags & CLONE_NEWNS)
1338 unshare_flags |= CLONE_FS;
1339
1340+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
1341+ err = -EPERM;
1342+ if (!capable(CAP_SYS_ADMIN))
1343+ goto bad_unshare_out;
1344+ }
1345+
1346 err = check_unshare_flags(unshare_flags);
1347 if (err)
1348 goto bad_unshare_out;
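
When unprivileged_userns_clone is 0, both clone(CLONE_NEWUSER) and unshare(CLONE_NEWUSER) now require CAP_SYS_ADMIN. A small reproducer for the unshare() path:

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER))
		perror("unshare(CLONE_NEWUSER)");	/* EPERM when disabled */
	else
		printf("user namespace created\n");
	return 0;
}
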
1349diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
1350index 3d37c279c090..0789ca413f09 100644
1351--- a/kernel/power/snapshot.c
1352+++ b/kernel/power/snapshot.c
1353@@ -1138,7 +1138,7 @@ void free_basic_memory_bitmaps(void)
1354
1355 void clear_free_pages(void)
1356 {
1357-#ifdef CONFIG_PAGE_POISONING_ZERO
1358+#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
1359 struct memory_bitmap *bm = free_pages_map;
1360 unsigned long pfn;
1361
1362@@ -1155,7 +1155,7 @@ void clear_free_pages(void)
1363 }
1364 memory_bm_position_reset(bm);
1365 pr_info("free pages cleared after restore\n");
1366-#endif /* PAGE_POISONING_ZERO */
1367+#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
1368 }
1369
1370 /**
1371diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
1372index a64eee0db39e..4d7de378fe4c 100644
1373--- a/kernel/rcu/tiny.c
1374+++ b/kernel/rcu/tiny.c
1375@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
1376 }
1377 }
1378
1379-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
1380+static __latent_entropy void rcu_process_callbacks(void)
1381 {
1382 __rcu_process_callbacks(&rcu_sched_ctrlblk);
1383 __rcu_process_callbacks(&rcu_bh_ctrlblk);
1384diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
1385index 491bdf39f276..28f51c774671 100644
1386--- a/kernel/rcu/tree.c
1387+++ b/kernel/rcu/tree.c
1388@@ -2906,7 +2906,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
1389 /*
1390 * Do RCU core processing for the current CPU.
1391 */
1392-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
1393+static __latent_entropy void rcu_process_callbacks(void)
1394 {
1395 struct rcu_state *rsp;
1396
1397diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1398index 5eb3ffc9be84..a60c59cbbaee 100644
1399--- a/kernel/sched/fair.c
1400+++ b/kernel/sched/fair.c
1401@@ -9387,7 +9387,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
1402 * run_rebalance_domains is triggered when needed from the scheduler tick.
1403 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
1404 */
1405-static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
1406+static __latent_entropy void run_rebalance_domains(void)
1407 {
1408 struct rq *this_rq = this_rq();
1409 enum cpu_idle_type idle = this_rq->idle_balance ?
1410diff --git a/kernel/softirq.c b/kernel/softirq.c
1411index 24d243ef8e71..4ed8a162cd70 100644
1412--- a/kernel/softirq.c
1413+++ b/kernel/softirq.c
1414@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
1415 EXPORT_SYMBOL(irq_stat);
1416 #endif
1417
1418-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
1419+static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
1420
1421 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
1422
1423@@ -282,7 +282,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
1424 kstat_incr_softirqs_this_cpu(vec_nr);
1425
1426 trace_softirq_entry(vec_nr);
1427- h->action(h);
1428+ h->action();
1429 trace_softirq_exit(vec_nr);
1430 if (unlikely(prev_count != preempt_count())) {
1431 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
1432@@ -444,7 +444,7 @@ void __raise_softirq_irqoff(unsigned int nr)
1433 or_softirq_pending(1UL << nr);
1434 }
1435
1436-void open_softirq(int nr, void (*action)(struct softirq_action *))
1437+void __init open_softirq(int nr, void (*action)(void))
1438 {
1439 softirq_vec[nr].action = action;
1440 }
1441@@ -486,7 +486,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
1442 }
1443 EXPORT_SYMBOL(__tasklet_hi_schedule);
1444
1445-static __latent_entropy void tasklet_action(struct softirq_action *a)
1446+static __latent_entropy void tasklet_action(void)
1447 {
1448 struct tasklet_struct *list;
1449
1450@@ -522,7 +522,7 @@ static __latent_entropy void tasklet_action(struct softirq_action *a)
1451 }
1452 }
1453
1454-static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
1455+static __latent_entropy void tasklet_hi_action(void)
1456 {
1457 struct tasklet_struct *list;
1458
1459diff --git a/kernel/sysctl.c b/kernel/sysctl.c
1460index f98f28c12020..861e8b721161 100644
1461--- a/kernel/sysctl.c
1462+++ b/kernel/sysctl.c
1463@@ -99,12 +99,19 @@
1464 #if defined(CONFIG_SYSCTL)
1465
1466 /* External variables not in a header file. */
1467+#if IS_ENABLED(CONFIG_USB)
1468+int deny_new_usb __read_mostly = 0;
1469+EXPORT_SYMBOL(deny_new_usb);
1470+#endif
1471 extern int suid_dumpable;
1472 #ifdef CONFIG_COREDUMP
1473 extern int core_uses_pid;
1474 extern char core_pattern[];
1475 extern unsigned int core_pipe_limit;
1476 #endif
1477+#ifdef CONFIG_USER_NS
1478+extern int unprivileged_userns_clone;
1479+#endif
1480 extern int pid_max;
1481 extern int pid_max_min, pid_max_max;
1482 extern int percpu_pagelist_fraction;
1483@@ -116,40 +123,43 @@ extern int sysctl_nr_trim_pages;
1484
1485 /* Constants used for minimum and maximum */
1486 #ifdef CONFIG_LOCKUP_DETECTOR
1487-static int sixty = 60;
1488+static int sixty __read_only = 60;
1489 #endif
1490
1491-static int __maybe_unused neg_one = -1;
1492+static int __maybe_unused neg_one __read_only = -1;
1493
1494 static int zero;
1495-static int __maybe_unused one = 1;
1496-static int __maybe_unused two = 2;
1497-static int __maybe_unused four = 4;
1498-static unsigned long one_ul = 1;
1499-static int one_hundred = 100;
1500-static int one_thousand = 1000;
1501+static int __maybe_unused one __read_only = 1;
1502+static int __maybe_unused two __read_only = 2;
1503+static int __maybe_unused four __read_only = 4;
1504+static unsigned long one_ul __read_only = 1;
1505+static int one_hundred __read_only = 100;
1506+static int one_thousand __read_only = 1000;
1507 #ifdef CONFIG_PRINTK
1508-static int ten_thousand = 10000;
1509+static int ten_thousand __read_only = 10000;
1510 #endif
1511 #ifdef CONFIG_PERF_EVENTS
1512-static int six_hundred_forty_kb = 640 * 1024;
1513+static int six_hundred_forty_kb __read_only = 640 * 1024;
1514 #endif
1515
1516 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
1517-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
1518+static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
1519
1520 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
1521-static int maxolduid = 65535;
1522-static int minolduid;
1523+static int maxolduid __read_only = 65535;
1524+static int minolduid __read_only;
1525
1526-static int ngroups_max = NGROUPS_MAX;
1527+static int ngroups_max __read_only = NGROUPS_MAX;
1528 static const int cap_last_cap = CAP_LAST_CAP;
1529
1530 /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
1531 #ifdef CONFIG_DETECT_HUNG_TASK
1532-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
1533+static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
1534 #endif
1535
1536+int device_sidechannel_restrict __read_mostly = 1;
1537+EXPORT_SYMBOL(device_sidechannel_restrict);
1538+
1539 #ifdef CONFIG_INOTIFY_USER
1540 #include <linux/inotify.h>
1541 #endif
1542@@ -289,19 +299,19 @@ static struct ctl_table sysctl_base_table[] = {
1543 };
1544
1545 #ifdef CONFIG_SCHED_DEBUG
1546-static int min_sched_granularity_ns = 100000; /* 100 usecs */
1547-static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
1548-static int min_wakeup_granularity_ns; /* 0 usecs */
1549-static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
1550+static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
1551+static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
1552+static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
1553+static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
1554 #ifdef CONFIG_SMP
1555-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
1556-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
1557+static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
1558+static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
1559 #endif /* CONFIG_SMP */
1560 #endif /* CONFIG_SCHED_DEBUG */
1561
1562 #ifdef CONFIG_COMPACTION
1563-static int min_extfrag_threshold;
1564-static int max_extfrag_threshold = 1000;
1565+static int min_extfrag_threshold __read_only;
1566+static int max_extfrag_threshold __read_only = 1000;
1567 #endif
1568
1569 static struct ctl_table kern_table[] = {
1570@@ -515,6 +525,15 @@ static struct ctl_table kern_table[] = {
1571 .proc_handler = proc_dointvec,
1572 },
1573 #endif
1574+#ifdef CONFIG_USER_NS
1575+ {
1576+ .procname = "unprivileged_userns_clone",
1577+ .data = &unprivileged_userns_clone,
1578+ .maxlen = sizeof(int),
1579+ .mode = 0644,
1580+ .proc_handler = proc_dointvec,
1581+ },
1582+#endif
1583 #ifdef CONFIG_PROC_SYSCTL
1584 {
1585 .procname = "tainted",
1586@@ -856,6 +875,26 @@ static struct ctl_table kern_table[] = {
1587 .extra1 = &zero,
1588 .extra2 = &two,
1589 },
1590+#endif
1591+ {
1592+ .procname = "device_sidechannel_restrict",
1593+ .data = &device_sidechannel_restrict,
1594+ .maxlen = sizeof(int),
1595+ .mode = 0644,
1596+ .proc_handler = proc_dointvec_minmax_sysadmin,
1597+ .extra1 = &zero,
1598+ .extra2 = &one,
1599+ },
1600+#if IS_ENABLED(CONFIG_USB)
1601+ {
1602+ .procname = "deny_new_usb",
1603+ .data = &deny_new_usb,
1604+ .maxlen = sizeof(int),
1605+ .mode = 0644,
1606+ .proc_handler = proc_dointvec_minmax_sysadmin,
1607+ .extra1 = &zero,
1608+ .extra2 = &one,
1609+ },
1610 #endif
1611 {
1612 .procname = "ngroups_max",
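
Once the sysctl.c change above is applied, the new knobs appear under
/proc/sys/kernel. A small helper for reading them from userspace; note that the
paths below only exist on a kernel carrying this patch:

    #include <stdio.h>

    /* Read a single integer sysctl via procfs; returns -1 on error. */
    static int read_int_sysctl(const char *path, int *val)
    {
        FILE *f = fopen(path, "r");
        if (!f)
            return -1;
        int ok = (fscanf(f, "%d", val) == 1) ? 0 : -1;
        fclose(f);
        return ok;
    }

    int main(void)
    {
        const char *keys[] = {
            "/proc/sys/kernel/unprivileged_userns_clone",
            "/proc/sys/kernel/device_sidechannel_restrict",
            "/proc/sys/kernel/deny_new_usb",
        };
        for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
            int v;
            if (read_int_sysctl(keys[i], &v) == 0)
                printf("%s = %d\n", keys[i], v);
            else
                printf("%s: not available\n", keys[i]);
        }
        return 0;
    }
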
1613diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
1614index 23788100e214..5577a58d1c7a 100644
1615--- a/kernel/time/hrtimer.c
1616+++ b/kernel/time/hrtimer.c
1617@@ -1413,7 +1413,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1618 }
1619 }
1620
1621-static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1622+static __latent_entropy void hrtimer_run_softirq(void)
1623 {
1624 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1625 unsigned long flags;
1626diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1627index 4a4fd567fb26..be236ef42690 100644
1628--- a/kernel/time/timer.c
1629+++ b/kernel/time/timer.c
1630@@ -1672,7 +1672,7 @@ static inline void __run_timers(struct timer_base *base)
1631 /*
1632 * This function runs timers and the timer-tq in bottom half context.
1633 */
1634-static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1635+static __latent_entropy void run_timer_softirq(void)
1636 {
1637 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1638
1639diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
1640index 246d4d4ce5c7..f64432b45cec 100644
1641--- a/kernel/user_namespace.c
1642+++ b/kernel/user_namespace.c
1643@@ -26,6 +26,9 @@
1644 #include <linux/bsearch.h>
1645 #include <linux/sort.h>
1646
1647+/* sysctl */
1648+int unprivileged_userns_clone;
1649+
1650 static struct kmem_cache *user_ns_cachep __read_mostly;
1651 static DEFINE_MUTEX(userns_state_mutex);
1652
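
The user_namespace.c hunk above only introduces the unprivileged_userns_clone
flag itself; the enforcement in the clone()/unshare() paths is not part of this
excerpt. A minimal userspace probe for whether unprivileged user namespace
creation is currently allowed (EPERM is expected while the sysctl is 0):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* With kernel.unprivileged_userns_clone = 0, an unprivileged
         * unshare(CLONE_NEWUSER) is expected to fail with EPERM. */
        if (unshare(CLONE_NEWUSER) == 0)
            puts("unprivileged user namespace created");
        else
            printf("unshare(CLONE_NEWUSER) failed: %s\n", strerror(errno));
        return 0;
    }
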
1653diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
1654index 64155e310a9f..0c002bbb8f75 100644
1655--- a/lib/Kconfig.debug
1656+++ b/lib/Kconfig.debug
1657@@ -945,6 +945,7 @@ endmenu # "Debug lockups and hangs"
1658
1659 config PANIC_ON_OOPS
1660 bool "Panic on Oops"
1661+ default y
1662 help
1663 Say Y here to enable the kernel to panic when it oopses. This
1664 has the same effect as setting oops=panic on the kernel command
1665@@ -954,7 +955,7 @@ config PANIC_ON_OOPS
1666 anything erroneous after an oops which could result in data
1667 corruption or other issues.
1668
1669- Say N if unsure.
1670+ Say Y if unsure.
1671
1672 config PANIC_ON_OOPS_VALUE
1673 int
1674@@ -1309,6 +1310,7 @@ config DEBUG_BUGVERBOSE
1675 config DEBUG_LIST
1676 bool "Debug linked list manipulation"
1677 depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
1678+ default y
1679 help
1680 Enable this to turn on extended checks in the linked-list
1681 walking routines.
1682@@ -1949,6 +1951,7 @@ config MEMTEST
1683 config BUG_ON_DATA_CORRUPTION
1684 bool "Trigger a BUG when data corruption is detected"
1685 select DEBUG_LIST
1686+ default y
1687 help
1688 Select this option if the kernel should BUG when it encounters
1689 data corruption in kernel memory structures when they get checked
1690@@ -1988,6 +1991,7 @@ config STRICT_DEVMEM
1691 config IO_STRICT_DEVMEM
1692 bool "Filter I/O access to /dev/mem"
1693 depends on STRICT_DEVMEM
1694+ default y
1695 ---help---
1696 If this option is disabled, you allow userspace (root) access to all
1697 io-memory regardless of whether a driver is actively using that
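
DEBUG_LIST and BUG_ON_DATA_CORRUPTION, now default-on, enable the linked-list
consistency checks in lib/list_debug.c. A simplified userspace illustration of
the kind of invariant those checks verify before an insertion (not the kernel
implementation):

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Simplified version of the test done before inserting "new" between
     * prev and next when list debugging is enabled. */
    static bool list_add_valid(struct list_head *new,
                               struct list_head *prev,
                               struct list_head *next)
    {
        return next->prev == prev &&       /* neighbours still agree */
               prev->next == next &&
               new != prev && new != next; /* no self-insertion */
    }

    int main(void)
    {
        struct list_head a, b, c;

        a.next = &b; a.prev = &b;
        b.next = &a; b.prev = &a;

        printf("insert c between a and b: %s\n",
               list_add_valid(&c, &a, &b) ? "ok" : "corrupted");

        b.prev = &c;                       /* simulate corruption */
        printf("after corruption:         %s\n",
               list_add_valid(&c, &a, &b) ? "ok" : "corrupted");
        return 0;
    }
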
1698diff --git a/lib/irq_poll.c b/lib/irq_poll.c
1699index 86a709954f5a..6f15787fcb1b 100644
1700--- a/lib/irq_poll.c
1701+++ b/lib/irq_poll.c
1702@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
1703 }
1704 EXPORT_SYMBOL(irq_poll_complete);
1705
1706-static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
1707+static void __latent_entropy irq_poll_softirq(void)
1708 {
1709 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
1710 int rearm = 0, budget = irq_poll_budget;
1711diff --git a/lib/kobject.c b/lib/kobject.c
1712index d20a97a7e168..7afe7fcfa6c9 100644
1713--- a/lib/kobject.c
1714+++ b/lib/kobject.c
1715@@ -954,9 +954,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
1716
1717
1718 static DEFINE_SPINLOCK(kobj_ns_type_lock);
1719-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
1720+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
1721
1722-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
1723+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
1724 {
1725 enum kobj_ns_type type = ops->type;
1726 int error;
1727diff --git a/lib/nlattr.c b/lib/nlattr.c
1728index dfa55c873c13..c6b0436f473d 100644
1729--- a/lib/nlattr.c
1730+++ b/lib/nlattr.c
1731@@ -364,6 +364,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
1732 {
1733 int minlen = min_t(int, count, nla_len(src));
1734
1735+ BUG_ON(minlen < 0);
1736+
1737 memcpy(dest, nla_data(src), minlen);
1738 if (count > minlen)
1739 memset(dest + minlen, 0, count - minlen);
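
The added BUG_ON keeps a negative minlen from reaching memcpy(), where it would
be converted to an enormous size_t. A tiny demonstration of why that implicit
conversion is the dangerous part:

    #include <stdio.h>

    int main(void)
    {
        int count = -4;            /* e.g. a miscomputed attribute length */
        size_t n = (size_t)count;  /* the conversion memcpy() would perform */

        /* On a 64-bit system this prints 18446744073709551612: a negative
         * int silently becomes a near-SIZE_MAX copy length. */
        printf("memcpy would be asked to copy %zu bytes\n", n);
        return 0;
    }
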
1740diff --git a/lib/vsprintf.c b/lib/vsprintf.c
1741index 38b509cc6b46..6b3bf13d57d6 100644
1742--- a/lib/vsprintf.c
1743+++ b/lib/vsprintf.c
1744@@ -1344,7 +1344,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
1745 return string(buf, end, uuid, spec);
1746 }
1747
1748-int kptr_restrict __read_mostly;
1749+int kptr_restrict __read_mostly = 2;
1750
1751 static noinline_for_stack
1752 char *restricted_pointer(char *buf, char *end, const void *ptr,
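
With kptr_restrict defaulting to 2, %pK-formatted kernel pointers are printed
as zeros regardless of the reader's privileges. A small userspace check;
/proc/kallsyms is used here only because its addresses go through %pK and so
read back as zeros while the restriction is active:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f;

        f = fopen("/proc/sys/kernel/kptr_restrict", "r");
        if (f) {
            if (fgets(line, sizeof(line), f))
                printf("kptr_restrict = %s", line);
            fclose(f);
        }

        /* Symbol addresses read as 0000000000000000 when restricted. */
        f = fopen("/proc/kallsyms", "r");
        if (f) {
            if (fgets(line, sizeof(line), f))
                printf("first kallsyms entry: %s", line);
            fclose(f);
        }
        return 0;
    }
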
1753diff --git a/mm/Kconfig b/mm/Kconfig
1754index e07608f64d47..18937911b87a 100644
1755--- a/mm/Kconfig
1756+++ b/mm/Kconfig
1757@@ -319,7 +319,8 @@ config KSM
1758 config DEFAULT_MMAP_MIN_ADDR
1759 int "Low address space to protect from user allocation"
1760 depends on MMU
1761- default 4096
1762+ default 32768 if ARM || (ARM64 && COMPAT)
1763+ default 65536
1764 help
1765 This is the portion of low virtual memory which should be protected
1766 from userspace allocation. Keeping a user from writing to low pages
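
Raising DEFAULT_MMAP_MIN_ADDR to 64 KiB means unprivileged fixed mappings of
the lowest pages are refused, which blunts NULL-pointer-dereference
exploitation. A small probe of that behaviour (EPERM is the expected result for
an ordinary user):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* Ask for one page at address 4096, below the 65536 default set
         * above; without CAP_SYS_RAWIO this should fail with EPERM. */
        void *p = mmap((void *)4096, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (p == MAP_FAILED)
            printf("low mapping refused: %s\n", strerror(errno));
        else
            printf("mapped at %p\n", p);
        return 0;
    }
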
1767diff --git a/mm/mmap.c b/mm/mmap.c
1768index 03ca089cce0f..9bf52aa6b042 100644
1769--- a/mm/mmap.c
1770+++ b/mm/mmap.c
1771@@ -220,6 +220,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
1772
1773 newbrk = PAGE_ALIGN(brk);
1774 oldbrk = PAGE_ALIGN(mm->brk);
1775+ /* properly handle unaligned min_brk as an empty heap */
1776+ if (min_brk & ~PAGE_MASK) {
1777+ if (brk == min_brk)
1778+ newbrk -= PAGE_SIZE;
1779+ if (mm->brk == min_brk)
1780+ oldbrk -= PAGE_SIZE;
1781+ }
1782 if (oldbrk == newbrk)
1783 goto set_brk;
1784
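
The brk() hunk above copes with a min_brk that is intentionally left unaligned
(an empty heap starting mid-page): without the adjustment, PAGE_ALIGN() makes
an empty heap appear one page long. A hedged arithmetic illustration of the
case being corrected, with PAGE_SIZE hard-coded for the example:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long min_brk = 0x561000UL + 0x123; /* unaligned heap start */
        unsigned long brk = min_brk;                /* empty heap */
        unsigned long newbrk = PAGE_ALIGN(brk);

        /* Without the fix the empty heap appears to span one full page. */
        printf("PAGE_ALIGN(brk) = %#lx, page start = %#lx\n",
               newbrk, min_brk & PAGE_MASK);

        if ((min_brk & ~PAGE_MASK) && brk == min_brk)
            newbrk -= PAGE_SIZE;                    /* what the hunk does */
        printf("adjusted newbrk = %#lx\n", newbrk);
        return 0;
    }
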
1785diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1786index 1741dd23e7c1..462ce9910e43 100644
1787--- a/mm/page_alloc.c
1788+++ b/mm/page_alloc.c
1789@@ -68,6 +68,7 @@
1790 #include <linux/ftrace.h>
1791 #include <linux/lockdep.h>
1792 #include <linux/nmi.h>
1793+#include <linux/random.h>
1794
1795 #include <asm/sections.h>
1796 #include <asm/tlbflush.h>
1797@@ -101,6 +102,15 @@ int _node_numa_mem_[MAX_NUMNODES];
1798 DEFINE_MUTEX(pcpu_drain_mutex);
1799 DEFINE_PER_CPU(struct work_struct, pcpu_drain);
1800
1801+bool __meminitdata extra_latent_entropy;
1802+
1803+static int __init setup_extra_latent_entropy(char *str)
1804+{
1805+ extra_latent_entropy = true;
1806+ return 0;
1807+}
1808+early_param("extra_latent_entropy", setup_extra_latent_entropy);
1809+
1810 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
1811 volatile unsigned long latent_entropy __latent_entropy;
1812 EXPORT_SYMBOL(latent_entropy);
1813@@ -1069,6 +1079,13 @@ static __always_inline bool free_pages_prepare(struct page *page,
1814 debug_check_no_obj_freed(page_address(page),
1815 PAGE_SIZE << order);
1816 }
1817+
1818+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
1819+ int i;
1820+ for (i = 0; i < (1 << order); i++)
1821+ clear_highpage(page + i);
1822+ }
1823+
1824 arch_free_page(page, order);
1825 kernel_poison_pages(page, 1 << order, 0);
1826 kernel_map_pages(page, 1 << order, 0);
1827@@ -1286,6 +1303,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1828 __ClearPageReserved(p);
1829 set_page_count(p, 0);
1830
1831+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
1832+ unsigned long hash = 0;
1833+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
1834+ const unsigned long *data = lowmem_page_address(page);
1835+
1836+ for (index = 0; index < end; index++)
1837+ hash ^= hash + data[index];
1838+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
1839+ latent_entropy ^= hash;
1840+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
1841+#else
1842+ add_device_randomness((const void *)&hash, sizeof(hash));
1843+#endif
1844+ }
1845+
1846 page_zone(page)->managed_pages += nr_pages;
1847 set_page_refcounted(page);
1848 __free_pages(page, order);
1849@@ -1754,8 +1786,8 @@ static inline int check_new_page(struct page *page)
1850
1851 static inline bool free_pages_prezeroed(void)
1852 {
1853- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1854- page_poisoning_enabled();
1855+ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
1856+ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
1857 }
1858
1859 #ifdef CONFIG_DEBUG_VM
1860@@ -1812,6 +1844,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
1861
1862 post_alloc_hook(page, order, gfp_flags);
1863
1864+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
1865+ for (i = 0; i < (1 << order); i++)
1866+ verify_zero_highpage(page + i);
1867+ }
1868+
1869 if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1870 for (i = 0; i < (1 << order); i++)
1871 clear_highpage(page + i);
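
The extra_latent_entropy path folds the contents of each page handed from the
bootmem allocator into a running hash and mixes it into the randomness pool,
while PAGE_SANITIZE zeroes pages as they are freed. A userspace rendering of
the same folding loop over a page-sized buffer; the kernel feeds the result to
add_device_randomness(), this sketch only shows the arithmetic:

    #include <stdio.h>
    #include <string.h>

    /* Same folding as the loop above: hash ^= hash + data[index]. */
    static unsigned long fold_buffer(const void *buf, size_t len)
    {
        const unsigned long *data = buf;
        unsigned long hash = 0;
        size_t end = len / sizeof(unsigned long);

        for (size_t index = 0; index < end; index++)
            hash ^= hash + data[index];
        return hash;
    }

    int main(void)
    {
        unsigned long page[512];   /* stand-in for one 4 KiB page */

        memset(page, 0, sizeof(page));
        strcpy((char *)page, "slightly non-uniform boot-time memory contents");

        printf("folded value: %#lx\n", fold_buffer(page, sizeof(page)));
        return 0;
    }
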
1872diff --git a/mm/slab.h b/mm/slab.h
1873index 51813236e773..e0d1b194a454 100644
1874--- a/mm/slab.h
1875+++ b/mm/slab.h
1876@@ -312,7 +312,11 @@ static inline bool is_root_cache(struct kmem_cache *s)
1877 static inline bool slab_equal_or_root(struct kmem_cache *s,
1878 struct kmem_cache *p)
1879 {
1880+#ifdef CONFIG_SLAB_HARDENED
1881+ return p == s;
1882+#else
1883 return true;
1884+#endif
1885 }
1886
1887 static inline const char *cache_name(struct kmem_cache *s)
1888@@ -364,18 +368,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
1889 * to not do even the assignment. In that case, slab_equal_or_root
1890 * will also be a constant.
1891 */
1892- if (!memcg_kmem_enabled() &&
1893+ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
1894+ !memcg_kmem_enabled() &&
1895 !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
1896 return s;
1897
1898 page = virt_to_head_page(x);
1899+#ifdef CONFIG_SLAB_HARDENED
1900+ BUG_ON(!PageSlab(page));
1901+#endif
1902 cachep = page->slab_cache;
1903 if (slab_equal_or_root(cachep, s))
1904 return cachep;
1905
1906 pr_err("%s: Wrong slab cache. %s but object is from %s\n",
1907 __func__, s->name, cachep->name);
1908+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
1909+ BUG_ON(1);
1910+#else
1911 WARN_ON_ONCE(1);
1912+#endif
1913 return s;
1914 }
1915
1916@@ -400,7 +412,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
1917 * back there or track user information then we can
1918 * only use the space before that information.
1919 */
1920- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
1921+ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
1922 return s->inuse;
1923 /*
1924 * Else we can use all the padding etc for the allocation
1925diff --git a/mm/slab_common.c b/mm/slab_common.c
1926index 10f127b2de7c..3637ef72c5b4 100644
1927--- a/mm/slab_common.c
1928+++ b/mm/slab_common.c
1929@@ -26,10 +26,10 @@
1930
1931 #include "slab.h"
1932
1933-enum slab_state slab_state;
1934+enum slab_state slab_state __ro_after_init;
1935 LIST_HEAD(slab_caches);
1936 DEFINE_MUTEX(slab_mutex);
1937-struct kmem_cache *kmem_cache;
1938+struct kmem_cache *kmem_cache __ro_after_init;
1939
1940 #ifdef CONFIG_HARDENED_USERCOPY
1941 bool usercopy_fallback __ro_after_init =
1942@@ -57,7 +57,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
1943 /*
1944 * Merge control. If this is set then no merging of slab caches will occur.
1945 */
1946-static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
1947+static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
1948
1949 static int __init setup_slab_nomerge(char *str)
1950 {
1951@@ -968,7 +968,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
1952 * of two cache sizes there. The size of larger slabs can be determined using
1953 * fls.
1954 */
1955-static s8 size_index[24] = {
1956+static s8 size_index[24] __ro_after_init = {
1957 3, /* 8 */
1958 4, /* 16 */
1959 5, /* 24 */
1960diff --git a/mm/slub.c b/mm/slub.c
1961index e381728a3751..76dd844d978a 100644
1962--- a/mm/slub.c
1963+++ b/mm/slub.c
1964@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
1965 #endif
1966 }
1967
1968+static inline bool has_sanitize(struct kmem_cache *s)
1969+{
1970+ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
1971+}
1972+
1973+static inline bool has_sanitize_verify(struct kmem_cache *s)
1974+{
1975+ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
1976+}
1977+
1978 void *fixup_red_left(struct kmem_cache *s, void *p)
1979 {
1980 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
1981@@ -299,6 +309,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
1982 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
1983 }
1984
1985+#ifdef CONFIG_SLAB_CANARY
1986+static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
1987+{
1988+ if (s->offset)
1989+ return object + s->offset + sizeof(void *);
1990+ return object + s->inuse;
1991+}
1992+
1993+static inline unsigned long get_canary_value(const void *canary, unsigned long value)
1994+{
1995+ return (value ^ (unsigned long)canary) & CANARY_MASK;
1996+}
1997+
1998+static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
1999+{
2000+ unsigned long *canary = get_canary(s, object);
2001+ *canary = get_canary_value(canary, value);
2002+}
2003+
2004+static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
2005+{
2006+ unsigned long *canary = get_canary(s, object);
2007+ BUG_ON(*canary != get_canary_value(canary, value));
2008+}
2009+#else
2010+#define set_canary(s, object, value)
2011+#define check_canary(s, object, value)
2012+#endif
2013+
2014 /* Loop over all objects in a slab */
2015 #define for_each_object(__p, __s, __addr, __objects) \
2016 for (__p = fixup_red_left(__s, __addr); \
2017@@ -486,13 +525,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
2018 * Debug settings:
2019 */
2020 #if defined(CONFIG_SLUB_DEBUG_ON)
2021-static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
2022+static slab_flags_t slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
2023 #else
2024-static slab_flags_t slub_debug;
2025+static slab_flags_t slub_debug __ro_after_init;
2026 #endif
2027
2028-static char *slub_debug_slabs;
2029-static int disable_higher_order_debug;
2030+static char *slub_debug_slabs __ro_after_init;
2031+static int disable_higher_order_debug __ro_after_init;
2032
2033 /*
2034 * slub is about to manipulate internal object metadata. This memory lies
2035@@ -552,6 +591,9 @@ static struct track *get_track(struct kmem_cache *s, void *object,
2036 else
2037 p = object + s->inuse;
2038
2039+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2040+ p = (void *)p + sizeof(void *);
2041+
2042 return p + alloc;
2043 }
2044
2045@@ -690,6 +732,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
2046 else
2047 off = s->inuse;
2048
2049+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2050+ off += sizeof(void *);
2051+
2052 if (s->flags & SLAB_STORE_USER)
2053 off += 2 * sizeof(struct track);
2054
2055@@ -819,6 +864,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
2056 /* Freepointer is placed after the object. */
2057 off += sizeof(void *);
2058
2059+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2060+ off += sizeof(void *);
2061+
2062 if (s->flags & SLAB_STORE_USER)
2063 /* We also have user information there */
2064 off += 2 * sizeof(struct track);
2065@@ -1420,8 +1468,9 @@ static void setup_object(struct kmem_cache *s, struct page *page,
2066 void *object)
2067 {
2068 setup_object_debug(s, page, object);
2069+ set_canary(s, object, s->random_inactive);
2070 kasan_init_slab_obj(s, object);
2071- if (unlikely(s->ctor)) {
2072+ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
2073 kasan_unpoison_object_data(s, object);
2074 s->ctor(object);
2075 kasan_poison_object_data(s, object);
2076@@ -2719,9 +2768,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2077 stat(s, ALLOC_FASTPATH);
2078 }
2079
2080- if (unlikely(gfpflags & __GFP_ZERO) && object)
2081+ if (has_sanitize_verify(s) && object) {
2082+ size_t offset = s->offset ? 0 : sizeof(void *);
2083+ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
2084+ if (s->ctor)
2085+ s->ctor(object);
2086+ if (unlikely(gfpflags & __GFP_ZERO) && offset)
2087+ memset(object, 0, sizeof(void *));
2088+ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
2089 memset(object, 0, s->object_size);
2090
2091+ if (object) {
2092+ check_canary(s, object, s->random_inactive);
2093+ set_canary(s, object, s->random_active);
2094+ }
2095+
2096 slab_post_alloc_hook(s, gfpflags, 1, &object);
2097
2098 return object;
2099@@ -2928,6 +2989,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
2100 void *tail_obj = tail ? : head;
2101 struct kmem_cache_cpu *c;
2102 unsigned long tid;
2103+ bool sanitize = has_sanitize(s);
2104+
2105+ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
2106+ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
2107+ void *x = head;
2108+
2109+ while (1) {
2110+ check_canary(s, x, s->random_active);
2111+ set_canary(s, x, s->random_inactive);
2112+
2113+ if (sanitize) {
2114+ memset(x + offset, 0, s->object_size - offset);
2115+ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
2116+ s->ctor(x);
2117+ }
2118+ if (x == tail_obj)
2119+ break;
2120+ x = get_freepointer(s, x);
2121+ }
2122+ }
2123+
2124 redo:
2125 /*
2126 * Determine the currently cpus per cpu slab.
2127@@ -3106,7 +3188,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2128 void **p)
2129 {
2130 struct kmem_cache_cpu *c;
2131- int i;
2132+ int i, k;
2133
2134 /* memcg and kmem_cache debug support */
2135 s = slab_pre_alloc_hook(s, flags);
2136@@ -3143,13 +3225,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2137 local_irq_enable();
2138
2139 /* Clear memory outside IRQ disabled fastpath loop */
2140- if (unlikely(flags & __GFP_ZERO)) {
2141+ if (has_sanitize_verify(s)) {
2142+ int j;
2143+
2144+ for (j = 0; j < i; j++) {
2145+ size_t offset = s->offset ? 0 : sizeof(void *);
2146+ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
2147+ if (s->ctor)
2148+ s->ctor(p[j]);
2149+ if (unlikely(flags & __GFP_ZERO) && offset)
2150+ memset(p[j], 0, sizeof(void *));
2151+ }
2152+ } else if (unlikely(flags & __GFP_ZERO)) {
2153 int j;
2154
2155 for (j = 0; j < i; j++)
2156 memset(p[j], 0, s->object_size);
2157 }
2158
2159+ for (k = 0; k < i; k++) {
2160+ check_canary(s, p[k], s->random_inactive);
2161+ set_canary(s, p[k], s->random_active);
2162+ }
2163+
2164 /* memcg and kmem_cache debug support */
2165 slab_post_alloc_hook(s, flags, size, p);
2166 return i;
2167@@ -3181,9 +3279,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
2168 * and increases the number of allocations possible without having to
2169 * take the list_lock.
2170 */
2171-static int slub_min_order;
2172-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
2173-static int slub_min_objects;
2174+static int slub_min_order __ro_after_init;
2175+static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
2176+static int slub_min_objects __ro_after_init;
2177
2178 /*
2179 * Calculate the order of allocation given an slab object size.
2180@@ -3353,6 +3451,7 @@ static void early_kmem_cache_node_alloc(int node)
2181 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2182 init_tracking(kmem_cache_node, n);
2183 #endif
2184+ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
2185 kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
2186 GFP_KERNEL);
2187 init_kmem_cache_node(n);
2188@@ -3509,6 +3608,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
2189 size += sizeof(void *);
2190 }
2191
2192+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
2193+ size += sizeof(void *);
2194+
2195 #ifdef CONFIG_SLUB_DEBUG
2196 if (flags & SLAB_STORE_USER)
2197 /*
2198@@ -3579,6 +3681,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
2199 #ifdef CONFIG_SLAB_FREELIST_HARDENED
2200 s->random = get_random_long();
2201 #endif
2202+#ifdef CONFIG_SLAB_CANARY
2203+ s->random_active = get_random_long();
2204+ s->random_inactive = get_random_long();
2205+#endif
2206
2207 if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
2208 s->reserved = sizeof(struct rcu_head);
2209@@ -3846,6 +3952,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
2210 offset -= s->red_left_pad;
2211 }
2212
2213+ check_canary(s, (void *)ptr - offset, s->random_active);
2214+
2215 /* Allow address range falling entirely within usercopy region. */
2216 if (offset >= s->useroffset &&
2217 offset - s->useroffset <= s->usersize &&
2218@@ -3879,7 +3987,11 @@ static size_t __ksize(const void *object)
2219 page = virt_to_head_page(object);
2220
2221 if (unlikely(!PageSlab(page))) {
2222+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
2223+ BUG_ON(!PageCompound(page));
2224+#else
2225 WARN_ON(!PageCompound(page));
2226+#endif
2227 return PAGE_SIZE << compound_order(page);
2228 }
2229
2230@@ -4744,7 +4856,7 @@ enum slab_stat_type {
2231 #define SO_TOTAL (1 << SL_TOTAL)
2232
2233 #ifdef CONFIG_MEMCG
2234-static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
2235+static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
2236
2237 static int __init setup_slub_memcg_sysfs(char *str)
2238 {
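
The SLAB_CANARY changes reserve an extra word per object, store a value derived
from a per-cache random secret XORed with the canary slot's own address, and
verify it on free, on bulk allocation and on usercopy. A userspace sketch of
that set/check arithmetic; CANARY_MASK and the object layout below are
stand-ins, since the real definitions live outside this excerpt:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the kernel's CANARY_MASK (defined outside this diff). */
    #define CANARY_MASK (~0UL)

    static unsigned long canary_value(const void *slot, unsigned long secret)
    {
        /* Mixing in the slot address makes every object's canary unique. */
        return (secret ^ (unsigned long)slot) & CANARY_MASK;
    }

    struct object {
        char payload[32];
        unsigned long canary;  /* the kernel puts this after the free pointer */
    };

    int main(void)
    {
        /* Per-cache secret; the kernel uses get_random_long() for this. */
        unsigned long secret_active = (unsigned long)0x5aa5f00dcafef00dULL;
        struct object *obj = malloc(sizeof(*obj));

        if (!obj)
            return 1;
        obj->canary = canary_value(&obj->canary, secret_active); /* set_canary */

        /* ... object in use; an overflow of payload[] would clobber it ... */

        /* check_canary(): a mismatch is treated as corruption (BUG). */
        assert(obj->canary == canary_value(&obj->canary, secret_active));
        puts("canary intact");
        free(obj);
        return 0;
    }
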
2239diff --git a/mm/swap.c b/mm/swap.c
2240index 0f17330dd0e5..6e52cb476ef5 100644
2241--- a/mm/swap.c
2242+++ b/mm/swap.c
2243@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page)
2244 if (!PageHuge(page))
2245 __page_cache_release(page);
2246 dtor = get_compound_page_dtor(page);
2247+ if (!PageHuge(page))
2248+ BUG_ON(dtor != free_compound_page
2249+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2250+ && dtor != free_transhuge_page
2251+#endif
2252+ );
2253+
2254 (*dtor)(page);
2255 }
2256
2257diff --git a/net/core/dev.c b/net/core/dev.c
2258index ace13bea3e50..77a1519c52b7 100644
2259--- a/net/core/dev.c
2260+++ b/net/core/dev.c
2261@@ -4196,7 +4196,7 @@ int netif_rx_ni(struct sk_buff *skb)
2262 }
2263 EXPORT_SYMBOL(netif_rx_ni);
2264
2265-static __latent_entropy void net_tx_action(struct softirq_action *h)
2266+static __latent_entropy void net_tx_action(void)
2267 {
2268 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
2269
2270@@ -5745,7 +5745,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
2271 return work;
2272 }
2273
2274-static __latent_entropy void net_rx_action(struct softirq_action *h)
2275+static __latent_entropy void net_rx_action(void)
2276 {
2277 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
2278 unsigned long time_limit = jiffies +
2279diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
2280index f48fe6fc7e8c..d78c52835c08 100644
2281--- a/net/ipv4/Kconfig
2282+++ b/net/ipv4/Kconfig
2283@@ -261,6 +261,7 @@ config IP_PIMSM_V2
2284
2285 config SYN_COOKIES
2286 bool "IP: TCP syncookie support"
2287+ default y
2288 ---help---
2289 Normal TCP/IP networking is open to an attack known as "SYN
2290 flooding". This denial-of-service attack prevents legitimate remote
2291diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
2292index 9917f928d0fd..784e0e2fc839 100644
2293--- a/scripts/mod/modpost.c
2294+++ b/scripts/mod/modpost.c
2295@@ -37,6 +37,7 @@ static int vmlinux_section_warnings = 1;
2296 static int warn_unresolved = 0;
2297 /* How a symbol is exported */
2298 static int sec_mismatch_count = 0;
2299+static int writable_fptr_count = 0;
2300 static int sec_mismatch_verbose = 1;
2301 static int sec_mismatch_fatal = 0;
2302 /* ignore missing files */
2303@@ -965,6 +966,7 @@ enum mismatch {
2304 ANY_EXIT_TO_ANY_INIT,
2305 EXPORT_TO_INIT_EXIT,
2306 EXTABLE_TO_NON_TEXT,
2307+ DATA_TO_TEXT
2308 };
2309
2310 /**
2311@@ -1091,6 +1093,12 @@ static const struct sectioncheck sectioncheck[] = {
2312 .good_tosec = {ALL_TEXT_SECTIONS , NULL},
2313 .mismatch = EXTABLE_TO_NON_TEXT,
2314 .handler = extable_mismatch_handler,
2315+},
2316+/* Do not reference code from writable data */
2317+{
2318+ .fromsec = { DATA_SECTIONS, NULL },
2319+ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
2320+ .mismatch = DATA_TO_TEXT
2321 }
2322 };
2323
2324@@ -1240,10 +1248,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
2325 continue;
2326 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
2327 continue;
2328- if (sym->st_value == addr)
2329- return sym;
2330 /* Find a symbol nearby - addr are maybe negative */
2331 d = sym->st_value - addr;
2332+ if (d == 0)
2333+ return sym;
2334 if (d < 0)
2335 d = addr - sym->st_value;
2336 if (d < distance) {
2337@@ -1402,7 +1410,11 @@ static void report_sec_mismatch(const char *modname,
2338 char *prl_from;
2339 char *prl_to;
2340
2341- sec_mismatch_count++;
2342+ if (mismatch->mismatch == DATA_TO_TEXT)
2343+ writable_fptr_count++;
2344+ else
2345+ sec_mismatch_count++;
2346+
2347 if (!sec_mismatch_verbose)
2348 return;
2349
2350@@ -1526,6 +1538,14 @@ static void report_sec_mismatch(const char *modname,
2351 fatal("There's a special handler for this mismatch type, "
2352 "we should never get here.");
2353 break;
2354+ case DATA_TO_TEXT:
2355+#if 0
2356+ fprintf(stderr,
2357+ "The %s %s:%s references\n"
2358+ "the %s %s:%s%s\n",
2359+ from, fromsec, fromsym, to, tosec, tosym, to_p);
2360+#endif
2361+ break;
2362 }
2363 fprintf(stderr, "\n");
2364 }
2365@@ -2539,6 +2559,14 @@ int main(int argc, char **argv)
2366 }
2367 }
2368 free(buf.p);
2369+ if (writable_fptr_count) {
2370+ if (!sec_mismatch_verbose) {
2371+ warn("modpost: Found %d writable function pointer(s).\n"
2372+ "To see full details build your kernel with:\n"
2373+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
2374+ writable_fptr_count);
2375+ }
2376+ }
2377
2378 return err;
2379 }
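
The new DATA_TO_TEXT check counts references from writable data sections into
text, i.e. function pointers that an attacker with a kernel write primitive
could redirect; only a summary is printed unless the kernel is built with
CONFIG_DEBUG_SECTION_MISMATCH=y. A small illustration of the distinction the
check is after (ops tables should be const so they land in .rodata):

    #include <stdio.h>

    struct ops {
        void (*run)(void);
    };

    static void run_impl(void) { puts("run"); }

    /* Lives in a writable data section: modpost's DATA_TO_TEXT case. */
    static struct ops writable_ops = { .run = run_impl };

    /* Lives in .rodata: no writable function pointer left to corrupt. */
    static const struct ops readonly_ops = { .run = run_impl };

    int main(void)
    {
        writable_ops.run();
        readonly_ops.run();
        return 0;
    }
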
2380diff --git a/security/Kconfig b/security/Kconfig
2381index c4302067a3ad..1b1e9368cae5 100644
2382--- a/security/Kconfig
2383+++ b/security/Kconfig
2384@@ -8,7 +8,7 @@ source security/keys/Kconfig
2385
2386 config SECURITY_DMESG_RESTRICT
2387 bool "Restrict unprivileged access to the kernel syslog"
2388- default n
2389+ default y
2390 help
2391 This enforces restrictions on unprivileged users reading the kernel
2392 syslog via dmesg(8).
2393@@ -18,10 +18,21 @@ config SECURITY_DMESG_RESTRICT
2394
2395 If you are unsure how to answer this question, answer N.
2396
2397+config SECURITY_PERF_EVENTS_RESTRICT
2398+ bool "Restrict unprivileged use of performance events"
2399+ depends on PERF_EVENTS
2400+ default y
2401+ help
2402+ If you say Y here, the kernel.perf_event_paranoid sysctl
2403+ will be set to 3 by default, and no unprivileged use of the
2404+ perf_event_open syscall will be permitted unless it is
2405+ changed.
2406+
2407 config SECURITY
2408 bool "Enable different security models"
2409 depends on SYSFS
2410 depends on MULTIUSER
2411+ default y
2412 help
2413 This allows you to choose different security modules to be
2414 configured into your kernel.
2415@@ -48,6 +59,7 @@ config SECURITYFS
2416 config SECURITY_NETWORK
2417 bool "Socket and Networking Security Hooks"
2418 depends on SECURITY
2419+ default y
2420 help
2421 This enables the socket and networking security hooks.
2422 If enabled, a security module can use these hooks to
2423@@ -155,6 +167,7 @@ config HARDENED_USERCOPY
2424 depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
2425 select BUG
2426 imply STRICT_DEVMEM
2427+ default y
2428 help
2429 This option checks for obviously wrong memory regions when
2430 copying memory to/from the kernel (via copy_to_user() and
2431@@ -167,7 +180,6 @@ config HARDENED_USERCOPY
2432 config HARDENED_USERCOPY_FALLBACK
2433 bool "Allow usercopy whitelist violations to fallback to object size"
2434 depends on HARDENED_USERCOPY
2435- default y
2436 help
2437 This is a temporary option that allows missing usercopy whitelists
2438 to be discovered via a WARN() to the kernel log, instead of
2439@@ -192,10 +204,36 @@ config HARDENED_USERCOPY_PAGESPAN
2440 config FORTIFY_SOURCE
2441 bool "Harden common str/mem functions against buffer overflows"
2442 depends on ARCH_HAS_FORTIFY_SOURCE
2443+ default y
2444 help
2445 Detect overflows of buffers in common string and memory functions
2446 where the compiler can determine and validate the buffer sizes.
2447
2448+config FORTIFY_SOURCE_STRICT_STRING
2449+ bool "Harden common functions against buffer overflows"
2450+ depends on FORTIFY_SOURCE
2451+ depends on EXPERT
2452+ help
2453+ Perform stricter overflow checks catching overflows within objects
2454+ for common C string functions rather than only between objects.
2455+
2456+ This is not yet intended for production use; it is only meant for finding bugs.
2457+
2458+config PAGE_SANITIZE
2459+ bool "Sanitize pages"
2460+ default y
2461+ help
2462+ Zero fill page allocations on free, reducing the lifetime of
2463+ sensitive data and helping to mitigate use-after-free bugs.
2464+
2465+config PAGE_SANITIZE_VERIFY
2466+ bool "Verify sanitized pages"
2467+ depends on PAGE_SANITIZE
2468+ default y
2469+ help
2470+ Verify that newly allocated pages are zeroed to detect
2471+ write-after-free bugs.
2472+
2473 config STATIC_USERMODEHELPER
2474 bool "Force all usermode helper calls through a single binary"
2475 help
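
SECURITY_PERF_EVENTS_RESTRICT makes perf_event_paranoid default to 3, so
unprivileged perf_event_open() calls are rejected until an administrator lowers
the sysctl. A hedged probe, with the attribute setup trimmed to the minimum
needed for a self-monitoring software counter:

    #include <errno.h>
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        /* Measure the calling process only; no extra privileges requested. */
        long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0UL);
        if (fd < 0)
            printf("perf_event_open rejected: %s\n", strerror(errno));
        else
            printf("perf_event_open succeeded (fd %ld)\n", fd);
        return 0;
    }
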
2476diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
2477index 8af7a690eb40..6539694b0fd3 100644
2478--- a/security/selinux/Kconfig
2479+++ b/security/selinux/Kconfig
2480@@ -2,7 +2,7 @@ config SECURITY_SELINUX
2481 bool "NSA SELinux Support"
2482 depends on SECURITY_NETWORK && AUDIT && NET && INET
2483 select NETWORK_SECMARK
2484- default n
2485+ default y
2486 help
2487 This selects NSA Security-Enhanced Linux (SELinux).
2488 You will also need a policy configuration and a labeled filesystem.
2489@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS
2490 This option collects access vector cache statistics to
2491 /selinux/avc/cache_stats, which may be monitored via
2492 tools such as avcstat.
2493-
2494-config SECURITY_SELINUX_CHECKREQPROT_VALUE
2495- int "NSA SELinux checkreqprot default value"
2496- depends on SECURITY_SELINUX
2497- range 0 1
2498- default 0
2499- help
2500- This option sets the default value for the 'checkreqprot' flag
2501- that determines whether SELinux checks the protection requested
2502- by the application or the protection that will be applied by the
2503- kernel (including any implied execute for read-implies-exec) for
2504- mmap and mprotect calls. If this option is set to 0 (zero),
2505- SELinux will default to checking the protection that will be applied
2506- by the kernel. If this option is set to 1 (one), SELinux will
2507- default to checking the protection requested by the application.
2508- The checkreqprot flag may be changed from the default via the
2509- 'checkreqprot=' boot parameter. It may also be changed at runtime
2510- via /selinux/checkreqprot if authorized by policy.
2511-
2512- If you are unsure how to answer this question, answer 0.
2513diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
2514index 3d54468ce334..ceafb908afde 100644
2515--- a/security/selinux/include/objsec.h
2516+++ b/security/selinux/include/objsec.h
2517@@ -154,6 +154,6 @@ struct bpf_security_struct {
2518 u32 sid; /*SID of bpf obj creater*/
2519 };
2520
2521-extern unsigned int selinux_checkreqprot;
2522+extern const unsigned int selinux_checkreqprot;
2523
2524 #endif /* _SELINUX_OBJSEC_H_ */
2525diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
2526index 00eed842c491..8f7b8d7e6f91 100644
2527--- a/security/selinux/selinuxfs.c
2528+++ b/security/selinux/selinuxfs.c
2529@@ -41,16 +41,7 @@
2530 #include "objsec.h"
2531 #include "conditional.h"
2532
2533-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
2534-
2535-static int __init checkreqprot_setup(char *str)
2536-{
2537- unsigned long checkreqprot;
2538- if (!kstrtoul(str, 0, &checkreqprot))
2539- selinux_checkreqprot = checkreqprot ? 1 : 0;
2540- return 1;
2541-}
2542-__setup("checkreqprot=", checkreqprot_setup);
2543+const unsigned int selinux_checkreqprot;
2544
2545 static DEFINE_MUTEX(sel_mutex);
2546
2547@@ -610,10 +601,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
2548 return PTR_ERR(page);
2549
2550 length = -EINVAL;
2551- if (sscanf(page, "%u", &new_value) != 1)
2552+ if (sscanf(page, "%u", &new_value) != 1 || new_value)
2553 goto out;
2554
2555- selinux_checkreqprot = new_value ? 1 : 0;
2556 length = count;
2557 out:
2558 kfree(page);
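
With checkreqprot reduced to a constant 0, sel_write_checkreqprot() above
accepts only a zero value and rejects anything else with EINVAL, so the legacy
behaviour cannot be re-enabled at runtime. A hedged probe; the path assumes
selinuxfs is mounted at /sys/fs/selinux and the write itself needs the usual
policy permissions:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/fs/selinux/checkreqprot";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            printf("open %s: %s\n", path, strerror(errno));
            return 1;
        }
        /* On a kernel with this patch a non-zero value yields EINVAL. */
        if (write(fd, "1", 1) < 0)
            printf("write \"1\": %s\n", strerror(errno));
        else
            puts("write \"1\" accepted (unpatched behaviour)");
        close(fd);
        return 0;
    }
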
2559diff --git a/security/yama/Kconfig b/security/yama/Kconfig
2560index 96b27405558a..485c1b85c325 100644
2561--- a/security/yama/Kconfig
2562+++ b/security/yama/Kconfig
2563@@ -1,7 +1,7 @@
2564 config SECURITY_YAMA
2565 bool "Yama support"
2566 depends on SECURITY
2567- default n
2568+ default y
2569 help
2570 This selects Yama, which extends DAC support with additional
2571 system-wide security settings beyond regular Linux discretionary