Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'cfi-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull CFI on arm64 support from Kees Cook:
"This builds on last cycle's LTO work, and allows the arm64 kernels to
be built with Clang's Control Flow Integrity feature. This feature has
happily lived in Android kernels for almost 3 years[1], so I'm excited
to have it ready for upstream.

The wide diffstat is mainly due to the treewide fixing of mismatched
list_sort prototypes. Other things in core kernel are to address
various CFI corner cases. The largest code portion is the CFI runtime
implementation itself (which will be shared by all architectures
implementing support for CFI). The arm64 pieces are Acked by arm64
maintainers rather than coming through the arm64 tree since carrying
this tree over there was going to be awkward.

CFI support for x86 is still under development, but is pretty close.
There are a handful of corner cases on x86 that need some improvements
to Clang and objtool, but it otherwise works well.

Summary:

- Clean up list_sort prototypes (Sami Tolvanen)

- Introduce CONFIG_CFI_CLANG for arm64 (Sami Tolvanen)"

* tag 'cfi-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
arm64: allow CONFIG_CFI_CLANG to be selected
KVM: arm64: Disable CFI for nVHE
arm64: ftrace: use function_nocfi for ftrace_call
arm64: add __nocfi to __apply_alternatives
arm64: add __nocfi to functions that jump to a physical address
arm64: use function_nocfi with __pa_symbol
arm64: implement function_nocfi
psci: use function_nocfi for cpu_resume
lkdtm: use function_nocfi
treewide: Change list_sort to use const pointers
bpf: disable CFI in dispatcher functions
kallsyms: strip ThinLTO hashes from static functions
kthread: use WARN_ON_FUNCTION_MISMATCH
workqueue: use WARN_ON_FUNCTION_MISMATCH
module: ensure __cfi_check alignment
mm: add generic function_nocfi macro
cfi: add __cficanonical
add support for Clang CFI

+760 -113
+17
Makefile
··· 924 924 export CC_FLAGS_LTO 925 925 endif 926 926 927 + ifdef CONFIG_CFI_CLANG 928 + CC_FLAGS_CFI := -fsanitize=cfi \ 929 + -fsanitize-cfi-cross-dso \ 930 + -fno-sanitize-cfi-canonical-jump-tables \ 931 + -fno-sanitize-trap=cfi \ 932 + -fno-sanitize-blacklist 933 + 934 + ifdef CONFIG_CFI_PERMISSIVE 935 + CC_FLAGS_CFI += -fsanitize-recover=cfi 936 + endif 937 + 938 + # If LTO flags are filtered out, we must also filter out CFI. 939 + CC_FLAGS_LTO += $(CC_FLAGS_CFI) 940 + KBUILD_CFLAGS += $(CC_FLAGS_CFI) 941 + export CC_FLAGS_CFI 942 + endif 943 + 927 944 ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B 928 945 KBUILD_CFLAGS += -falign-functions=32 929 946 endif
+45
arch/Kconfig
··· 692 692 If unsure, say Y. 693 693 endchoice 694 694 695 + config ARCH_SUPPORTS_CFI_CLANG 696 + bool 697 + help 698 + An architecture should select this option if it can support Clang's 699 + Control-Flow Integrity (CFI) checking. 700 + 701 + config CFI_CLANG 702 + bool "Use Clang's Control Flow Integrity (CFI)" 703 + depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG 704 + # Clang >= 12: 705 + # - https://bugs.llvm.org/show_bug.cgi?id=46258 706 + # - https://bugs.llvm.org/show_bug.cgi?id=47479 707 + depends on CLANG_VERSION >= 120000 708 + select KALLSYMS 709 + help 710 + This option enables Clang’s forward-edge Control Flow Integrity 711 + (CFI) checking, where the compiler injects a runtime check to each 712 + indirect function call to ensure the target is a valid function with 713 + the correct static type. This restricts possible call targets and 714 + makes it more difficult for an attacker to exploit bugs that allow 715 + the modification of stored function pointers. More information can be 716 + found from Clang's documentation: 717 + 718 + https://clang.llvm.org/docs/ControlFlowIntegrity.html 719 + 720 + config CFI_CLANG_SHADOW 721 + bool "Use CFI shadow to speed up cross-module checks" 722 + default y 723 + depends on CFI_CLANG && MODULES 724 + help 725 + If you select this option, the kernel builds a fast look-up table of 726 + CFI check functions in loaded modules to reduce performance overhead. 727 + 728 + If unsure, say Y. 729 + 730 + config CFI_PERMISSIVE 731 + bool "Use CFI in permissive mode" 732 + depends on CFI_CLANG 733 + help 734 + When selected, Control Flow Integrity (CFI) violations result in a 735 + warning instead of a kernel panic. This option should only be used 736 + for finding indirect call type mismatches during development. 737 + 738 + If unsure, say N. 739 + 695 740 config HAVE_ARCH_WITHIN_STACK_FRAMES 696 741 bool 697 742 help
+1
arch/arm64/Kconfig
··· 75 75 select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK 76 76 select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN 77 77 select ARCH_SUPPORTS_LTO_CLANG_THIN 78 + select ARCH_SUPPORTS_CFI_CLANG 78 79 select ARCH_SUPPORTS_ATOMIC_RMW 79 80 select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG) 80 81 select ARCH_SUPPORTS_NUMA_BALANCING
+16
arch/arm64/include/asm/memory.h
··· 323 323 #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) 324 324 #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) 325 325 326 + #ifdef CONFIG_CFI_CLANG 327 + /* 328 + * With CONFIG_CFI_CLANG, the compiler replaces function address 329 + * references with the address of the function's CFI jump table 330 + * entry. The function_nocfi macro always returns the address of the 331 + * actual function instead. 332 + */ 333 + #define function_nocfi(x) ({ \ 334 + void *addr; \ 335 + asm("adrp %0, " __stringify(x) "\n\t" \ 336 + "add %0, %0, :lo12:" __stringify(x) \ 337 + : "=r" (addr)); \ 338 + addr; \ 339 + }) 340 + #endif 341 + 326 342 /* 327 343 * virt_to_page(x) convert a _valid_ virtual address to struct page * 328 344 * virt_addr_valid(x) indicates whether a virtual address is valid
+2 -2
arch/arm64/include/asm/mmu_context.h
··· 119 119 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, 120 120 * avoiding the possibility of conflicting TLB entries being allocated. 121 121 */ 122 - static inline void cpu_replace_ttbr1(pgd_t *pgdp) 122 + static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp) 123 123 { 124 124 typedef void (ttbr_replace_func)(phys_addr_t); 125 125 extern ttbr_replace_func idmap_cpu_replace_ttbr1; ··· 140 140 ttbr1 |= TTBR_CNP_BIT; 141 141 } 142 142 143 - replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); 143 + replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1)); 144 144 145 145 cpu_install_idmap(); 146 146 replace_phys(ttbr1);
+2 -1
arch/arm64/kernel/acpi_parking_protocol.c
··· 99 99 * that read this address need to convert this address to the 100 100 * Boot-Loader's endianness before jumping. 101 101 */ 102 - writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point); 102 + writeq_relaxed(__pa_symbol(function_nocfi(secondary_entry)), 103 + &mailbox->entry_point); 103 104 writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); 104 105 105 106 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+2 -2
arch/arm64/kernel/alternative.c
··· 133 133 } while (cur += d_size, cur < end); 134 134 } 135 135 136 - static void __apply_alternatives(void *alt_region, bool is_module, 137 - unsigned long *feature_mask) 136 + static void __nocfi __apply_alternatives(void *alt_region, bool is_module, 137 + unsigned long *feature_mask) 138 138 { 139 139 struct alt_instr *alt; 140 140 struct alt_region *region = alt_region;
+5 -5
arch/arm64/kernel/cpu-reset.h
··· 13 13 void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry, 14 14 unsigned long arg0, unsigned long arg1, unsigned long arg2); 15 15 16 - static inline void __noreturn cpu_soft_restart(unsigned long entry, 17 - unsigned long arg0, 18 - unsigned long arg1, 19 - unsigned long arg2) 16 + static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry, 17 + unsigned long arg0, 18 + unsigned long arg1, 19 + unsigned long arg2) 20 20 { 21 21 typeof(__cpu_soft_restart) *restart; 22 22 23 23 unsigned long el2_switch = !is_kernel_in_hyp_mode() && 24 24 is_hyp_mode_available(); 25 - restart = (void *)__pa_symbol(__cpu_soft_restart); 25 + restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart)); 26 26 27 27 cpu_install_idmap(); 28 28 restart(el2_switch, entry, arg0, arg1, arg2);
+2 -2
arch/arm64/kernel/cpufeature.c
··· 1451 1451 } 1452 1452 1453 1453 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1454 - static void 1454 + static void __nocfi 1455 1455 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) 1456 1456 { 1457 1457 typedef void (kpti_remap_fn)(int, int, phys_addr_t); ··· 1468 1468 if (arm64_use_ng_mappings) 1469 1469 return; 1470 1470 1471 - remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 1471 + remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings)); 1472 1472 1473 1473 cpu_install_idmap(); 1474 1474 remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+1 -1
arch/arm64/kernel/ftrace.c
··· 55 55 unsigned long pc; 56 56 u32 new; 57 57 58 - pc = (unsigned long)&ftrace_call; 58 + pc = (unsigned long)function_nocfi(ftrace_call); 59 59 new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, 60 60 AARCH64_INSN_BRANCH_LINK); 61 61
+2 -1
arch/arm64/kernel/psci.c
··· 38 38 39 39 static int cpu_psci_cpu_boot(unsigned int cpu) 40 40 { 41 - int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry)); 41 + phys_addr_t pa_secondary_entry = __pa_symbol(function_nocfi(secondary_entry)); 42 + int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry); 42 43 if (err) 43 44 pr_err("failed to boot CPU%d (%d)\n", cpu, err); 44 45
+2 -1
arch/arm64/kernel/smp_spin_table.c
··· 66 66 static int smp_spin_table_cpu_prepare(unsigned int cpu) 67 67 { 68 68 __le64 __iomem *release_addr; 69 + phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen)); 69 70 70 71 if (!cpu_release_addr[cpu]) 71 72 return -ENODEV; ··· 89 88 * boot-loader's endianness before jumping. This is mandated by 90 89 * the boot protocol. 91 90 */ 92 - writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr); 91 + writeq_relaxed(pa_holding_pen, release_addr); 93 92 __flush_dcache_area((__force void *)release_addr, 94 93 sizeof(*release_addr)); 95 94
+3 -3
arch/arm64/kvm/hyp/nvhe/Makefile
··· 75 75 quiet_cmd_hypcopy = HYPCOPY $@ 76 76 cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@ 77 77 78 - # Remove ftrace and Shadow Call Stack CFLAGS. 79 - # This is equivalent to the 'notrace' and '__noscs' annotations. 80 - KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) 78 + # Remove ftrace, Shadow Call Stack, and CFI CFLAGS. 79 + # This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations. 80 + KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) 81 81 82 82 # KVM nVHE code is run at a different exception code with a different map, so 83 83 # compiler instrumentation that inserts callbacks or checks into the code may
+4 -4
arch/arm64/kvm/vgic/vgic-its.c
··· 2190 2190 return offset; 2191 2191 } 2192 2192 2193 - static int vgic_its_ite_cmp(void *priv, struct list_head *a, 2194 - struct list_head *b) 2193 + static int vgic_its_ite_cmp(void *priv, const struct list_head *a, 2194 + const struct list_head *b) 2195 2195 { 2196 2196 struct its_ite *itea = container_of(a, struct its_ite, ite_list); 2197 2197 struct its_ite *iteb = container_of(b, struct its_ite, ite_list); ··· 2329 2329 return offset; 2330 2330 } 2331 2331 2332 - static int vgic_its_device_cmp(void *priv, struct list_head *a, 2333 - struct list_head *b) 2332 + static int vgic_its_device_cmp(void *priv, const struct list_head *a, 2333 + const struct list_head *b) 2334 2334 { 2335 2335 struct its_device *deva = container_of(a, struct its_device, dev_list); 2336 2336 struct its_device *devb = container_of(b, struct its_device, dev_list);
+2 -1
arch/arm64/kvm/vgic/vgic.c
··· 255 255 * Return negative if "a" sorts before "b", 0 to preserve order, and positive 256 256 * to sort "b" before "a". 257 257 */ 258 - static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) 258 + static int vgic_irq_cmp(void *priv, const struct list_head *a, 259 + const struct list_head *b) 259 260 { 260 261 struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list); 261 262 struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
+2 -1
block/blk-mq-sched.c
··· 75 75 blk_mq_run_hw_queue(hctx, true); 76 76 } 77 77 78 - static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 78 + static int sched_rq_cmp(void *priv, const struct list_head *a, 79 + const struct list_head *b) 79 80 { 80 81 struct request *rqa = container_of(a, struct request, queuelist); 81 82 struct request *rqb = container_of(b, struct request, queuelist);
+2 -1
block/blk-mq.c
··· 1895 1895 spin_unlock(&ctx->lock); 1896 1896 } 1897 1897 1898 - static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) 1898 + static int plug_rq_cmp(void *priv, const struct list_head *a, 1899 + const struct list_head *b) 1899 1900 { 1900 1901 struct request *rqa = container_of(a, struct request, queuelist); 1901 1902 struct request *rqb = container_of(b, struct request, queuelist);
+2 -1
drivers/acpi/nfit/core.c
··· 1195 1195 return 0; 1196 1196 } 1197 1197 1198 - static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 1198 + static int nfit_mem_cmp(void *priv, const struct list_head *_a, 1199 + const struct list_head *_b) 1199 1200 { 1200 1201 struct nfit_mem *a = container_of(_a, typeof(*a), list); 1201 1202 struct nfit_mem *b = container_of(_b, typeof(*b), list);
+2 -1
drivers/acpi/numa/hmat.c
··· 558 558 return updated; 559 559 } 560 560 561 - static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b) 561 + static int initiator_cmp(void *priv, const struct list_head *a, 562 + const struct list_head *b) 562 563 { 563 564 struct memory_initiator *ia; 564 565 struct memory_initiator *ib;
+2 -2
drivers/clk/keystone/sci-clk.c
··· 503 503 504 504 #else 505 505 506 - static int _cmp_sci_clk_list(void *priv, struct list_head *a, 507 - struct list_head *b) 506 + static int _cmp_sci_clk_list(void *priv, const struct list_head *a, 507 + const struct list_head *b) 508 508 { 509 509 struct sci_clk *ca = container_of(a, struct sci_clk, node); 510 510 struct sci_clk *cb = container_of(b, struct sci_clk, node);
+5 -2
drivers/firmware/psci/psci.c
··· 325 325 static int psci_suspend_finisher(unsigned long state) 326 326 { 327 327 u32 power_state = state; 328 + phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); 328 329 329 - return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume)); 330 + return psci_ops.cpu_suspend(power_state, pa_cpu_resume); 330 331 } 331 332 332 333 int psci_cpu_suspend_enter(u32 state) ··· 345 344 346 345 static int psci_system_suspend(unsigned long unused) 347 346 { 347 + phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume)); 348 + 348 349 return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), 349 - __pa_symbol(cpu_resume), 0, 0); 350 + pa_cpu_resume, 0, 0); 350 351 } 351 352 352 353 static int psci_system_suspend_enter(suspend_state_t state)
+2 -1
drivers/gpu/drm/drm_modes.c
··· 1290 1290 * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or 1291 1291 * positive if @lh_b is better than @lh_a. 1292 1292 */ 1293 - static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b) 1293 + static int drm_mode_compare(void *priv, const struct list_head *lh_a, 1294 + const struct list_head *lh_b) 1294 1295 { 1295 1296 struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); 1296 1297 struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+2 -1
drivers/gpu/drm/i915/gt/intel_engine_user.c
··· 49 49 [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE, 50 50 }; 51 51 52 - static int engine_cmp(void *priv, struct list_head *A, struct list_head *B) 52 + static int engine_cmp(void *priv, const struct list_head *A, 53 + const struct list_head *B) 53 54 { 54 55 const struct intel_engine_cs *a = 55 56 container_of((struct rb_node *)A, typeof(*a), uabi_node);
+1 -1
drivers/gpu/drm/i915/gvt/debugfs.c
··· 41 41 42 42 /* Compare two diff_mmio items. */ 43 43 static int mmio_offset_compare(void *priv, 44 - struct list_head *a, struct list_head *b) 44 + const struct list_head *a, const struct list_head *b) 45 45 { 46 46 struct diff_mmio *ma; 47 47 struct diff_mmio *mb;
+2 -1
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
··· 1076 1076 return exercise_ppgtt(arg, shrink_boom); 1077 1077 } 1078 1078 1079 - static int sort_holes(void *priv, struct list_head *A, struct list_head *B) 1079 + static int sort_holes(void *priv, const struct list_head *A, 1080 + const struct list_head *B) 1080 1081 { 1081 1082 struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack); 1082 1083 struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
+2 -2
drivers/gpu/drm/radeon/radeon_cs.c
··· 393 393 return 0; 394 394 } 395 395 396 - static int cmp_size_smaller_first(void *priv, struct list_head *a, 397 - struct list_head *b) 396 + static int cmp_size_smaller_first(void *priv, const struct list_head *a, 397 + const struct list_head *b) 398 398 { 399 399 struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head); 400 400 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
+2 -1
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
··· 83 83 return interval; 84 84 } 85 85 86 - static int interval_cmp(void *priv, struct list_head *a, struct list_head *b) 86 + static int interval_cmp(void *priv, const struct list_head *a, 87 + const struct list_head *b) 87 88 { 88 89 struct usnic_uiom_interval_node *node_a, *node_b; 89 90
+1 -1
drivers/interconnect/qcom/bcm-voter.c
··· 39 39 u32 tcs_wait; 40 40 }; 41 41 42 - static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b) 42 + static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b) 43 43 { 44 44 const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list); 45 45 const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);
+2 -1
drivers/md/raid5.c
··· 953 953 submit_bio_noacct(bio); 954 954 } 955 955 956 - static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b) 956 + static int cmp_stripe(void *priv, const struct list_head *a, 957 + const struct list_head *b) 957 958 { 958 959 const struct r5pending_data *da = list_entry(a, 959 960 struct r5pending_data, sibling);
+1 -1
drivers/misc/lkdtm/usercopy.c
··· 314 314 315 315 pr_info("attempting bad copy_to_user from kernel text: %px\n", 316 316 vm_mmap); 317 - if (copy_to_user((void __user *)user_addr, vm_mmap, 317 + if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap), 318 318 unconst + PAGE_SIZE)) { 319 319 pr_warn("copy_to_user failed, but lacked Oops\n"); 320 320 goto free_user;
+2 -2
drivers/misc/sram.c
··· 144 144 } 145 145 } 146 146 147 - static int sram_reserve_cmp(void *priv, struct list_head *a, 148 - struct list_head *b) 147 + static int sram_reserve_cmp(void *priv, const struct list_head *a, 148 + const struct list_head *b) 149 149 { 150 150 struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); 151 151 struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
+2 -1
drivers/nvme/host/core.c
··· 3855 3855 return ret; 3856 3856 } 3857 3857 3858 - static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) 3858 + static int ns_cmp(void *priv, const struct list_head *a, 3859 + const struct list_head *b) 3859 3860 { 3860 3861 struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3861 3862 struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
+2 -1
drivers/pci/controller/cadence/pcie-cadence-host.c
··· 345 345 return 0; 346 346 } 347 347 348 - static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b) 348 + static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, 349 + const struct list_head *b) 349 350 { 350 351 struct resource_entry *entry1, *entry2; 351 352
+2 -1
drivers/spi/spi-loopback-test.c
··· 454 454 u8 *end; 455 455 }; 456 456 457 - static int rx_ranges_cmp(void *priv, struct list_head *a, struct list_head *b) 457 + static int rx_ranges_cmp(void *priv, const struct list_head *a, 458 + const struct list_head *b) 458 459 { 459 460 struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); 460 461 struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
+2 -1
fs/btrfs/raid56.c
··· 1633 1633 /* 1634 1634 * rbios on the plug list are sorted for easier merging. 1635 1635 */ 1636 - static int plug_cmp(void *priv, struct list_head *a, struct list_head *b) 1636 + static int plug_cmp(void *priv, const struct list_head *a, 1637 + const struct list_head *b) 1637 1638 { 1638 1639 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio, 1639 1640 plug_list);
+2 -1
fs/btrfs/tree-log.c
··· 4138 4138 return ret; 4139 4139 } 4140 4140 4141 - static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 4141 + static int extent_cmp(void *priv, const struct list_head *a, 4142 + const struct list_head *b) 4142 4143 { 4143 4144 struct extent_map *em1, *em2; 4144 4145
+2 -1
fs/btrfs/volumes.c
··· 1224 1224 return 0; 1225 1225 } 1226 1226 1227 - static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) 1227 + static int devid_cmp(void *priv, const struct list_head *a, 1228 + const struct list_head *b) 1228 1229 { 1229 1230 struct btrfs_device *dev1, *dev2; 1230 1231
+2 -2
fs/ext4/fsmap.c
··· 354 354 355 355 /* Compare two fsmap items. */ 356 356 static int ext4_getfsmap_compare(void *priv, 357 - struct list_head *a, 358 - struct list_head *b) 357 + const struct list_head *a, 358 + const struct list_head *b) 359 359 { 360 360 struct ext4_fsmap *fa; 361 361 struct ext4_fsmap *fb;
+2 -1
fs/gfs2/glock.c
··· 1732 1732 spin_unlock(&gl->gl_lockref.lock); 1733 1733 } 1734 1734 1735 - static int glock_cmp(void *priv, struct list_head *a, struct list_head *b) 1735 + static int glock_cmp(void *priv, const struct list_head *a, 1736 + const struct list_head *b) 1736 1737 { 1737 1738 struct gfs2_glock *gla, *glb; 1738 1739
+1 -1
fs/gfs2/log.c
··· 695 695 } 696 696 } 697 697 698 - static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) 698 + static int ip_cmp(void *priv, const struct list_head *a, const struct list_head *b) 699 699 { 700 700 struct gfs2_inode *ipa, *ipb; 701 701
+2 -1
fs/gfs2/lops.c
··· 634 634 kunmap_atomic(kaddr); 635 635 } 636 636 637 - static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b) 637 + static int blocknr_cmp(void *priv, const struct list_head *a, 638 + const struct list_head *b) 638 639 { 639 640 struct gfs2_bufdata *bda, *bdb; 640 641
+2 -1
fs/iomap/buffered-io.c
··· 1155 1155 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); 1156 1156 1157 1157 static int 1158 - iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b) 1158 + iomap_ioend_compare(void *priv, const struct list_head *a, 1159 + const struct list_head *b) 1159 1160 { 1160 1161 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); 1161 1162 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+4 -3
fs/ubifs/gc.c
··· 102 102 * This function compares data nodes @a and @b. Returns %1 if @a has greater 103 103 * inode or block number, and %-1 otherwise. 104 104 */ 105 - static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b) 105 + static int data_nodes_cmp(void *priv, const struct list_head *a, 106 + const struct list_head *b) 106 107 { 107 108 ino_t inuma, inumb; 108 109 struct ubifs_info *c = priv; ··· 146 145 * first and sorted by length in descending order. Directory entry nodes go 147 146 * after inode nodes and are sorted in ascending hash valuer order. 148 147 */ 149 - static int nondata_nodes_cmp(void *priv, struct list_head *a, 150 - struct list_head *b) 148 + static int nondata_nodes_cmp(void *priv, const struct list_head *a, 149 + const struct list_head *b) 151 150 { 152 151 ino_t inuma, inumb; 153 152 struct ubifs_info *c = priv;
+2 -2
fs/ubifs/replay.c
··· 298 298 * entries @a and @b by comparing their sequence numer. Returns %1 if @a has 299 299 * greater sequence number and %-1 otherwise. 300 300 */ 301 - static int replay_entries_cmp(void *priv, struct list_head *a, 302 - struct list_head *b) 301 + static int replay_entries_cmp(void *priv, const struct list_head *a, 302 + const struct list_head *b) 303 303 { 304 304 struct ubifs_info *c = priv; 305 305 struct replay_entry *ra, *rb;
+2 -2
fs/xfs/scrub/bitmap.c
··· 63 63 static int 64 64 xbitmap_range_cmp( 65 65 void *priv, 66 - struct list_head *a, 67 - struct list_head *b) 66 + const struct list_head *a, 67 + const struct list_head *b) 68 68 { 69 69 struct xbitmap_range *ap; 70 70 struct xbitmap_range *bp;
+2 -2
fs/xfs/xfs_bmap_item.c
··· 265 265 static int 266 266 xfs_bmap_update_diff_items( 267 267 void *priv, 268 - struct list_head *a, 269 - struct list_head *b) 268 + const struct list_head *a, 269 + const struct list_head *b) 270 270 { 271 271 struct xfs_bmap_intent *ba; 272 272 struct xfs_bmap_intent *bb;
+3 -3
fs/xfs/xfs_buf.c
··· 2124 2124 */ 2125 2125 static int 2126 2126 xfs_buf_cmp( 2127 - void *priv, 2128 - struct list_head *a, 2129 - struct list_head *b) 2127 + void *priv, 2128 + const struct list_head *a, 2129 + const struct list_head *b) 2130 2130 { 2131 2131 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); 2132 2132 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
+2 -2
fs/xfs/xfs_extent_busy.c
··· 629 629 int 630 630 xfs_extent_busy_ag_cmp( 631 631 void *priv, 632 - struct list_head *l1, 633 - struct list_head *l2) 632 + const struct list_head *l1, 633 + const struct list_head *l2) 634 634 { 635 635 struct xfs_extent_busy *b1 = 636 636 container_of(l1, struct xfs_extent_busy, list);
+2 -1
fs/xfs/xfs_extent_busy.h
··· 58 58 xfs_extent_busy_wait_all(struct xfs_mount *mp); 59 59 60 60 int 61 - xfs_extent_busy_ag_cmp(void *priv, struct list_head *a, struct list_head *b); 61 + xfs_extent_busy_ag_cmp(void *priv, const struct list_head *a, 62 + const struct list_head *b); 62 63 63 64 static inline void xfs_extent_busy_sort(struct list_head *list) 64 65 {
+2 -2
fs/xfs/xfs_extfree_item.c
··· 397 397 static int 398 398 xfs_extent_free_diff_items( 399 399 void *priv, 400 - struct list_head *a, 401 - struct list_head *b) 400 + const struct list_head *a, 401 + const struct list_head *b) 402 402 { 403 403 struct xfs_mount *mp = priv; 404 404 struct xfs_extent_free_item *ra;
+2 -2
fs/xfs/xfs_refcount_item.c
··· 269 269 static int 270 270 xfs_refcount_update_diff_items( 271 271 void *priv, 272 - struct list_head *a, 273 - struct list_head *b) 272 + const struct list_head *a, 273 + const struct list_head *b) 274 274 { 275 275 struct xfs_mount *mp = priv; 276 276 struct xfs_refcount_intent *ra;
+2 -2
fs/xfs/xfs_rmap_item.c
··· 337 337 static int 338 338 xfs_rmap_update_diff_items( 339 339 void *priv, 340 - struct list_head *a, 341 - struct list_head *b) 340 + const struct list_head *a, 341 + const struct list_head *b) 342 342 { 343 343 struct xfs_mount *mp = priv; 344 344 struct xfs_rmap_intent *ra;
+16
include/asm-generic/bug.h
··· 241 241 # define WARN_ON_SMP(x) ({0;}) 242 242 #endif 243 243 244 + /* 245 + * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a 246 + * function address, and can be useful for catching issues with 247 + * callback functions, for example. 248 + * 249 + * With CONFIG_CFI_CLANG, the warning is disabled because the 250 + * compiler replaces function addresses taken in C code with 251 + * local jump table addresses, which breaks cross-module function 252 + * address equality. 253 + */ 254 + #if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES) 255 + # define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; }) 256 + #else 257 + # define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn)) 258 + #endif 259 + 244 260 #endif /* __ASSEMBLY__ */ 245 261 246 262 #endif
+19 -1
include/asm-generic/vmlinux.lds.h
··· 544 544 . = ALIGN((align)); \ 545 545 __end_rodata = .; 546 546 547 + 548 + /* 549 + * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI) 550 + * jump table entries. 551 + */ 552 + #ifdef CONFIG_CFI_CLANG 553 + #define TEXT_CFI_JT \ 554 + . = ALIGN(PMD_SIZE); \ 555 + __cfi_jt_start = .; \ 556 + *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \ 557 + . = ALIGN(PMD_SIZE); \ 558 + __cfi_jt_end = .; 559 + #else 560 + #define TEXT_CFI_JT 561 + #endif 562 + 547 563 /* 548 564 * Non-instrumentable text section 549 565 */ ··· 586 570 NOINSTR_TEXT \ 587 571 *(.text..refcount) \ 588 572 *(.ref.text) \ 573 + TEXT_CFI_JT \ 589 574 MEM_KEEP(init.text*) \ 590 575 MEM_KEEP(exit.text*) \ 591 576 ··· 991 974 * keep any .init_array.* sections. 992 975 * https://bugs.llvm.org/show_bug.cgi?id=46478 993 976 */ 994 - #if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) 977 + #if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \ 978 + defined(CONFIG_CFI_CLANG) 995 979 # ifdef CONFIG_CONSTRUCTORS 996 980 # define SANITIZER_DISCARDS \ 997 981 *(.eh_frame)
+2 -2
include/linux/bpf.h
··· 652 652 struct bpf_ksym ksym; 653 653 }; 654 654 655 - static __always_inline unsigned int bpf_dispatcher_nop_func( 655 + static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( 656 656 const void *ctx, 657 657 const struct bpf_insn *insnsi, 658 658 unsigned int (*bpf_func)(const void *, ··· 680 680 } 681 681 682 682 #define DEFINE_BPF_DISPATCHER(name) \ 683 - noinline unsigned int bpf_dispatcher_##name##_func( \ 683 + noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ 684 684 const void *ctx, \ 685 685 const struct bpf_insn *insnsi, \ 686 686 unsigned int (*bpf_func)(const void *, \
+41
include/linux/cfi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Clang Control Flow Integrity (CFI) support. 4 + * 5 + * Copyright (C) 2021 Google LLC 6 + */ 7 + #ifndef _LINUX_CFI_H 8 + #define _LINUX_CFI_H 9 + 10 + #ifdef CONFIG_CFI_CLANG 11 + typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag); 12 + 13 + /* Compiler-generated function in each module, and the kernel */ 14 + extern void __cfi_check(uint64_t id, void *ptr, void *diag); 15 + 16 + /* 17 + * Force the compiler to generate a CFI jump table entry for a function 18 + * and store the jump table address to __cfi_jt_<function>. 19 + */ 20 + #define __CFI_ADDRESSABLE(fn, __attr) \ 21 + const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn 22 + 23 + #ifdef CONFIG_CFI_CLANG_SHADOW 24 + 25 + extern void cfi_module_add(struct module *mod, unsigned long base_addr); 26 + extern void cfi_module_remove(struct module *mod, unsigned long base_addr); 27 + 28 + #else 29 + 30 + static inline void cfi_module_add(struct module *mod, unsigned long base_addr) {} 31 + static inline void cfi_module_remove(struct module *mod, unsigned long base_addr) {} 32 + 33 + #endif /* CONFIG_CFI_CLANG_SHADOW */ 34 + 35 + #else /* !CONFIG_CFI_CLANG */ 36 + 37 + #define __CFI_ADDRESSABLE(fn, __attr) 38 + 39 + #endif /* CONFIG_CFI_CLANG */ 40 + 41 + #endif /* _LINUX_CFI_H */
+3
include/linux/compiler-clang.h
··· 61 61 #if __has_feature(shadow_call_stack) 62 62 # define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) 63 63 #endif 64 + 65 + #define __nocfi __attribute__((__no_sanitize__("cfi"))) 66 + #define __cficanonical __attribute__((__cfi_canonical_jump_table__))
+8
include/linux/compiler_types.h
··· 242 242 # define __noscs 243 243 #endif 244 244 245 + #ifndef __nocfi 246 + # define __nocfi 247 + #endif 248 + 249 + #ifndef __cficanonical 250 + # define __cficanonical 251 + #endif 252 + 245 253 #ifndef asm_volatile_goto 246 254 #define asm_volatile_goto(x...) asm goto(x) 247 255 #endif
+3 -3
include/linux/init.h
··· 47 47 48 48 /* These are for everybody (although not all archs will actually 49 49 discard it in modules) */ 50 - #define __init __section(".init.text") __cold __latent_entropy __noinitretpoline 50 + #define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi 51 51 #define __initdata __section(".init.data") 52 52 #define __initconst __section(".init.rodata") 53 53 #define __exitdata __section(".exit.data") ··· 220 220 __initcall_name(initstub, __iid, id) 221 221 222 222 #define __define_initcall_stub(__stub, fn) \ 223 - int __init __stub(void); \ 224 - int __init __stub(void) \ 223 + int __init __cficanonical __stub(void); \ 224 + int __init __cficanonical __stub(void) \ 225 225 { \ 226 226 return fn(); \ 227 227 } \
+4 -3
include/linux/list_sort.h
··· 6 6 7 7 struct list_head; 8 8 9 + typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *, 10 + const struct list_head *, const struct list_head *); 11 + 9 12 __attribute__((nonnull(2,3))) 10 - void list_sort(void *priv, struct list_head *head, 11 - int (*cmp)(void *priv, struct list_head *a, 12 - struct list_head *b)); 13 + void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp); 13 14 #endif
+10
include/linux/mm.h
··· 125 125 #endif 126 126 127 127 /* 128 + * With CONFIG_CFI_CLANG, the compiler replaces function addresses in 129 + * instrumented C code with jump table addresses. Architectures that 130 + * support CFI can define this macro to return the actual function address 131 + * when needed. 132 + */ 133 + #ifndef function_nocfi 134 + #define function_nocfi(x) (x) 135 + #endif 136 + 137 + /* 128 138 * To prevent common memory management code establishing 129 139 * a zero page mapping on a read fault. 130 140 * This macro should be defined within <asm/pgtable.h>.
+11 -2
include/linux/module.h
··· 26 26 #include <linux/tracepoint-defs.h> 27 27 #include <linux/srcu.h> 28 28 #include <linux/static_call_types.h> 29 + #include <linux/cfi.h> 29 30 30 31 #include <linux/percpu.h> 31 32 #include <asm/module.h> ··· 129 128 #define module_init(initfn) \ 130 129 static inline initcall_t __maybe_unused __inittest(void) \ 131 130 { return initfn; } \ 132 - int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); 131 + int init_module(void) __copy(initfn) \ 132 + __attribute__((alias(#initfn))); \ 133 + __CFI_ADDRESSABLE(init_module, __initdata); 133 134 134 135 /* This is only required if you want to be unloadable. */ 135 136 #define module_exit(exitfn) \ 136 137 static inline exitcall_t __maybe_unused __exittest(void) \ 137 138 { return exitfn; } \ 138 - void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); 139 + void cleanup_module(void) __copy(exitfn) \ 140 + __attribute__((alias(#exitfn))); \ 141 + __CFI_ADDRESSABLE(cleanup_module, __exitdata); 139 142 140 143 #endif 141 144 ··· 380 375 const struct kernel_symbol *syms; 381 376 const s32 *crcs; 382 377 unsigned int num_syms; 378 + 379 + #ifdef CONFIG_CFI_CLANG 380 + cfi_check_fn cfi_check; 381 + #endif 383 382 384 383 /* Kernel parameters. */ 385 384 #ifdef CONFIG_SYSFS
+2 -2
include/linux/pci.h
··· 1944 1944 #ifdef CONFIG_LTO_CLANG 1945 1945 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 1946 1946 class_shift, hook, stub) \ 1947 - void stub(struct pci_dev *dev); \ 1948 - void stub(struct pci_dev *dev) \ 1947 + void __cficanonical stub(struct pci_dev *dev); \ 1948 + void __cficanonical stub(struct pci_dev *dev) \ 1949 1949 { \ 1950 1950 hook(dev); \ 1951 1951 } \
+1 -1
init/Kconfig
··· 2296 2296 2297 2297 config MODULES_TREE_LOOKUP 2298 2298 def_bool y 2299 - depends on PERF_EVENTS || TRACING 2299 + depends on PERF_EVENTS || TRACING || CFI_CLANG 2300 2300 2301 2301 config INIT_ALL_POSSIBLE 2302 2302 bool
+4
kernel/Makefile
··· 41 41 UBSAN_SANITIZE_kcov.o := n 42 42 CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector 43 43 44 + # Don't instrument error handlers 45 + CFLAGS_REMOVE_cfi.o := $(CC_FLAGS_CFI) 46 + 44 47 obj-y += sched/ 45 48 obj-y += locking/ 46 49 obj-y += power/ ··· 114 111 obj-$(CONFIG_KCSAN) += kcsan/ 115 112 obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o 116 113 obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o 114 + obj-$(CONFIG_CFI_CLANG) += cfi.o 117 115 118 116 obj-$(CONFIG_PERF_EVENTS) += events/ 119 117
+329
kernel/cfi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Clang Control Flow Integrity (CFI) error and slowpath handling. 4 + * 5 + * Copyright (C) 2021 Google LLC 6 + */ 7 + 8 + #include <linux/hardirq.h> 9 + #include <linux/kallsyms.h> 10 + #include <linux/module.h> 11 + #include <linux/mutex.h> 12 + #include <linux/printk.h> 13 + #include <linux/ratelimit.h> 14 + #include <linux/rcupdate.h> 15 + #include <linux/vmalloc.h> 16 + #include <asm/cacheflush.h> 17 + #include <asm/set_memory.h> 18 + 19 + /* Compiler-defined handler names */ 20 + #ifdef CONFIG_CFI_PERMISSIVE 21 + #define cfi_failure_handler __ubsan_handle_cfi_check_fail 22 + #else 23 + #define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort 24 + #endif 25 + 26 + static inline void handle_cfi_failure(void *ptr) 27 + { 28 + if (IS_ENABLED(CONFIG_CFI_PERMISSIVE)) 29 + WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr); 30 + else 31 + panic("CFI failure (target: %pS)\n", ptr); 32 + } 33 + 34 + #ifdef CONFIG_MODULES 35 + #ifdef CONFIG_CFI_CLANG_SHADOW 36 + /* 37 + * Index type. A 16-bit index can address at most (2^16)-2 pages (taking 38 + * into account SHADOW_INVALID), i.e. ~256M with 4k pages. 39 + */ 40 + typedef u16 shadow_t; 41 + #define SHADOW_INVALID ((shadow_t)~0UL) 42 + 43 + struct cfi_shadow { 44 + /* Page index for the beginning of the shadow */ 45 + unsigned long base; 46 + /* An array of __cfi_check locations (as indices to the shadow) */ 47 + shadow_t shadow[1]; 48 + } __packed; 49 + 50 + /* 51 + * The shadow covers ~128M from the beginning of the module region. If 52 + * the region is larger, we fall back to __module_address for the rest. 
53 + */ 54 + #define __SHADOW_RANGE (_UL(SZ_128M) >> PAGE_SHIFT) 55 + 56 + /* The in-memory size of struct cfi_shadow, always at least one page */ 57 + #define __SHADOW_PAGES ((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT) 58 + #define SHADOW_PAGES max(1UL, __SHADOW_PAGES) 59 + #define SHADOW_SIZE (SHADOW_PAGES << PAGE_SHIFT) 60 + 61 + /* The actual size of the shadow array, minus metadata */ 62 + #define SHADOW_ARR_SIZE (SHADOW_SIZE - offsetof(struct cfi_shadow, shadow)) 63 + #define SHADOW_ARR_SLOTS (SHADOW_ARR_SIZE / sizeof(shadow_t)) 64 + 65 + static DEFINE_MUTEX(shadow_update_lock); 66 + static struct cfi_shadow __rcu *cfi_shadow __read_mostly; 67 + 68 + /* Returns the index in the shadow for the given address */ 69 + static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr) 70 + { 71 + unsigned long index; 72 + unsigned long page = ptr >> PAGE_SHIFT; 73 + 74 + if (unlikely(page < s->base)) 75 + return -1; /* Outside of module area */ 76 + 77 + index = page - s->base; 78 + 79 + if (index >= SHADOW_ARR_SLOTS) 80 + return -1; /* Cannot be addressed with shadow */ 81 + 82 + return (int)index; 83 + } 84 + 85 + /* Returns the page address for an index in the shadow */ 86 + static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s, 87 + int index) 88 + { 89 + if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS)) 90 + return 0; 91 + 92 + return (s->base + index) << PAGE_SHIFT; 93 + } 94 + 95 + /* Returns the __cfi_check function address for the given shadow location */ 96 + static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s, 97 + int index) 98 + { 99 + if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS)) 100 + return 0; 101 + 102 + if (unlikely(s->shadow[index] == SHADOW_INVALID)) 103 + return 0; 104 + 105 + /* __cfi_check is always page aligned */ 106 + return (s->base + s->shadow[index]) << PAGE_SHIFT; 107 + } 108 + 109 + static void prepare_next_shadow(const struct cfi_shadow __rcu *prev, 110 + struct 
cfi_shadow *next) 111 + { 112 + int i, index, check; 113 + 114 + /* Mark everything invalid */ 115 + memset(next->shadow, 0xFF, SHADOW_ARR_SIZE); 116 + 117 + if (!prev) 118 + return; /* No previous shadow */ 119 + 120 + /* If the base address didn't change, an update is not needed */ 121 + if (prev->base == next->base) { 122 + memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE); 123 + return; 124 + } 125 + 126 + /* Convert the previous shadow to the new address range */ 127 + for (i = 0; i < SHADOW_ARR_SLOTS; ++i) { 128 + if (prev->shadow[i] == SHADOW_INVALID) 129 + continue; 130 + 131 + index = ptr_to_shadow(next, shadow_to_ptr(prev, i)); 132 + if (index < 0) 133 + continue; 134 + 135 + check = ptr_to_shadow(next, 136 + shadow_to_check_fn(prev, prev->shadow[i])); 137 + if (check < 0) 138 + continue; 139 + 140 + next->shadow[index] = (shadow_t)check; 141 + } 142 + } 143 + 144 + static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod, 145 + unsigned long min_addr, unsigned long max_addr) 146 + { 147 + int check_index; 148 + unsigned long check = (unsigned long)mod->cfi_check; 149 + unsigned long ptr; 150 + 151 + if (unlikely(!PAGE_ALIGNED(check))) { 152 + pr_warn("cfi: not using shadow for module %s\n", mod->name); 153 + return; 154 + } 155 + 156 + check_index = ptr_to_shadow(s, check); 157 + if (check_index < 0) 158 + return; /* Module not addressable with shadow */ 159 + 160 + /* For each page, store the check function index in the shadow */ 161 + for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) { 162 + int index = ptr_to_shadow(s, ptr); 163 + 164 + if (index >= 0) { 165 + /* Each page must only contain one module */ 166 + WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID); 167 + s->shadow[index] = (shadow_t)check_index; 168 + } 169 + } 170 + } 171 + 172 + static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod, 173 + unsigned long min_addr, unsigned long max_addr) 174 + { 175 + unsigned long ptr; 176 + 177 + for (ptr 
= min_addr; ptr <= max_addr; ptr += PAGE_SIZE) { 178 + int index = ptr_to_shadow(s, ptr); 179 + 180 + if (index >= 0) 181 + s->shadow[index] = SHADOW_INVALID; 182 + } 183 + } 184 + 185 + typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *, 186 + unsigned long min_addr, unsigned long max_addr); 187 + 188 + static void update_shadow(struct module *mod, unsigned long base_addr, 189 + update_shadow_fn fn) 190 + { 191 + struct cfi_shadow *prev; 192 + struct cfi_shadow *next; 193 + unsigned long min_addr, max_addr; 194 + 195 + next = vmalloc(SHADOW_SIZE); 196 + 197 + mutex_lock(&shadow_update_lock); 198 + prev = rcu_dereference_protected(cfi_shadow, 199 + mutex_is_locked(&shadow_update_lock)); 200 + 201 + if (next) { 202 + next->base = base_addr >> PAGE_SHIFT; 203 + prepare_next_shadow(prev, next); 204 + 205 + min_addr = (unsigned long)mod->core_layout.base; 206 + max_addr = min_addr + mod->core_layout.text_size; 207 + fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK); 208 + 209 + set_memory_ro((unsigned long)next, SHADOW_PAGES); 210 + } 211 + 212 + rcu_assign_pointer(cfi_shadow, next); 213 + mutex_unlock(&shadow_update_lock); 214 + synchronize_rcu(); 215 + 216 + if (prev) { 217 + set_memory_rw((unsigned long)prev, SHADOW_PAGES); 218 + vfree(prev); 219 + } 220 + } 221 + 222 + void cfi_module_add(struct module *mod, unsigned long base_addr) 223 + { 224 + update_shadow(mod, base_addr, add_module_to_shadow); 225 + } 226 + 227 + void cfi_module_remove(struct module *mod, unsigned long base_addr) 228 + { 229 + update_shadow(mod, base_addr, remove_module_from_shadow); 230 + } 231 + 232 + static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s, 233 + unsigned long ptr) 234 + { 235 + int index; 236 + 237 + if (unlikely(!s)) 238 + return NULL; /* No shadow available */ 239 + 240 + index = ptr_to_shadow(s, ptr); 241 + if (index < 0) 242 + return NULL; /* Cannot be addressed with shadow */ 243 + 244 + return 
(cfi_check_fn)shadow_to_check_fn(s, index); 245 + } 246 + 247 + static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr) 248 + { 249 + cfi_check_fn fn; 250 + 251 + rcu_read_lock_sched(); 252 + fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr); 253 + rcu_read_unlock_sched(); 254 + 255 + return fn; 256 + } 257 + 258 + #else /* !CONFIG_CFI_CLANG_SHADOW */ 259 + 260 + static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr) 261 + { 262 + return NULL; 263 + } 264 + 265 + #endif /* CONFIG_CFI_CLANG_SHADOW */ 266 + 267 + static inline cfi_check_fn find_module_check_fn(unsigned long ptr) 268 + { 269 + cfi_check_fn fn = NULL; 270 + struct module *mod; 271 + 272 + rcu_read_lock_sched(); 273 + mod = __module_address(ptr); 274 + if (mod) 275 + fn = mod->cfi_check; 276 + rcu_read_unlock_sched(); 277 + 278 + return fn; 279 + } 280 + 281 + static inline cfi_check_fn find_check_fn(unsigned long ptr) 282 + { 283 + cfi_check_fn fn = NULL; 284 + 285 + if (is_kernel_text(ptr)) 286 + return __cfi_check; 287 + 288 + /* 289 + * Indirect call checks can happen when RCU is not watching. Both 290 + * the shadow and __module_address use RCU, so we need to wake it 291 + * up if necessary. 
292 + */ 293 + RCU_NONIDLE({ 294 + if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) 295 + fn = find_shadow_check_fn(ptr); 296 + 297 + if (!fn) 298 + fn = find_module_check_fn(ptr); 299 + }); 300 + 301 + return fn; 302 + } 303 + 304 + void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag) 305 + { 306 + cfi_check_fn fn = find_check_fn((unsigned long)ptr); 307 + 308 + if (likely(fn)) 309 + fn(id, ptr, diag); 310 + else /* Don't allow unchecked modules */ 311 + handle_cfi_failure(ptr); 312 + } 313 + EXPORT_SYMBOL(__cfi_slowpath_diag); 314 + 315 + #else /* !CONFIG_MODULES */ 316 + 317 + void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag) 318 + { 319 + handle_cfi_failure(ptr); /* No modules */ 320 + } 321 + EXPORT_SYMBOL(__cfi_slowpath_diag); 322 + 323 + #endif /* CONFIG_MODULES */ 324 + 325 + void cfi_failure_handler(void *data, void *ptr, void *vtable) 326 + { 327 + handle_cfi_failure(ptr); 328 + } 329 + EXPORT_SYMBOL(cfi_failure_handler);
+50 -5
kernel/kallsyms.c
··· 161 161 return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; 162 162 } 163 163 164 + #if defined(CONFIG_CFI_CLANG) && defined(CONFIG_LTO_CLANG_THIN) 165 + /* 166 + * LLVM appends a hash to static function names when ThinLTO and CFI are 167 + * both enabled, i.e. foo() becomes foo$707af9a22804d33c81801f27dcfe489b. 168 + * This causes confusion and potentially breaks user space tools, so we 169 + * strip the suffix from expanded symbol names. 170 + */ 171 + static inline bool cleanup_symbol_name(char *s) 172 + { 173 + char *res; 174 + 175 + res = strrchr(s, '$'); 176 + if (res) 177 + *res = '\0'; 178 + 179 + return res != NULL; 180 + } 181 + #else 182 + static inline bool cleanup_symbol_name(char *s) { return false; } 183 + #endif 184 + 164 185 /* Lookup the address for this symbol. Returns 0 if not found. */ 165 186 unsigned long kallsyms_lookup_name(const char *name) 166 187 { ··· 193 172 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 194 173 195 174 if (strcmp(namebuf, name) == 0) 175 + return kallsyms_sym_address(i); 176 + 177 + if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0) 196 178 return kallsyms_sym_address(i); 197 179 } 198 180 return module_kallsyms_lookup_name(name); ··· 327 303 namebuf, KSYM_NAME_LEN); 328 304 if (modname) 329 305 *modname = NULL; 330 - return namebuf; 306 + 307 + ret = namebuf; 308 + goto found; 331 309 } 332 310 333 311 /* See if it's in a module or a BPF JITed image. 
*/ ··· 342 316 if (!ret) 343 317 ret = ftrace_mod_address_lookup(addr, symbolsize, 344 318 offset, modname, namebuf); 319 + 320 + found: 321 + cleanup_symbol_name(namebuf); 345 322 return ret; 346 323 } 347 324 348 325 int lookup_symbol_name(unsigned long addr, char *symname) 349 326 { 327 + int res; 328 + 350 329 symname[0] = '\0'; 351 330 symname[KSYM_NAME_LEN - 1] = '\0'; 352 331 ··· 362 331 /* Grab name */ 363 332 kallsyms_expand_symbol(get_symbol_offset(pos), 364 333 symname, KSYM_NAME_LEN); 365 - return 0; 334 + goto found; 366 335 } 367 336 /* See if it's in a module. */ 368 - return lookup_module_symbol_name(addr, symname); 337 + res = lookup_module_symbol_name(addr, symname); 338 + if (res) 339 + return res; 340 + 341 + found: 342 + cleanup_symbol_name(symname); 343 + return 0; 369 344 } 370 345 371 346 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, 372 347 unsigned long *offset, char *modname, char *name) 373 348 { 349 + int res; 350 + 374 351 name[0] = '\0'; 375 352 name[KSYM_NAME_LEN - 1] = '\0'; 376 353 ··· 390 351 kallsyms_expand_symbol(get_symbol_offset(pos), 391 352 name, KSYM_NAME_LEN); 392 353 modname[0] = '\0'; 393 - return 0; 354 + goto found; 394 355 } 395 356 /* See if it's in a module. */ 396 - return lookup_module_symbol_attrs(addr, size, offset, modname, name); 357 + res = lookup_module_symbol_attrs(addr, size, offset, modname, name); 358 + if (res) 359 + return res; 360 + 361 + found: 362 + cleanup_symbol_name(name); 363 + return 0; 397 364 } 398 365 399 366 /* Look up a kernel symbol and return it in a text buffer. */
+2 -1
kernel/kthread.c
··· 963 963 struct timer_list *timer = &dwork->timer; 964 964 struct kthread_work *work = &dwork->work; 965 965 966 - WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn); 966 + WARN_ON_FUNCTION_MISMATCH(timer->function, 967 + kthread_delayed_work_timer_fn); 967 968 968 969 /* 969 970 * If @delay is 0, queue @dwork->work immediately. This is for
+43
kernel/module.c
··· 2146 2146 { 2147 2147 } 2148 2148 2149 + static void cfi_cleanup(struct module *mod); 2150 + 2149 2151 /* Free a module, remove from lists, etc. */ 2150 2152 static void free_module(struct module *mod) 2151 2153 { ··· 2188 2186 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ 2189 2187 synchronize_rcu(); 2190 2188 mutex_unlock(&module_mutex); 2189 + 2190 + /* Clean up CFI for the module. */ 2191 + cfi_cleanup(mod); 2191 2192 2192 2193 /* This may be empty, but that's OK */ 2193 2194 module_arch_freeing_init(mod); ··· 3871 3866 return 0; 3872 3867 } 3873 3868 3869 + static void cfi_init(struct module *mod); 3870 + 3874 3871 /* 3875 3872 * Allocate and load the module: note that size of section 0 is always 3876 3873 * zero, and we rely on this for optional sections. ··· 4004 3997 4005 3998 flush_module_icache(mod); 4006 3999 4000 + /* Setup CFI for the module. */ 4001 + cfi_init(mod); 4002 + 4007 4003 /* Now copy in args */ 4008 4004 mod->args = strndup_user(uargs, ~0UL >> 1); 4009 4005 if (IS_ERR(mod->args)) { ··· 4080 4070 synchronize_rcu(); 4081 4071 kfree(mod->args); 4082 4072 free_arch_cleanup: 4073 + cfi_cleanup(mod); 4083 4074 module_arch_cleanup(mod); 4084 4075 free_modinfo: 4085 4076 free_modinfo(mod); ··· 4425 4414 } 4426 4415 #endif /* CONFIG_LIVEPATCH */ 4427 4416 #endif /* CONFIG_KALLSYMS */ 4417 + 4418 + static void cfi_init(struct module *mod) 4419 + { 4420 + #ifdef CONFIG_CFI_CLANG 4421 + initcall_t *init; 4422 + exitcall_t *exit; 4423 + 4424 + rcu_read_lock_sched(); 4425 + mod->cfi_check = (cfi_check_fn) 4426 + find_kallsyms_symbol_value(mod, "__cfi_check"); 4427 + init = (initcall_t *) 4428 + find_kallsyms_symbol_value(mod, "__cfi_jt_init_module"); 4429 + exit = (exitcall_t *) 4430 + find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module"); 4431 + rcu_read_unlock_sched(); 4432 + 4433 + /* Fix init/exit functions to point to the CFI jump table */ 4434 + if (init) 4435 + mod->init = *init; 4436 + if (exit) 4437 + 
mod->exit = *exit; 4438 + 4439 + cfi_module_add(mod, module_addr_min); 4440 + #endif 4441 + } 4442 + 4443 + static void cfi_cleanup(struct module *mod) 4444 + { 4445 + #ifdef CONFIG_CFI_CLANG 4446 + cfi_module_remove(mod, module_addr_min); 4447 + #endif 4448 + } 4428 4449 4429 4450 /* Maximum number of characters written by module_flags() */ 4430 4451 #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
+1 -1
kernel/workqueue.c
··· 1630 1630 struct work_struct *work = &dwork->work; 1631 1631 1632 1632 WARN_ON_ONCE(!wq); 1633 - WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 1633 + WARN_ON_FUNCTION_MISMATCH(timer->function, delayed_work_timer_fn); 1634 1634 WARN_ON_ONCE(timer_pending(timer)); 1635 1635 WARN_ON_ONCE(!list_empty(&work->entry)); 1636 1636
+6 -11
lib/list_sort.c
··· 7 7 #include <linux/list_sort.h> 8 8 #include <linux/list.h> 9 9 10 - typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *, 11 - struct list_head const *, struct list_head const *); 12 - 13 10 /* 14 11 * Returns a list organized in an intermediate format suited 15 12 * to chaining of merge() calls: null-terminated, no reserved or 16 13 * sentinel head node, "prev" links not maintained. 17 14 */ 18 15 __attribute__((nonnull(2,3,4))) 19 - static struct list_head *merge(void *priv, cmp_func cmp, 16 + static struct list_head *merge(void *priv, list_cmp_func_t cmp, 20 17 struct list_head *a, struct list_head *b) 21 18 { 22 19 struct list_head *head, **tail = &head; ··· 49 52 * throughout. 50 53 */ 51 54 __attribute__((nonnull(2,3,4,5))) 52 - static void merge_final(void *priv, cmp_func cmp, struct list_head *head, 55 + static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head, 53 56 struct list_head *a, struct list_head *b) 54 57 { 55 58 struct list_head *tail = head; ··· 182 185 * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1). 
183 186 */ 184 187 __attribute__((nonnull(2,3))) 185 - void list_sort(void *priv, struct list_head *head, 186 - int (*cmp)(void *priv, struct list_head *a, 187 - struct list_head *b)) 188 + void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp) 188 189 { 189 190 struct list_head *list = head->next, *pending = NULL; 190 191 size_t count = 0; /* Count of pending */ ··· 222 227 if (likely(bits)) { 223 228 struct list_head *a = *tail, *b = a->prev; 224 229 225 - a = merge(priv, (cmp_func)cmp, b, a); 230 + a = merge(priv, cmp, b, a); 226 231 /* Install the merged result in place of the inputs */ 227 232 a->prev = b->prev; 228 233 *tail = a; ··· 244 249 245 250 if (!next) 246 251 break; 247 - list = merge(priv, (cmp_func)cmp, pending, list); 252 + list = merge(priv, cmp, pending, list); 248 253 pending = next; 249 254 } 250 255 /* The final merge, rebuilding prev links */ 251 - merge_final(priv, (cmp_func)cmp, head, pending, list); 256 + merge_final(priv, cmp, head, pending, list); 252 257 } 253 258 EXPORT_SYMBOL(list_sort);
+2 -1
lib/test_list_sort.c
··· 56 56 return 0; 57 57 } 58 58 59 - static int __init cmp(void *priv, struct list_head *a, struct list_head *b) 59 + static int __init cmp(void *priv, const struct list_head *a, 60 + const struct list_head *b) 60 61 { 61 62 struct debug_el *ela, *elb; 62 63
+2 -2
net/tipc/name_table.c
··· 397 397 * Code reused: time_after32() for the same purpose 398 398 */ 399 399 #define publication_after(pa, pb) time_after32((pa)->id, (pb)->id) 400 - static int tipc_publ_sort(void *priv, struct list_head *a, 401 - struct list_head *b) 400 + static int tipc_publ_sort(void *priv, const struct list_head *a, 401 + const struct list_head *b) 402 402 { 403 403 struct publication *pa, *pb; 404 404
+1 -1
scripts/Makefile.modfinal
··· 23 23 part-of-module = y 24 24 25 25 quiet_cmd_cc_o_c = CC [M] $@ 26 - cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< 26 + cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI), $(c_flags)) -c -o $@ $< 27 27 28 28 %.mod.o: %.mod.c FORCE 29 29 $(call if_changed_dep,cc_o_c)
+18 -1
scripts/module.lds.S
··· 3 3 * Archs are free to supply their own linker scripts. ld will 4 4 * combine them automatically. 5 5 */ 6 + #ifdef CONFIG_CFI_CLANG 7 + # include <asm/page.h> 8 + # define ALIGN_CFI ALIGN(PAGE_SIZE) 9 + # define SANITIZER_DISCARDS *(.eh_frame) 10 + #else 11 + # define ALIGN_CFI 12 + # define SANITIZER_DISCARDS 13 + #endif 14 + 6 15 SECTIONS { 7 16 /DISCARD/ : { 8 17 *(.discard) 9 18 *(.discard.*) 19 + SANITIZER_DISCARDS 10 20 } 11 21 12 22 __ksymtab 0 : { *(SORT(___ksymtab+*)) } ··· 51 41 *(.rodata..L*) 52 42 } 53 43 54 - .text : { *(.text .text.[0-9a-zA-Z_]*) } 44 + /* 45 + * With CONFIG_CFI_CLANG, we assume __cfi_check is at the beginning 46 + * of the .text section, and is aligned to PAGE_SIZE. 47 + */ 48 + .text : ALIGN_CFI { 49 + *(.text.__cfi_check) 50 + *(.text .text.[0-9a-zA-Z_]* .text..L.cfi*) 51 + } 55 52 #endif 56 53 } 57 54