Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

- Make -mstrict-align configurable

- Add kernel relocation and KASLR support

- Add single kernel image implementation for kdump

- Add hardware breakpoints/watchpoints support

- Add kprobes/kretprobes/kprobes_on_ftrace support

- Add LoongArch support for some selftests

* tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (23 commits)
selftests/ftrace: Add LoongArch kprobe args string tests support
selftests/seccomp: Add LoongArch selftesting support
tools: Add LoongArch build infrastructure
samples/kprobes: Add LoongArch support
LoongArch: Mark some assembler symbols as non-kprobe-able
LoongArch: Add kprobes on ftrace support
LoongArch: Add kretprobes support
LoongArch: Add kprobes support
LoongArch: Simulate branch and PC* instructions
LoongArch: ptrace: Add hardware single step support
LoongArch: ptrace: Add function argument access API
LoongArch: ptrace: Expose hardware breakpoints to debuggers
LoongArch: Add hardware breakpoints/watchpoints support
LoongArch: kdump: Add crashkernel=YM handling
LoongArch: kdump: Add single kernel image implementation
LoongArch: Add support for kernel address space layout randomization (KASLR)
LoongArch: Add support for kernel relocation
LoongArch: Add la_abs macro implementation
LoongArch: Add JUMP_VIRT_ADDR macro implementation to avoid using la.abs
LoongArch: Use la.pcrel instead of la.abs when it's trivially possible
...

+2665 -130
+56 -9
arch/loongarch/Kconfig
··· 94 94 select HAVE_DYNAMIC_FTRACE_WITH_ARGS 95 95 select HAVE_DYNAMIC_FTRACE_WITH_REGS 96 96 select HAVE_EBPF_JIT 97 + select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN 97 98 select HAVE_EXIT_THREAD 98 99 select HAVE_FAST_GUP 99 100 select HAVE_FTRACE_MCOUNT_RECORD 101 + select HAVE_FUNCTION_ARG_ACCESS_API 100 102 select HAVE_FUNCTION_GRAPH_TRACER 101 103 select HAVE_FUNCTION_TRACER 102 104 select HAVE_GENERIC_VDSO 105 + select HAVE_HW_BREAKPOINT if PERF_EVENTS 103 106 select HAVE_IOREMAP_PROT 104 107 select HAVE_IRQ_EXIT_ON_IRQ_STACK 105 108 select HAVE_IRQ_TIME_ACCOUNTING 109 + select HAVE_KPROBES 110 + select HAVE_KPROBES_ON_FTRACE 111 + select HAVE_KRETPROBES 106 112 select HAVE_MOD_ARCH_SPECIFIC 107 113 select HAVE_NMI 108 114 select HAVE_PCI ··· 447 441 protection support. However, you can enable LoongArch DMW-based 448 442 ioremap() for better performance. 449 443 444 + config ARCH_STRICT_ALIGN 445 + bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT 446 + default y 447 + help 448 + Not all LoongArch cores support h/w unaligned access, we can use 449 + -mstrict-align build parameter to prevent unaligned accesses. 450 + 451 + CPUs with h/w unaligned access support: 452 + Loongson-2K2000/2K3000/3A5000/3C5000/3D5000. 453 + 454 + CPUs without h/w unaligned access support: 455 + Loongson-2K500/2K1000. 456 + 457 + This option is enabled by default to make the kernel be able to run 458 + on all LoongArch systems. But you can disable it manually if you want 459 + to run kernel only on systems with h/w unaligned access support in 460 + order to optimise for performance. 461 + 450 462 config KEXEC 451 463 bool "Kexec system call" 452 464 select KEXEC_CORE ··· 478 454 479 455 config CRASH_DUMP 480 456 bool "Build kdump crash kernel" 457 + select RELOCATABLE 481 458 help 482 459 Generate crash dump after being started by kexec. 
This should 483 460 be normally only set in special crash dump kernels which are ··· 488 463 489 464 For more details see Documentation/admin-guide/kdump/kdump.rst 490 465 491 - config PHYSICAL_START 492 - hex "Physical address where the kernel is loaded" 493 - default "0x90000000a0000000" 494 - depends on CRASH_DUMP 466 + config RELOCATABLE 467 + bool "Relocatable kernel" 495 468 help 496 - This gives the XKPRANGE address where the kernel is loaded. 497 - If you plan to use kernel for capturing the crash dump change 498 - this value to start of the reserved region (the "X" value as 499 - specified in the "crashkernel=YM@XM" command line boot parameter 500 - passed to the panic-ed kernel). 469 + This builds the kernel as a Position Independent Executable (PIE), 470 + which retains all relocation metadata required, so as to relocate 471 + the kernel binary at runtime to a different virtual address from 472 + its link address. 473 + 474 + config RANDOMIZE_BASE 475 + bool "Randomize the address of the kernel (KASLR)" 476 + depends on RELOCATABLE 477 + help 478 + Randomizes the physical and virtual address at which the 479 + kernel image is loaded, as a security feature that 480 + deters exploit attempts relying on knowledge of the location 481 + of kernel internals. 482 + 483 + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. 484 + 485 + If unsure, say N. 486 + 487 + config RANDOMIZE_BASE_MAX_OFFSET 488 + hex "Maximum KASLR offset" if EXPERT 489 + depends on RANDOMIZE_BASE 490 + range 0x0 0x10000000 491 + default "0x01000000" 492 + help 493 + When KASLR is active, this provides the maximum offset that will 494 + be applied to the kernel image. It should be set according to the 495 + amount of physical RAM available in the target system. 496 + 497 + This is limited by the size of the lower address memory, 256MB. 501 498 502 499 config SECCOMP 503 500 bool "Enable seccomp to safely compute untrusted bytecode"
+10 -4
arch/loongarch/Makefile
··· 71 71 KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs 72 72 endif 73 73 74 + ifeq ($(CONFIG_RELOCATABLE),y) 75 + KBUILD_CFLAGS_KERNEL += -fPIE 76 + LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext 77 + endif 78 + 74 79 cflags-y += -ffreestanding 75 80 cflags-y += $(call cc-option, -mno-check-zero-division) 76 81 77 - ifndef CONFIG_PHYSICAL_START 78 82 load-y = 0x9000000000200000 79 - else 80 - load-y = $(CONFIG_PHYSICAL_START) 81 - endif 82 83 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) 83 84 84 85 drivers-$(CONFIG_PCI) += arch/loongarch/pci/ ··· 92 91 # instead of .eh_frame so we don't discard them. 93 92 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 94 93 94 + ifdef CONFIG_ARCH_STRICT_ALIGN 95 95 # Don't emit unaligned accesses. 96 96 # Not all LoongArch cores support unaligned access, and as kernel we can't 97 97 # rely on others to provide emulation for these accesses. 98 98 KBUILD_CFLAGS += $(call cc-option,-mstrict-align) 99 + else 100 + # Optimise for performance on hardware supports unaligned access. 101 + KBUILD_CFLAGS += $(call cc-option,-mno-strict-align) 102 + endif 99 103 100 104 KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include) 101 105
+1
arch/loongarch/configs/loongson3_defconfig
··· 48 48 CONFIG_NR_CPUS=64 49 49 CONFIG_NUMA=y 50 50 CONFIG_KEXEC=y 51 + CONFIG_CRASH_DUMP=y 51 52 CONFIG_SUSPEND=y 52 53 CONFIG_HIBERNATION=y 53 54 CONFIG_ACPI=y
+2
arch/loongarch/include/asm/addrspace.h
··· 125 125 #define ISA_IOSIZE SZ_16K 126 126 #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) 127 127 128 + #define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) 129 + 128 130 #endif /* _ASM_ADDRSPACE_H */
+10
arch/loongarch/include/asm/asm.h
··· 188 188 #define PTRLOG 3 189 189 #endif 190 190 191 + /* Annotate a function as being unsuitable for kprobes. */ 192 + #ifdef CONFIG_KPROBES 193 + #define _ASM_NOKPROBE(name) \ 194 + .pushsection "_kprobe_blacklist", "aw"; \ 195 + .quad name; \ 196 + .popsection 197 + #else 198 + #define _ASM_NOKPROBE(name) 199 + #endif 200 + 191 201 #endif /* __ASM_ASM_H */
+17
arch/loongarch/include/asm/asmmacro.h
··· 274 274 nor \dst, \src, zero 275 275 .endm 276 276 277 + .macro la_abs reg, sym 278 + #ifndef CONFIG_RELOCATABLE 279 + la.abs \reg, \sym 280 + #else 281 + 766: 282 + lu12i.w \reg, 0 283 + ori \reg, \reg, 0 284 + lu32i.d \reg, 0 285 + lu52i.d \reg, \reg, 0 286 + .pushsection ".la_abs", "aw", %progbits 287 + 768: 288 + .dword 768b-766b 289 + .dword \sym 290 + .popsection 291 + #endif 292 + .endm 293 + 277 294 #endif /* _ASM_ASMMACRO_H */
+1 -1
arch/loongarch/include/asm/cpu.h
··· 36 36 37 37 #define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */ 38 38 #define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */ 39 - #define PRID_SERIES_LA364 0xb000 /* Loongson 64bit,3-issue */ 39 + #define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */ 40 40 #define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */ 41 41 #define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */ 42 42
+145
arch/loongarch/include/asm/hw_breakpoint.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2022-2023 Loongson Technology Corporation Limited 4 + */ 5 + #ifndef __ASM_HW_BREAKPOINT_H 6 + #define __ASM_HW_BREAKPOINT_H 7 + 8 + #include <asm/loongarch.h> 9 + 10 + #ifdef __KERNEL__ 11 + 12 + /* Breakpoint */ 13 + #define LOONGARCH_BREAKPOINT_EXECUTE (0 << 0) 14 + 15 + /* Watchpoints */ 16 + #define LOONGARCH_BREAKPOINT_LOAD (1 << 0) 17 + #define LOONGARCH_BREAKPOINT_STORE (1 << 1) 18 + 19 + struct arch_hw_breakpoint_ctrl { 20 + u32 __reserved : 28, 21 + len : 2, 22 + type : 2; 23 + }; 24 + 25 + struct arch_hw_breakpoint { 26 + u64 address; 27 + u64 mask; 28 + struct arch_hw_breakpoint_ctrl ctrl; 29 + }; 30 + 31 + /* Lengths */ 32 + #define LOONGARCH_BREAKPOINT_LEN_1 0b11 33 + #define LOONGARCH_BREAKPOINT_LEN_2 0b10 34 + #define LOONGARCH_BREAKPOINT_LEN_4 0b01 35 + #define LOONGARCH_BREAKPOINT_LEN_8 0b00 36 + 37 + /* 38 + * Limits. 39 + * Changing these will require modifications to the register accessors. 40 + */ 41 + #define LOONGARCH_MAX_BRP 8 42 + #define LOONGARCH_MAX_WRP 8 43 + 44 + /* Virtual debug register bases. */ 45 + #define CSR_CFG_ADDR 0 46 + #define CSR_CFG_MASK (CSR_CFG_ADDR + LOONGARCH_MAX_BRP) 47 + #define CSR_CFG_CTRL (CSR_CFG_MASK + LOONGARCH_MAX_BRP) 48 + #define CSR_CFG_ASID (CSR_CFG_CTRL + LOONGARCH_MAX_WRP) 49 + 50 + /* Debug register names. */ 51 + #define LOONGARCH_CSR_NAME_ADDR ADDR 52 + #define LOONGARCH_CSR_NAME_MASK MASK 53 + #define LOONGARCH_CSR_NAME_CTRL CTRL 54 + #define LOONGARCH_CSR_NAME_ASID ASID 55 + 56 + /* Accessor macros for the debug registers. 
*/ 57 + #define LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL) \ 58 + do { \ 59 + if (T == 0) \ 60 + VAL = csr_read64(LOONGARCH_CSR_##IB##N##REG); \ 61 + else \ 62 + VAL = csr_read64(LOONGARCH_CSR_##DB##N##REG); \ 63 + } while (0) 64 + 65 + #define LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL) \ 66 + do { \ 67 + if (T == 0) \ 68 + csr_write64(VAL, LOONGARCH_CSR_##IB##N##REG); \ 69 + else \ 70 + csr_write64(VAL, LOONGARCH_CSR_##DB##N##REG); \ 71 + } while (0) 72 + 73 + /* Exact number */ 74 + #define CSR_FWPC_NUM 0x3f 75 + #define CSR_MWPC_NUM 0x3f 76 + 77 + #define CTRL_PLV_ENABLE 0x1e 78 + 79 + #define MWPnCFG3_LoadEn 8 80 + #define MWPnCFG3_StoreEn 9 81 + 82 + #define MWPnCFG3_Type_mask 0x3 83 + #define MWPnCFG3_Size_mask 0x3 84 + 85 + static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) 86 + { 87 + return (ctrl.len << 10) | (ctrl.type << 8); 88 + } 89 + 90 + static inline void decode_ctrl_reg(u32 reg, struct arch_hw_breakpoint_ctrl *ctrl) 91 + { 92 + reg >>= 8; 93 + ctrl->type = reg & MWPnCFG3_Type_mask; 94 + reg >>= 2; 95 + ctrl->len = reg & MWPnCFG3_Size_mask; 96 + } 97 + 98 + struct task_struct; 99 + struct notifier_block; 100 + struct perf_event; 101 + struct perf_event_attr; 102 + 103 + extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 104 + int *gen_len, int *gen_type, int *offset); 105 + extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw); 106 + extern int hw_breakpoint_arch_parse(struct perf_event *bp, 107 + const struct perf_event_attr *attr, 108 + struct arch_hw_breakpoint *hw); 109 + extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, 110 + unsigned long val, void *data); 111 + 112 + extern int arch_install_hw_breakpoint(struct perf_event *bp); 113 + extern void arch_uninstall_hw_breakpoint(struct perf_event *bp); 114 + extern int hw_breakpoint_slots(int type); 115 + extern void hw_breakpoint_pmu_read(struct perf_event *bp); 116 + 117 + void breakpoint_handler(struct pt_regs *regs); 
118 + void watchpoint_handler(struct pt_regs *regs); 119 + 120 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 121 + extern void ptrace_hw_copy_thread(struct task_struct *task); 122 + extern void hw_breakpoint_thread_switch(struct task_struct *next); 123 + #else 124 + static inline void ptrace_hw_copy_thread(struct task_struct *task) 125 + { 126 + } 127 + static inline void hw_breakpoint_thread_switch(struct task_struct *next) 128 + { 129 + } 130 + #endif 131 + 132 + /* Determine number of BRP registers available. */ 133 + static inline int get_num_brps(void) 134 + { 135 + return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM; 136 + } 137 + 138 + /* Determine number of WRP registers available. */ 139 + static inline int get_num_wrps(void) 140 + { 141 + return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM; 142 + } 143 + 144 + #endif /* __KERNEL__ */ 145 + #endif /* __ASM_BREAKPOINT_H */
+58
arch/loongarch/include/asm/inst.h
··· 7 7 8 8 #include <linux/types.h> 9 9 #include <asm/asm.h> 10 + #include <asm/ptrace.h> 10 11 11 12 #define INSN_NOP 0x03400000 12 13 #define INSN_BREAK 0x002a0000 ··· 24 23 25 24 #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN) 26 25 26 + enum reg0i15_op { 27 + break_op = 0x54, 28 + }; 29 + 27 30 enum reg0i26_op { 28 31 b_op = 0x14, 29 32 bl_op = 0x15, ··· 37 32 lu12iw_op = 0x0a, 38 33 lu32id_op = 0x0b, 39 34 pcaddi_op = 0x0c, 35 + pcalau12i_op = 0x0d, 40 36 pcaddu12i_op = 0x0e, 41 37 pcaddu18i_op = 0x0f, 42 38 }; ··· 184 178 alsld_op = 0x16, 185 179 }; 186 180 181 + struct reg0i15_format { 182 + unsigned int immediate : 15; 183 + unsigned int opcode : 17; 184 + }; 185 + 187 186 struct reg0i26_format { 188 187 unsigned int immediate_h : 10; 189 188 unsigned int immediate_l : 16; ··· 274 263 275 264 union loongarch_instruction { 276 265 unsigned int word; 266 + struct reg0i15_format reg0i15_format; 277 267 struct reg0i26_format reg0i26_format; 278 268 struct reg1i20_format reg1i20_format; 279 269 struct reg1i21_format reg1i21_format; ··· 333 321 return val & (1UL << (bit - 1)); 334 322 } 335 323 324 + static inline bool is_break_ins(union loongarch_instruction *ip) 325 + { 326 + return ip->reg0i15_format.opcode == break_op; 327 + } 328 + 336 329 static inline bool is_pc_ins(union loongarch_instruction *ip) 337 330 { 338 331 return ip->reg1i20_format.opcode >= pcaddi_op && ··· 367 350 ip->reg2i12_format.rd == LOONGARCH_GPR_SP && 368 351 is_imm12_negative(ip->reg2i12_format.immediate); 369 352 } 353 + 354 + static inline bool is_self_loop_ins(union loongarch_instruction *ip, struct pt_regs *regs) 355 + { 356 + switch (ip->reg0i26_format.opcode) { 357 + case b_op: 358 + case bl_op: 359 + if (ip->reg0i26_format.immediate_l == 0 360 + && ip->reg0i26_format.immediate_h == 0) 361 + return true; 362 + } 363 + 364 + switch (ip->reg1i21_format.opcode) { 365 + case beqz_op: 366 + case bnez_op: 367 + case bceqz_op: 368 + if 
(ip->reg1i21_format.immediate_l == 0 369 + && ip->reg1i21_format.immediate_h == 0) 370 + return true; 371 + } 372 + 373 + switch (ip->reg2i16_format.opcode) { 374 + case beq_op: 375 + case bne_op: 376 + case blt_op: 377 + case bge_op: 378 + case bltu_op: 379 + case bgeu_op: 380 + if (ip->reg2i16_format.immediate == 0) 381 + return true; 382 + break; 383 + case jirl_op: 384 + if (regs->regs[ip->reg2i16_format.rj] + 385 + ((unsigned long)ip->reg2i16_format.immediate << 2) == (unsigned long)ip) 386 + return true; 387 + } 388 + 389 + return false; 390 + } 391 + 392 + void simu_pc(struct pt_regs *regs, union loongarch_instruction insn); 393 + void simu_branch(struct pt_regs *regs, union loongarch_instruction insn); 370 394 371 395 int larch_insn_read(void *addr, u32 *insnp); 372 396 int larch_insn_write(void *addr, u32 insn);
+61
arch/loongarch/include/asm/kprobes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + #ifndef __ASM_LOONGARCH_KPROBES_H 3 + #define __ASM_LOONGARCH_KPROBES_H 4 + 5 + #include <asm-generic/kprobes.h> 6 + 7 + #ifdef CONFIG_KPROBES 8 + 9 + #include <asm/inst.h> 10 + #include <asm/cacheflush.h> 11 + 12 + #define __ARCH_WANT_KPROBES_INSN_SLOT 13 + #define MAX_INSN_SIZE 2 14 + 15 + #define flush_insn_slot(p) \ 16 + do { \ 17 + if (p->addr) \ 18 + flush_icache_range((unsigned long)p->addr, \ 19 + (unsigned long)p->addr + \ 20 + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ 21 + } while (0) 22 + 23 + #define kretprobe_blacklist_size 0 24 + 25 + typedef union loongarch_instruction kprobe_opcode_t; 26 + 27 + /* Architecture specific copy of original instruction */ 28 + struct arch_specific_insn { 29 + /* copy of the original instruction */ 30 + kprobe_opcode_t *insn; 31 + /* restore address after simulation */ 32 + unsigned long restore; 33 + }; 34 + 35 + struct prev_kprobe { 36 + struct kprobe *kp; 37 + unsigned int status; 38 + }; 39 + 40 + /* per-cpu kprobe control block */ 41 + struct kprobe_ctlblk { 42 + unsigned int kprobe_status; 43 + unsigned long saved_status; 44 + struct prev_kprobe prev_kprobe; 45 + }; 46 + 47 + void arch_remove_kprobe(struct kprobe *p); 48 + bool kprobe_fault_handler(struct pt_regs *regs, int trapnr); 49 + bool kprobe_breakpoint_handler(struct pt_regs *regs); 50 + bool kprobe_singlestep_handler(struct pt_regs *regs); 51 + 52 + void __kretprobe_trampoline(void); 53 + void *trampoline_probe_handler(struct pt_regs *regs); 54 + 55 + #else /* !CONFIG_KPROBES */ 56 + 57 + static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return false; } 58 + static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return false; } 59 + 60 + #endif /* CONFIG_KPROBES */ 61 + #endif /* __ASM_LOONGARCH_KPROBES_H */
+19 -16
arch/loongarch/include/asm/loongarch.h
··· 970 970 971 971 #define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */ 972 972 #define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */ 973 - #define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */ 973 + #define LOONGARCH_CSR_DB0CTRL 0x312 /* data breakpoint 0 control */ 974 974 #define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */ 975 975 976 976 #define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */ 977 977 #define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */ 978 - #define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */ 978 + #define LOONGARCH_CSR_DB1CTRL 0x31a /* data breakpoint 1 control */ 979 979 #define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */ 980 980 981 981 #define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */ 982 982 #define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */ 983 - #define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */ 983 + #define LOONGARCH_CSR_DB2CTRL 0x322 /* data breakpoint 2 control */ 984 984 #define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */ 985 985 986 986 #define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */ 987 987 #define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */ 988 - #define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */ 988 + #define LOONGARCH_CSR_DB3CTRL 0x32a /* data breakpoint 3 control */ 989 989 #define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */ 990 990 991 991 #define LOONGARCH_CSR_DB4ADDR 0x330 /* data breakpoint 4 address */ 992 992 #define LOONGARCH_CSR_DB4MASK 0x331 /* data breakpoint 4 maks */ 993 - #define LOONGARCH_CSR_DB4CTL 0x332 /* data breakpoint 4 control */ 993 + #define LOONGARCH_CSR_DB4CTRL 0x332 /* data breakpoint 4 control */ 994 994 #define LOONGARCH_CSR_DB4ASID 0x333 /* data breakpoint 4 asid */ 995 995 996 996 #define LOONGARCH_CSR_DB5ADDR 0x338 /* data breakpoint 5 address */ 997 997 #define 
LOONGARCH_CSR_DB5MASK 0x339 /* data breakpoint 5 mask */ 998 - #define LOONGARCH_CSR_DB5CTL 0x33a /* data breakpoint 5 control */ 998 + #define LOONGARCH_CSR_DB5CTRL 0x33a /* data breakpoint 5 control */ 999 999 #define LOONGARCH_CSR_DB5ASID 0x33b /* data breakpoint 5 asid */ 1000 1000 1001 1001 #define LOONGARCH_CSR_DB6ADDR 0x340 /* data breakpoint 6 address */ 1002 1002 #define LOONGARCH_CSR_DB6MASK 0x341 /* data breakpoint 6 mask */ 1003 - #define LOONGARCH_CSR_DB6CTL 0x342 /* data breakpoint 6 control */ 1003 + #define LOONGARCH_CSR_DB6CTRL 0x342 /* data breakpoint 6 control */ 1004 1004 #define LOONGARCH_CSR_DB6ASID 0x343 /* data breakpoint 6 asid */ 1005 1005 1006 1006 #define LOONGARCH_CSR_DB7ADDR 0x348 /* data breakpoint 7 address */ 1007 1007 #define LOONGARCH_CSR_DB7MASK 0x349 /* data breakpoint 7 mask */ 1008 - #define LOONGARCH_CSR_DB7CTL 0x34a /* data breakpoint 7 control */ 1008 + #define LOONGARCH_CSR_DB7CTRL 0x34a /* data breakpoint 7 control */ 1009 1009 #define LOONGARCH_CSR_DB7ASID 0x34b /* data breakpoint 7 asid */ 1010 1010 1011 1011 #define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */ ··· 1013 1013 1014 1014 #define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */ 1015 1015 #define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */ 1016 - #define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */ 1016 + #define LOONGARCH_CSR_IB0CTRL 0x392 /* inst breakpoint 0 control */ 1017 1017 #define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */ 1018 1018 1019 1019 #define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */ 1020 1020 #define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */ 1021 - #define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */ 1021 + #define LOONGARCH_CSR_IB1CTRL 0x39a /* inst breakpoint 1 control */ 1022 1022 #define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */ 1023 1023 1024 1024 #define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 
address */ 1025 1025 #define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */ 1026 - #define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */ 1026 + #define LOONGARCH_CSR_IB2CTRL 0x3a2 /* inst breakpoint 2 control */ 1027 1027 #define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */ 1028 1028 1029 1029 #define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */ 1030 1030 #define LOONGARCH_CSR_IB3MASK 0x3a9 /* breakpoint 3 mask */ 1031 - #define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */ 1031 + #define LOONGARCH_CSR_IB3CTRL 0x3aa /* inst breakpoint 3 control */ 1032 1032 #define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */ 1033 1033 1034 1034 #define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */ 1035 1035 #define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */ 1036 - #define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */ 1036 + #define LOONGARCH_CSR_IB4CTRL 0x3b2 /* inst breakpoint 4 control */ 1037 1037 #define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */ 1038 1038 1039 1039 #define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */ 1040 1040 #define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */ 1041 - #define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */ 1041 + #define LOONGARCH_CSR_IB5CTRL 0x3ba /* inst breakpoint 5 control */ 1042 1042 #define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */ 1043 1043 1044 1044 #define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */ 1045 1045 #define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */ 1046 - #define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */ 1046 + #define LOONGARCH_CSR_IB6CTRL 0x3c2 /* inst breakpoint 6 control */ 1047 1047 #define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */ 1048 1048 1049 1049 #define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */ 1050 1050 #define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst 
breakpoint 7 mask */ 1051 - #define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */ 1051 + #define LOONGARCH_CSR_IB7CTRL 0x3ca /* inst breakpoint 7 control */ 1052 1052 #define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */ 1053 1053 1054 1054 #define LOONGARCH_CSR_DEBUG 0x500 /* debug config */ 1055 1055 #define LOONGARCH_CSR_DERA 0x501 /* debug era */ 1056 1056 #define LOONGARCH_CSR_DESAVE 0x502 /* debug save */ 1057 + 1058 + #define CSR_FWPC_SKIP_SHIFT 16 1059 + #define CSR_FWPC_SKIP (_ULCAST_(1) << CSR_FWPC_SKIP_SHIFT) 1057 1060 1058 1061 /* 1059 1062 * CSR_ECFG IM
+10 -6
arch/loongarch/include/asm/processor.h
··· 11 11 12 12 #include <asm/cpu.h> 13 13 #include <asm/cpu-info.h> 14 + #include <asm/hw_breakpoint.h> 14 15 #include <asm/loongarch.h> 15 16 #include <asm/vdso/processor.h> 16 17 #include <uapi/asm/ptrace.h> ··· 125 124 /* Other stuff associated with the thread. */ 126 125 unsigned long trap_nr; 127 126 unsigned long error_code; 127 + unsigned long single_step; /* Used by PTRACE_SINGLESTEP */ 128 128 struct loongarch_vdso_info *vdso; 129 129 130 130 /* 131 - * FPU & vector registers, must be at last because 132 - * they are conditionally copied at fork(). 131 + * FPU & vector registers, must be at the last of inherited 132 + * context because they are conditionally copied at fork(). 133 133 */ 134 134 struct loongarch_fpu fpu FPU_ALIGN; 135 + 136 + /* Hardware breakpoints pinned to this task. */ 137 + struct perf_event *hbp_break[LOONGARCH_MAX_BRP]; 138 + struct perf_event *hbp_watch[LOONGARCH_MAX_WRP]; 135 139 }; 136 140 137 141 #define thread_saved_ra(tsk) (tsk->thread.sched_ra) ··· 178 172 .fcc = 0, \ 179 173 .fpr = {{{0,},},}, \ 180 174 }, \ 175 + .hbp_break = {0}, \ 176 + .hbp_watch = {0}, \ 181 177 } 182 178 183 179 struct task_struct; ··· 191 183 * Do necessary setup to start up a newly executed thread. 192 184 */ 193 185 extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); 194 - 195 - static inline void flush_thread(void) 196 - { 197 - } 198 186 199 187 unsigned long __get_wchan(struct task_struct *p); 200 188
+39
arch/loongarch/include/asm/ptrace.h
··· 6 6 #define _ASM_PTRACE_H 7 7 8 8 #include <asm/page.h> 9 + #include <asm/irqflags.h> 9 10 #include <asm/thread_info.h> 10 11 #include <uapi/asm/ptrace.h> 11 12 ··· 110 109 111 110 struct task_struct; 112 111 112 + /** 113 + * regs_get_kernel_argument() - get Nth function argument in kernel 114 + * @regs: pt_regs of that context 115 + * @n: function argument number (start from 0) 116 + * 117 + * regs_get_argument() returns @n th argument of the function call. 118 + * Note that this chooses most probably assignment, in some case 119 + * it can be incorrect. 120 + * This is expected to be called from kprobes or ftrace with regs 121 + * where the top of stack is the return address. 122 + */ 123 + static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, 124 + unsigned int n) 125 + { 126 + #define NR_REG_ARGUMENTS 8 127 + static const unsigned int args[] = { 128 + offsetof(struct pt_regs, regs[4]), 129 + offsetof(struct pt_regs, regs[5]), 130 + offsetof(struct pt_regs, regs[6]), 131 + offsetof(struct pt_regs, regs[7]), 132 + offsetof(struct pt_regs, regs[8]), 133 + offsetof(struct pt_regs, regs[9]), 134 + offsetof(struct pt_regs, regs[10]), 135 + offsetof(struct pt_regs, regs[11]), 136 + }; 137 + 138 + if (n < NR_REG_ARGUMENTS) 139 + return regs_get_register(regs, args[n]); 140 + else { 141 + n -= NR_REG_ARGUMENTS; 142 + return regs_get_kernel_stack_nth(regs, n); 143 + } 144 + } 145 + 113 146 /* 114 147 * Does the process account for user or for system time? 115 148 */ ··· 183 148 { 184 149 regs->regs[3] = val; 185 150 } 151 + 152 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 153 + #define arch_has_single_step() (1) 154 + #endif 186 155 187 156 #endif /* _ASM_PTRACE_H */
+16
arch/loongarch/include/asm/setup.h
··· 21 21 extern void set_handler(unsigned long offset, void *addr, unsigned long len); 22 22 extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len); 23 23 24 + #ifdef CONFIG_RELOCATABLE 25 + 26 + struct rela_la_abs { 27 + long offset; 28 + long symvalue; 29 + }; 30 + 31 + extern long __la_abs_begin; 32 + extern long __la_abs_end; 33 + extern long __rela_dyn_begin; 34 + extern long __rela_dyn_end; 35 + 36 + extern void * __init relocate_kernel(void); 37 + 38 + #endif 39 + 24 40 #endif /* __SETUP_H */
+11 -2
arch/loongarch/include/asm/stackframe.h
··· 7 7 8 8 #include <linux/threads.h> 9 9 10 + #include <asm/addrspace.h> 10 11 #include <asm/asm.h> 11 12 #include <asm/asmmacro.h> 12 13 #include <asm/asm-offsets.h> ··· 35 34 .macro cfi_ld reg offset=0 docfi=0 36 35 LONG_L \reg, sp, \offset 37 36 cfi_restore \reg \offset \docfi 37 + .endm 38 + 39 + /* Jump to the runtime virtual address. */ 40 + .macro JUMP_VIRT_ADDR temp1 temp2 41 + li.d \temp1, CACHE_BASE 42 + pcaddi \temp2, 0 43 + or \temp1, \temp1, \temp2 44 + jirl zero, \temp1, 0xc 38 45 .endm 39 46 40 47 .macro BACKUP_T0T1 ··· 86 77 * new value in sp. 87 78 */ 88 79 .macro get_saved_sp docfi=0 89 - la.abs t1, kernelsp 80 + la_abs t1, kernelsp 90 81 #ifdef CONFIG_SMP 91 82 csrrd t0, PERCPU_BASE_KS 92 83 LONG_ADD t1, t1, t0 ··· 99 90 .endm 100 91 101 92 .macro set_saved_sp stackp temp temp2 102 - la.abs \temp, kernelsp 93 + la.pcrel \temp, kernelsp 103 94 #ifdef CONFIG_SMP 104 95 LONG_ADD \temp, \temp, u0 105 96 #endif
+1
arch/loongarch/include/asm/switch_to.h
··· 34 34 #define switch_to(prev, next, last) \ 35 35 do { \ 36 36 lose_fpu_inatomic(1, prev); \ 37 + hw_breakpoint_thread_switch(next); \ 37 38 (last) = __switch_to(prev, next, task_thread_info(next), \ 38 39 __builtin_return_address(0), __builtin_frame_address(0)); \ 39 40 } while (0)
-1
arch/loongarch/include/asm/uaccess.h
··· 22 22 extern u64 __ua_limit; 23 23 24 24 #define __UA_ADDR ".dword" 25 - #define __UA_LA "la.abs" 26 25 #define __UA_LIMIT __ua_limit 27 26 28 27 /*
+9
arch/loongarch/include/uapi/asm/ptrace.h
··· 46 46 uint32_t fcsr; 47 47 }; 48 48 49 + struct user_watch_state { 50 + uint16_t dbg_info; 51 + struct { 52 + uint64_t addr; 53 + uint64_t mask; 54 + uint32_t ctrl; 55 + } dbg_regs[8]; 56 + }; 57 + 49 58 #define PTRACE_SYSEMU 0x1f 50 59 #define PTRACE_SYSEMU_SINGLESTEP 0x20 51 60
+8 -1
arch/loongarch/kernel/Makefile
··· 8 8 obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ 9 9 traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ 10 10 elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ 11 - alternative.o unaligned.o unwind.o 11 + alternative.o unwind.o 12 12 13 13 obj-$(CONFIG_ACPI) += acpi.o 14 14 obj-$(CONFIG_EFI) += efi.o 15 15 16 16 obj-$(CONFIG_CPU_HAS_FPU) += fpu.o 17 + 18 + obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o 17 19 18 20 ifdef CONFIG_FUNCTION_TRACER 19 21 ifndef CONFIG_DYNAMIC_FTRACE ··· 41 39 42 40 obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o 43 41 42 + obj-$(CONFIG_RELOCATABLE) += relocate.o 43 + 44 44 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 45 45 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 46 46 ··· 50 46 obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o 51 47 52 48 obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o 49 + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 50 + 51 + obj-$(CONFIG_KPROBES) += kprobes.o kprobes_trampoline.o 53 52 54 53 CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
+46 -45
arch/loongarch/kernel/entry.S
··· 19 19 .cfi_sections .debug_frame 20 20 .align 5 21 21 SYM_FUNC_START(handle_syscall) 22 - csrrd t0, PERCPU_BASE_KS 23 - la.abs t1, kernelsp 24 - add.d t1, t1, t0 25 - move t2, sp 26 - ld.d sp, t1, 0 22 + csrrd t0, PERCPU_BASE_KS 23 + la.pcrel t1, kernelsp 24 + add.d t1, t1, t0 25 + move t2, sp 26 + ld.d sp, t1, 0 27 27 28 - addi.d sp, sp, -PT_SIZE 29 - cfi_st t2, PT_R3 28 + addi.d sp, sp, -PT_SIZE 29 + cfi_st t2, PT_R3 30 30 cfi_rel_offset sp, PT_R3 31 - st.d zero, sp, PT_R0 32 - csrrd t2, LOONGARCH_CSR_PRMD 33 - st.d t2, sp, PT_PRMD 34 - csrrd t2, LOONGARCH_CSR_CRMD 35 - st.d t2, sp, PT_CRMD 36 - csrrd t2, LOONGARCH_CSR_EUEN 37 - st.d t2, sp, PT_EUEN 38 - csrrd t2, LOONGARCH_CSR_ECFG 39 - st.d t2, sp, PT_ECFG 40 - csrrd t2, LOONGARCH_CSR_ESTAT 41 - st.d t2, sp, PT_ESTAT 42 - cfi_st ra, PT_R1 43 - cfi_st a0, PT_R4 44 - cfi_st a1, PT_R5 45 - cfi_st a2, PT_R6 46 - cfi_st a3, PT_R7 47 - cfi_st a4, PT_R8 48 - cfi_st a5, PT_R9 49 - cfi_st a6, PT_R10 50 - cfi_st a7, PT_R11 51 - csrrd ra, LOONGARCH_CSR_ERA 52 - st.d ra, sp, PT_ERA 31 + st.d zero, sp, PT_R0 32 + csrrd t2, LOONGARCH_CSR_PRMD 33 + st.d t2, sp, PT_PRMD 34 + csrrd t2, LOONGARCH_CSR_CRMD 35 + st.d t2, sp, PT_CRMD 36 + csrrd t2, LOONGARCH_CSR_EUEN 37 + st.d t2, sp, PT_EUEN 38 + csrrd t2, LOONGARCH_CSR_ECFG 39 + st.d t2, sp, PT_ECFG 40 + csrrd t2, LOONGARCH_CSR_ESTAT 41 + st.d t2, sp, PT_ESTAT 42 + cfi_st ra, PT_R1 43 + cfi_st a0, PT_R4 44 + cfi_st a1, PT_R5 45 + cfi_st a2, PT_R6 46 + cfi_st a3, PT_R7 47 + cfi_st a4, PT_R8 48 + cfi_st a5, PT_R9 49 + cfi_st a6, PT_R10 50 + cfi_st a7, PT_R11 51 + csrrd ra, LOONGARCH_CSR_ERA 52 + st.d ra, sp, PT_ERA 53 53 cfi_rel_offset ra, PT_ERA 54 54 55 - cfi_st tp, PT_R2 56 - cfi_st u0, PT_R21 57 - cfi_st fp, PT_R22 55 + cfi_st tp, PT_R2 56 + cfi_st u0, PT_R21 57 + cfi_st fp, PT_R22 58 58 59 59 SAVE_STATIC 60 60 61 - move u0, t0 62 - li.d tp, ~_THREAD_MASK 63 - and tp, tp, sp 61 + move u0, t0 62 + li.d tp, ~_THREAD_MASK 63 + and tp, tp, sp 64 64 65 - move a0, sp 66 - bl 
do_syscall 65 + move a0, sp 66 + bl do_syscall 67 67 68 68 RESTORE_ALL_AND_RET 69 69 SYM_FUNC_END(handle_syscall) 70 + _ASM_NOKPROBE(handle_syscall) 70 71 71 72 SYM_CODE_START(ret_from_fork) 72 - bl schedule_tail # a0 = struct task_struct *prev 73 - move a0, sp 74 - bl syscall_exit_to_user_mode 73 + bl schedule_tail # a0 = struct task_struct *prev 74 + move a0, sp 75 + bl syscall_exit_to_user_mode 75 76 RESTORE_STATIC 76 77 RESTORE_SOME 77 78 RESTORE_SP_AND_RET 78 79 SYM_CODE_END(ret_from_fork) 79 80 80 81 SYM_CODE_START(ret_from_kernel_thread) 81 - bl schedule_tail # a0 = struct task_struct *prev 82 - move a0, s1 83 - jirl ra, s0, 0 84 - move a0, sp 85 - bl syscall_exit_to_user_mode 82 + bl schedule_tail # a0 = struct task_struct *prev 83 + move a0, s1 84 + jirl ra, s0, 0 85 + move a0, sp 86 + bl syscall_exit_to_user_mode 86 87 RESTORE_STATIC 87 88 RESTORE_SOME 88 89 RESTORE_SP_AND_RET
+64
arch/loongarch/kernel/ftrace_dyn.c
··· 6 6 */ 7 7 8 8 #include <linux/ftrace.h> 9 + #include <linux/kprobes.h> 9 10 #include <linux/uaccess.h> 10 11 11 12 #include <asm/inst.h> ··· 272 271 } 273 272 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ 274 273 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 274 + 275 + #ifdef CONFIG_KPROBES_ON_FTRACE 276 + /* Ftrace callback handler for kprobes -- called under preepmt disabled */ 277 + void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 278 + struct ftrace_ops *ops, struct ftrace_regs *fregs) 279 + { 280 + int bit; 281 + struct pt_regs *regs; 282 + struct kprobe *p; 283 + struct kprobe_ctlblk *kcb; 284 + 285 + bit = ftrace_test_recursion_trylock(ip, parent_ip); 286 + if (bit < 0) 287 + return; 288 + 289 + p = get_kprobe((kprobe_opcode_t *)ip); 290 + if (unlikely(!p) || kprobe_disabled(p)) 291 + goto out; 292 + 293 + regs = ftrace_get_regs(fregs); 294 + if (!regs) 295 + goto out; 296 + 297 + kcb = get_kprobe_ctlblk(); 298 + if (kprobe_running()) { 299 + kprobes_inc_nmissed_count(p); 300 + } else { 301 + unsigned long orig_ip = instruction_pointer(regs); 302 + 303 + instruction_pointer_set(regs, ip); 304 + 305 + __this_cpu_write(current_kprobe, p); 306 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 307 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 308 + /* 309 + * Emulate singlestep (and also recover regs->csr_era) 310 + * as if there is a nop 311 + */ 312 + instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE); 313 + if (unlikely(p->post_handler)) { 314 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 315 + p->post_handler(p, regs, 0); 316 + } 317 + instruction_pointer_set(regs, orig_ip); 318 + } 319 + 320 + /* 321 + * If pre_handler returns !0, it changes regs->csr_era. We have to 322 + * skip emulating post_handler. 
323 + */ 324 + __this_cpu_write(current_kprobe, NULL); 325 + } 326 + out: 327 + ftrace_test_recursion_unlock(bit); 328 + } 329 + NOKPROBE_SYMBOL(kprobe_ftrace_handler); 330 + 331 + int arch_prepare_kprobe_ftrace(struct kprobe *p) 332 + { 333 + p->ainsn.insn = NULL; 334 + return 0; 335 + } 336 + #endif /* CONFIG_KPROBES_ON_FTRACE */
+4 -4
arch/loongarch/kernel/genex.S
··· 34 34 SYM_FUNC_START(handle_vint) 35 35 BACKUP_T0T1 36 36 SAVE_ALL 37 - la.abs t1, __arch_cpu_idle 37 + la_abs t1, __arch_cpu_idle 38 38 LONG_L t0, sp, PT_ERA 39 39 /* 32 byte rollback region */ 40 40 ori t0, t0, 0x1f ··· 43 43 LONG_S t0, sp, PT_ERA 44 44 1: move a0, sp 45 45 move a1, sp 46 - la.abs t0, do_vint 46 + la_abs t0, do_vint 47 47 jirl ra, t0, 0 48 48 RESTORE_ALL_AND_RET 49 49 SYM_FUNC_END(handle_vint) ··· 72 72 SAVE_ALL 73 73 build_prep_\prep 74 74 move a0, sp 75 - la.abs t0, do_\handler 75 + la_abs t0, do_\handler 76 76 jirl ra, t0, 0 77 77 668: 78 78 RESTORE_ALL_AND_RET ··· 93 93 BUILD_HANDLER reserved reserved none /* others */ 94 94 95 95 SYM_FUNC_START(handle_sys) 96 - la.abs t0, handle_syscall 96 + la_abs t0, handle_syscall 97 97 jr t0 98 98 SYM_FUNC_END(handle_sys)
+23 -10
arch/loongarch/kernel/head.S
··· 24 24 .org 0x8 25 25 .dword kernel_entry /* Kernel entry point */ 26 26 .dword _end - _text /* Kernel image effective size */ 27 - .quad 0 /* Kernel image load offset from start of RAM */ 27 + .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */ 28 28 .org 0x38 /* 0x20 ~ 0x37 reserved */ 29 29 .long LINUX_PE_MAGIC 30 30 .long pe_header - _head /* Offset to the PE header */ ··· 50 50 li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx 51 51 csrwr t0, LOONGARCH_CSR_DMWIN1 52 52 53 - /* We might not get launched at the address the kernel is linked to, 54 - so we jump there. */ 55 - la.abs t0, 0f 56 - jr t0 57 - 0: 53 + JUMP_VIRT_ADDR t0, t1 54 + 58 55 /* Enable PG */ 59 56 li.w t0, 0xb0 # PLV=0, IE=0, PG=1 60 57 csrwr t0, LOONGARCH_CSR_CRMD ··· 86 89 PTR_ADD sp, sp, tp 87 90 set_saved_sp sp, t0, t1 88 91 92 + #ifdef CONFIG_RELOCATABLE 93 + 94 + bl relocate_kernel 95 + 96 + #ifdef CONFIG_RANDOMIZE_BASE 97 + /* Repoint the sp into the new kernel */ 98 + PTR_LI sp, (_THREAD_SIZE - PT_SIZE) 99 + PTR_ADD sp, sp, tp 100 + set_saved_sp sp, t0, t1 101 + #endif 102 + 103 + /* relocate_kernel() returns the new kernel entry point */ 104 + jr a0 105 + ASM_BUG() 106 + 107 + #endif 108 + 89 109 bl start_kernel 90 110 ASM_BUG() 91 111 ··· 120 106 li.d t0, CSR_DMW1_INIT # CA, PLV0 121 107 csrwr t0, LOONGARCH_CSR_DMWIN1 122 108 123 - la.abs t0, 0f 124 - jr t0 125 - 0: 109 + JUMP_VIRT_ADDR t0, t1 110 + 126 111 /* Enable PG */ 127 112 li.w t0, 0xb0 # PLV=0, IE=0, PG=1 128 113 csrwr t0, LOONGARCH_CSR_CRMD ··· 130 117 li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 131 118 csrwr t0, LOONGARCH_CSR_EUEN 132 119 133 - la.abs t0, cpuboot_data 120 + la.pcrel t0, cpuboot_data 134 121 ld.d sp, t0, CPU_BOOT_STACK 135 122 ld.d tp, t0, CPU_BOOT_TINFO 136 123
+548
arch/loongarch/kernel/hw_breakpoint.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2022-2023 Loongson Technology Corporation Limited 4 + */ 5 + #define pr_fmt(fmt) "hw-breakpoint: " fmt 6 + 7 + #include <linux/hw_breakpoint.h> 8 + #include <linux/kprobes.h> 9 + #include <linux/perf_event.h> 10 + 11 + #include <asm/hw_breakpoint.h> 12 + 13 + /* Breakpoint currently in use for each BRP. */ 14 + static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[LOONGARCH_MAX_BRP]); 15 + 16 + /* Watchpoint currently in use for each WRP. */ 17 + static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[LOONGARCH_MAX_WRP]); 18 + 19 + int hw_breakpoint_slots(int type) 20 + { 21 + /* 22 + * We can be called early, so don't rely on 23 + * our static variables being initialised. 24 + */ 25 + switch (type) { 26 + case TYPE_INST: 27 + return get_num_brps(); 28 + case TYPE_DATA: 29 + return get_num_wrps(); 30 + default: 31 + pr_warn("unknown slot type: %d\n", type); 32 + return 0; 33 + } 34 + } 35 + 36 + #define READ_WB_REG_CASE(OFF, N, REG, T, VAL) \ 37 + case (OFF + N): \ 38 + LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL); \ 39 + break 40 + 41 + #define WRITE_WB_REG_CASE(OFF, N, REG, T, VAL) \ 42 + case (OFF + N): \ 43 + LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL); \ 44 + break 45 + 46 + #define GEN_READ_WB_REG_CASES(OFF, REG, T, VAL) \ 47 + READ_WB_REG_CASE(OFF, 0, REG, T, VAL); \ 48 + READ_WB_REG_CASE(OFF, 1, REG, T, VAL); \ 49 + READ_WB_REG_CASE(OFF, 2, REG, T, VAL); \ 50 + READ_WB_REG_CASE(OFF, 3, REG, T, VAL); \ 51 + READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \ 52 + READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \ 53 + READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \ 54 + READ_WB_REG_CASE(OFF, 7, REG, T, VAL); 55 + 56 + #define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \ 57 + WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \ 58 + WRITE_WB_REG_CASE(OFF, 1, REG, T, VAL); \ 59 + WRITE_WB_REG_CASE(OFF, 2, REG, T, VAL); \ 60 + WRITE_WB_REG_CASE(OFF, 3, REG, T, VAL); \ 61 + WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \ 62 + WRITE_WB_REG_CASE(OFF, 
5, REG, T, VAL); \ 63 + WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \ 64 + WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL); 65 + 66 + static u64 read_wb_reg(int reg, int n, int t) 67 + { 68 + u64 val = 0; 69 + 70 + switch (reg + n) { 71 + GEN_READ_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val); 72 + GEN_READ_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val); 73 + GEN_READ_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val); 74 + GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val); 75 + default: 76 + pr_warn("Attempt to read from unknown breakpoint register %d\n", n); 77 + } 78 + 79 + return val; 80 + } 81 + NOKPROBE_SYMBOL(read_wb_reg); 82 + 83 + static void write_wb_reg(int reg, int n, int t, u64 val) 84 + { 85 + switch (reg + n) { 86 + GEN_WRITE_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val); 87 + GEN_WRITE_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val); 88 + GEN_WRITE_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val); 89 + GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val); 90 + default: 91 + pr_warn("Attempt to write to unknown breakpoint register %d\n", n); 92 + } 93 + } 94 + NOKPROBE_SYMBOL(write_wb_reg); 95 + 96 + enum hw_breakpoint_ops { 97 + HW_BREAKPOINT_INSTALL, 98 + HW_BREAKPOINT_UNINSTALL, 99 + }; 100 + 101 + /* 102 + * hw_breakpoint_slot_setup - Find and setup a perf slot according to operations 103 + * 104 + * @slots: pointer to array of slots 105 + * @max_slots: max number of slots 106 + * @bp: perf_event to setup 107 + * @ops: operation to be carried out on the slot 108 + * 109 + * Return: 110 + * slot index on success 111 + * -ENOSPC if no slot is available/matches 112 + * -EINVAL on wrong operations parameter 113 + */ 114 + 115 + static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, 116 + struct perf_event *bp, enum hw_breakpoint_ops ops) 117 + { 118 + int i; 119 + struct perf_event **slot; 120 + 121 + for (i = 0; i < max_slots; ++i) { 122 + slot = &slots[i]; 123 + switch (ops) { 124 + case HW_BREAKPOINT_INSTALL: 125 + if (!*slot) { 126 + *slot = bp; 127 + return i; 128 + } 129 + 
break; 130 + case HW_BREAKPOINT_UNINSTALL: 131 + if (*slot == bp) { 132 + *slot = NULL; 133 + return i; 134 + } 135 + break; 136 + default: 137 + pr_warn_once("Unhandled hw breakpoint ops %d\n", ops); 138 + return -EINVAL; 139 + } 140 + } 141 + 142 + return -ENOSPC; 143 + } 144 + 145 + void ptrace_hw_copy_thread(struct task_struct *tsk) 146 + { 147 + memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break)); 148 + memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch)); 149 + } 150 + 151 + /* 152 + * Unregister breakpoints from this task and reset the pointers in the thread_struct. 153 + */ 154 + void flush_ptrace_hw_breakpoint(struct task_struct *tsk) 155 + { 156 + int i; 157 + struct thread_struct *t = &tsk->thread; 158 + 159 + for (i = 0; i < LOONGARCH_MAX_BRP; i++) { 160 + if (t->hbp_break[i]) { 161 + unregister_hw_breakpoint(t->hbp_break[i]); 162 + t->hbp_break[i] = NULL; 163 + } 164 + } 165 + 166 + for (i = 0; i < LOONGARCH_MAX_WRP; i++) { 167 + if (t->hbp_watch[i]) { 168 + unregister_hw_breakpoint(t->hbp_watch[i]); 169 + t->hbp_watch[i] = NULL; 170 + } 171 + } 172 + } 173 + 174 + static int hw_breakpoint_control(struct perf_event *bp, 175 + enum hw_breakpoint_ops ops) 176 + { 177 + u32 ctrl; 178 + int i, max_slots, enable; 179 + struct perf_event **slots; 180 + struct arch_hw_breakpoint *info = counter_arch_bp(bp); 181 + 182 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 183 + /* Breakpoint */ 184 + slots = this_cpu_ptr(bp_on_reg); 185 + max_slots = boot_cpu_data.watch_ireg_count; 186 + } else { 187 + /* Watchpoint */ 188 + slots = this_cpu_ptr(wp_on_reg); 189 + max_slots = boot_cpu_data.watch_dreg_count; 190 + } 191 + 192 + i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); 193 + 194 + if (WARN_ONCE(i < 0, "Can't find any breakpoint slot")) 195 + return i; 196 + 197 + switch (ops) { 198 + case HW_BREAKPOINT_INSTALL: 199 + /* Set the FWPnCFG/MWPnCFG 1~4 register. 
*/ 200 + write_wb_reg(CSR_CFG_ADDR, i, 0, info->address); 201 + write_wb_reg(CSR_CFG_ADDR, i, 1, info->address); 202 + write_wb_reg(CSR_CFG_MASK, i, 0, info->mask); 203 + write_wb_reg(CSR_CFG_MASK, i, 1, info->mask); 204 + write_wb_reg(CSR_CFG_ASID, i, 0, 0); 205 + write_wb_reg(CSR_CFG_ASID, i, 1, 0); 206 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) { 207 + write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE); 208 + } else { 209 + ctrl = encode_ctrl_reg(info->ctrl); 210 + write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE | 211 + 1 << MWPnCFG3_LoadEn | 1 << MWPnCFG3_StoreEn); 212 + } 213 + enable = csr_read64(LOONGARCH_CSR_CRMD); 214 + csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD); 215 + break; 216 + case HW_BREAKPOINT_UNINSTALL: 217 + /* Reset the FWPnCFG/MWPnCFG 1~4 register. */ 218 + write_wb_reg(CSR_CFG_ADDR, i, 0, 0); 219 + write_wb_reg(CSR_CFG_ADDR, i, 1, 0); 220 + write_wb_reg(CSR_CFG_MASK, i, 0, 0); 221 + write_wb_reg(CSR_CFG_MASK, i, 1, 0); 222 + write_wb_reg(CSR_CFG_CTRL, i, 0, 0); 223 + write_wb_reg(CSR_CFG_CTRL, i, 1, 0); 224 + write_wb_reg(CSR_CFG_ASID, i, 0, 0); 225 + write_wb_reg(CSR_CFG_ASID, i, 1, 0); 226 + break; 227 + } 228 + 229 + return 0; 230 + } 231 + 232 + /* 233 + * Install a perf counter breakpoint. 
234 + */ 235 + int arch_install_hw_breakpoint(struct perf_event *bp) 236 + { 237 + return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); 238 + } 239 + 240 + void arch_uninstall_hw_breakpoint(struct perf_event *bp) 241 + { 242 + hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); 243 + } 244 + 245 + static int get_hbp_len(u8 hbp_len) 246 + { 247 + unsigned int len_in_bytes = 0; 248 + 249 + switch (hbp_len) { 250 + case LOONGARCH_BREAKPOINT_LEN_1: 251 + len_in_bytes = 1; 252 + break; 253 + case LOONGARCH_BREAKPOINT_LEN_2: 254 + len_in_bytes = 2; 255 + break; 256 + case LOONGARCH_BREAKPOINT_LEN_4: 257 + len_in_bytes = 4; 258 + break; 259 + case LOONGARCH_BREAKPOINT_LEN_8: 260 + len_in_bytes = 8; 261 + break; 262 + } 263 + 264 + return len_in_bytes; 265 + } 266 + 267 + /* 268 + * Check whether bp virtual address is in kernel space. 269 + */ 270 + int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) 271 + { 272 + unsigned int len; 273 + unsigned long va; 274 + 275 + va = hw->address; 276 + len = get_hbp_len(hw->ctrl.len); 277 + 278 + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 279 + } 280 + 281 + /* 282 + * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. 283 + * Hopefully this will disappear when ptrace can bypass the conversion 284 + * to generic breakpoint descriptions. 
285 + */ 286 + int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, 287 + int *gen_len, int *gen_type, int *offset) 288 + { 289 + /* Type */ 290 + switch (ctrl.type) { 291 + case LOONGARCH_BREAKPOINT_EXECUTE: 292 + *gen_type = HW_BREAKPOINT_X; 293 + break; 294 + case LOONGARCH_BREAKPOINT_LOAD: 295 + *gen_type = HW_BREAKPOINT_R; 296 + break; 297 + case LOONGARCH_BREAKPOINT_STORE: 298 + *gen_type = HW_BREAKPOINT_W; 299 + break; 300 + case LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE: 301 + *gen_type = HW_BREAKPOINT_RW; 302 + break; 303 + default: 304 + return -EINVAL; 305 + } 306 + 307 + if (!ctrl.len) 308 + return -EINVAL; 309 + 310 + *offset = __ffs(ctrl.len); 311 + 312 + /* Len */ 313 + switch (ctrl.len) { 314 + case LOONGARCH_BREAKPOINT_LEN_1: 315 + *gen_len = HW_BREAKPOINT_LEN_1; 316 + break; 317 + case LOONGARCH_BREAKPOINT_LEN_2: 318 + *gen_len = HW_BREAKPOINT_LEN_2; 319 + break; 320 + case LOONGARCH_BREAKPOINT_LEN_4: 321 + *gen_len = HW_BREAKPOINT_LEN_4; 322 + break; 323 + case LOONGARCH_BREAKPOINT_LEN_8: 324 + *gen_len = HW_BREAKPOINT_LEN_8; 325 + break; 326 + default: 327 + return -EINVAL; 328 + } 329 + 330 + return 0; 331 + } 332 + 333 + /* 334 + * Construct an arch_hw_breakpoint from a perf_event. 
335 + */ 336 + static int arch_build_bp_info(struct perf_event *bp, 337 + const struct perf_event_attr *attr, 338 + struct arch_hw_breakpoint *hw) 339 + { 340 + /* Type */ 341 + switch (attr->bp_type) { 342 + case HW_BREAKPOINT_X: 343 + hw->ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE; 344 + break; 345 + case HW_BREAKPOINT_R: 346 + hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD; 347 + break; 348 + case HW_BREAKPOINT_W: 349 + hw->ctrl.type = LOONGARCH_BREAKPOINT_STORE; 350 + break; 351 + case HW_BREAKPOINT_RW: 352 + hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE; 353 + break; 354 + default: 355 + return -EINVAL; 356 + } 357 + 358 + /* Len */ 359 + switch (attr->bp_len) { 360 + case HW_BREAKPOINT_LEN_1: 361 + hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_1; 362 + break; 363 + case HW_BREAKPOINT_LEN_2: 364 + hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_2; 365 + break; 366 + case HW_BREAKPOINT_LEN_4: 367 + hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_4; 368 + break; 369 + case HW_BREAKPOINT_LEN_8: 370 + hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_8; 371 + break; 372 + default: 373 + return -EINVAL; 374 + } 375 + 376 + /* Address */ 377 + hw->address = attr->bp_addr; 378 + 379 + return 0; 380 + } 381 + 382 + /* 383 + * Validate the arch-specific HW Breakpoint register settings. 384 + */ 385 + int hw_breakpoint_arch_parse(struct perf_event *bp, 386 + const struct perf_event_attr *attr, 387 + struct arch_hw_breakpoint *hw) 388 + { 389 + int ret; 390 + u64 alignment_mask, offset; 391 + 392 + /* Build the arch_hw_breakpoint. 
*/ 393 + ret = arch_build_bp_info(bp, attr, hw); 394 + if (ret) 395 + return ret; 396 + 397 + if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE) 398 + alignment_mask = 0x7; 399 + offset = hw->address & alignment_mask; 400 + 401 + hw->address &= ~alignment_mask; 402 + hw->ctrl.len <<= offset; 403 + 404 + return 0; 405 + } 406 + 407 + static void update_bp_registers(struct pt_regs *regs, int enable, int type) 408 + { 409 + u32 ctrl; 410 + int i, max_slots; 411 + struct perf_event **slots; 412 + struct arch_hw_breakpoint *info; 413 + 414 + switch (type) { 415 + case 0: 416 + slots = this_cpu_ptr(bp_on_reg); 417 + max_slots = boot_cpu_data.watch_ireg_count; 418 + break; 419 + case 1: 420 + slots = this_cpu_ptr(wp_on_reg); 421 + max_slots = boot_cpu_data.watch_dreg_count; 422 + break; 423 + default: 424 + return; 425 + } 426 + 427 + for (i = 0; i < max_slots; ++i) { 428 + if (!slots[i]) 429 + continue; 430 + 431 + info = counter_arch_bp(slots[i]); 432 + if (enable) { 433 + if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) { 434 + write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE); 435 + write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE); 436 + } else { 437 + ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1); 438 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD) 439 + ctrl |= 0x1 << MWPnCFG3_LoadEn; 440 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE) 441 + ctrl |= 0x1 << MWPnCFG3_StoreEn; 442 + write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl); 443 + } 444 + regs->csr_prmd |= CSR_PRMD_PWE; 445 + } else { 446 + if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) { 447 + write_wb_reg(CSR_CFG_CTRL, i, 0, 0); 448 + } else { 449 + ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1); 450 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD) 451 + ctrl &= ~0x1 << MWPnCFG3_LoadEn; 452 + if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE) 453 + ctrl &= ~0x1 << MWPnCFG3_StoreEn; 454 + write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl); 455 + } 456 + regs->csr_prmd &= ~CSR_PRMD_PWE; 457 + 
} 458 + } 459 + } 460 + NOKPROBE_SYMBOL(update_bp_registers); 461 + 462 + /* 463 + * Debug exception handlers. 464 + */ 465 + void breakpoint_handler(struct pt_regs *regs) 466 + { 467 + int i; 468 + struct perf_event *bp, **slots; 469 + 470 + slots = this_cpu_ptr(bp_on_reg); 471 + 472 + for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) { 473 + bp = slots[i]; 474 + if (bp == NULL) 475 + continue; 476 + perf_bp_event(bp, regs); 477 + } 478 + update_bp_registers(regs, 0, 0); 479 + } 480 + NOKPROBE_SYMBOL(breakpoint_handler); 481 + 482 + void watchpoint_handler(struct pt_regs *regs) 483 + { 484 + int i; 485 + struct perf_event *wp, **slots; 486 + 487 + slots = this_cpu_ptr(wp_on_reg); 488 + 489 + for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) { 490 + wp = slots[i]; 491 + if (wp == NULL) 492 + continue; 493 + perf_bp_event(wp, regs); 494 + } 495 + update_bp_registers(regs, 0, 1); 496 + } 497 + NOKPROBE_SYMBOL(watchpoint_handler); 498 + 499 + static int __init arch_hw_breakpoint_init(void) 500 + { 501 + int cpu; 502 + 503 + boot_cpu_data.watch_ireg_count = get_num_brps(); 504 + boot_cpu_data.watch_dreg_count = get_num_wrps(); 505 + 506 + pr_info("Found %d breakpoint and %d watchpoint registers.\n", 507 + boot_cpu_data.watch_ireg_count, boot_cpu_data.watch_dreg_count); 508 + 509 + for (cpu = 1; cpu < NR_CPUS; cpu++) { 510 + cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count; 511 + cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count; 512 + } 513 + 514 + return 0; 515 + } 516 + arch_initcall(arch_hw_breakpoint_init); 517 + 518 + void hw_breakpoint_thread_switch(struct task_struct *next) 519 + { 520 + u64 addr, mask; 521 + struct pt_regs *regs = task_pt_regs(next); 522 + 523 + if (test_tsk_thread_flag(next, TIF_SINGLESTEP)) { 524 + addr = read_wb_reg(CSR_CFG_ADDR, 0, 0); 525 + mask = read_wb_reg(CSR_CFG_MASK, 0, 0); 526 + if (!((regs->csr_era ^ addr) & ~mask)) 527 + csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS); 528 + regs->csr_prmd |= 
CSR_PRMD_PWE; 529 + } else { 530 + /* Update breakpoints */ 531 + update_bp_registers(regs, 1, 0); 532 + /* Update watchpoints */ 533 + update_bp_registers(regs, 1, 1); 534 + } 535 + } 536 + 537 + void hw_breakpoint_pmu_read(struct perf_event *bp) 538 + { 539 + } 540 + 541 + /* 542 + * Dummy function to register with die_notifier. 543 + */ 544 + int hw_breakpoint_exceptions_notify(struct notifier_block *unused, 545 + unsigned long val, void *data) 546 + { 547 + return NOTIFY_DONE; 548 + }
+123
arch/loongarch/kernel/inst.c
··· 10 10 11 11 static DEFINE_RAW_SPINLOCK(patch_lock); 12 12 13 + void simu_pc(struct pt_regs *regs, union loongarch_instruction insn) 14 + { 15 + unsigned long pc = regs->csr_era; 16 + unsigned int rd = insn.reg1i20_format.rd; 17 + unsigned int imm = insn.reg1i20_format.immediate; 18 + 19 + if (pc & 3) { 20 + pr_warn("%s: invalid pc 0x%lx\n", __func__, pc); 21 + return; 22 + } 23 + 24 + switch (insn.reg1i20_format.opcode) { 25 + case pcaddi_op: 26 + regs->regs[rd] = pc + sign_extend64(imm << 2, 21); 27 + break; 28 + case pcaddu12i_op: 29 + regs->regs[rd] = pc + sign_extend64(imm << 12, 31); 30 + break; 31 + case pcaddu18i_op: 32 + regs->regs[rd] = pc + sign_extend64(imm << 18, 37); 33 + break; 34 + case pcalau12i_op: 35 + regs->regs[rd] = pc + sign_extend64(imm << 12, 31); 36 + regs->regs[rd] &= ~((1 << 12) - 1); 37 + break; 38 + default: 39 + pr_info("%s: unknown opcode\n", __func__); 40 + return; 41 + } 42 + 43 + regs->csr_era += LOONGARCH_INSN_SIZE; 44 + } 45 + 46 + void simu_branch(struct pt_regs *regs, union loongarch_instruction insn) 47 + { 48 + unsigned int imm, imm_l, imm_h, rd, rj; 49 + unsigned long pc = regs->csr_era; 50 + 51 + if (pc & 3) { 52 + pr_warn("%s: invalid pc 0x%lx\n", __func__, pc); 53 + return; 54 + } 55 + 56 + imm_l = insn.reg0i26_format.immediate_l; 57 + imm_h = insn.reg0i26_format.immediate_h; 58 + switch (insn.reg0i26_format.opcode) { 59 + case b_op: 60 + regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27); 61 + return; 62 + case bl_op: 63 + regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27); 64 + regs->regs[1] = pc + LOONGARCH_INSN_SIZE; 65 + return; 66 + } 67 + 68 + imm_l = insn.reg1i21_format.immediate_l; 69 + imm_h = insn.reg1i21_format.immediate_h; 70 + rj = insn.reg1i21_format.rj; 71 + switch (insn.reg1i21_format.opcode) { 72 + case beqz_op: 73 + if (regs->regs[rj] == 0) 74 + regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22); 75 + else 76 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 
77 + return; 78 + case bnez_op: 79 + if (regs->regs[rj] != 0) 80 + regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22); 81 + else 82 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 83 + return; 84 + } 85 + 86 + imm = insn.reg2i16_format.immediate; 87 + rj = insn.reg2i16_format.rj; 88 + rd = insn.reg2i16_format.rd; 89 + switch (insn.reg2i16_format.opcode) { 90 + case beq_op: 91 + if (regs->regs[rj] == regs->regs[rd]) 92 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 93 + else 94 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 95 + break; 96 + case bne_op: 97 + if (regs->regs[rj] != regs->regs[rd]) 98 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 99 + else 100 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 101 + break; 102 + case blt_op: 103 + if ((long)regs->regs[rj] < (long)regs->regs[rd]) 104 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 105 + else 106 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 107 + break; 108 + case bge_op: 109 + if ((long)regs->regs[rj] >= (long)regs->regs[rd]) 110 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 111 + else 112 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 113 + break; 114 + case bltu_op: 115 + if (regs->regs[rj] < regs->regs[rd]) 116 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 117 + else 118 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 119 + break; 120 + case bgeu_op: 121 + if (regs->regs[rj] >= regs->regs[rd]) 122 + regs->csr_era = pc + sign_extend64(imm << 2, 17); 123 + else 124 + regs->csr_era = pc + LOONGARCH_INSN_SIZE; 125 + break; 126 + case jirl_op: 127 + regs->csr_era = regs->regs[rj] + sign_extend64(imm << 2, 17); 128 + regs->regs[rd] = pc + LOONGARCH_INSN_SIZE; 129 + break; 130 + default: 131 + pr_info("%s: unknown opcode\n", __func__); 132 + return; 133 + } 134 + } 135 + 13 136 int larch_insn_read(void *addr, u32 *insnp) 14 137 { 15 138 int ret;
+406
arch/loongarch/kernel/kprobes.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/kdebug.h> 3 + #include <linux/kprobes.h> 4 + #include <linux/preempt.h> 5 + #include <asm/break.h> 6 + 7 + static const union loongarch_instruction breakpoint_insn = { 8 + .reg0i15_format = { 9 + .opcode = break_op, 10 + .immediate = BRK_KPROBE_BP, 11 + } 12 + }; 13 + 14 + static const union loongarch_instruction singlestep_insn = { 15 + .reg0i15_format = { 16 + .opcode = break_op, 17 + .immediate = BRK_KPROBE_SSTEPBP, 18 + } 19 + }; 20 + 21 + DEFINE_PER_CPU(struct kprobe *, current_kprobe); 22 + DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 23 + 24 + static bool insns_not_supported(union loongarch_instruction insn) 25 + { 26 + switch (insn.reg2i14_format.opcode) { 27 + case llw_op: 28 + case lld_op: 29 + case scw_op: 30 + case scd_op: 31 + pr_notice("kprobe: ll and sc instructions are not supported\n"); 32 + return true; 33 + } 34 + 35 + switch (insn.reg1i21_format.opcode) { 36 + case bceqz_op: 37 + pr_notice("kprobe: bceqz and bcnez instructions are not supported\n"); 38 + return true; 39 + } 40 + 41 + return false; 42 + } 43 + NOKPROBE_SYMBOL(insns_not_supported); 44 + 45 + static bool insns_need_simulation(struct kprobe *p) 46 + { 47 + if (is_pc_ins(&p->opcode)) 48 + return true; 49 + 50 + if (is_branch_ins(&p->opcode)) 51 + return true; 52 + 53 + return false; 54 + } 55 + NOKPROBE_SYMBOL(insns_need_simulation); 56 + 57 + static void arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) 58 + { 59 + if (is_pc_ins(&p->opcode)) 60 + simu_pc(regs, p->opcode); 61 + else if (is_branch_ins(&p->opcode)) 62 + simu_branch(regs, p->opcode); 63 + } 64 + NOKPROBE_SYMBOL(arch_simulate_insn); 65 + 66 + static void arch_prepare_ss_slot(struct kprobe *p) 67 + { 68 + p->ainsn.insn[0] = *p->addr; 69 + p->ainsn.insn[1] = singlestep_insn; 70 + p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE; 71 + } 72 + NOKPROBE_SYMBOL(arch_prepare_ss_slot); 73 + 74 + static void 
arch_prepare_simulate(struct kprobe *p) 75 + { 76 + p->ainsn.restore = 0; 77 + } 78 + NOKPROBE_SYMBOL(arch_prepare_simulate); 79 + 80 + int arch_prepare_kprobe(struct kprobe *p) 81 + { 82 + if ((unsigned long)p->addr & 0x3) 83 + return -EILSEQ; 84 + 85 + /* copy instruction */ 86 + p->opcode = *p->addr; 87 + 88 + /* decode instruction */ 89 + if (insns_not_supported(p->opcode)) 90 + return -EINVAL; 91 + 92 + if (insns_need_simulation(p)) { 93 + p->ainsn.insn = NULL; 94 + } else { 95 + p->ainsn.insn = get_insn_slot(); 96 + if (!p->ainsn.insn) 97 + return -ENOMEM; 98 + } 99 + 100 + /* prepare the instruction */ 101 + if (p->ainsn.insn) 102 + arch_prepare_ss_slot(p); 103 + else 104 + arch_prepare_simulate(p); 105 + 106 + return 0; 107 + } 108 + NOKPROBE_SYMBOL(arch_prepare_kprobe); 109 + 110 + /* Install breakpoint in text */ 111 + void arch_arm_kprobe(struct kprobe *p) 112 + { 113 + *p->addr = breakpoint_insn; 114 + flush_insn_slot(p); 115 + } 116 + NOKPROBE_SYMBOL(arch_arm_kprobe); 117 + 118 + /* Remove breakpoint from text */ 119 + void arch_disarm_kprobe(struct kprobe *p) 120 + { 121 + *p->addr = p->opcode; 122 + flush_insn_slot(p); 123 + } 124 + NOKPROBE_SYMBOL(arch_disarm_kprobe); 125 + 126 + void arch_remove_kprobe(struct kprobe *p) 127 + { 128 + if (p->ainsn.insn) { 129 + free_insn_slot(p->ainsn.insn, 0); 130 + p->ainsn.insn = NULL; 131 + } 132 + } 133 + NOKPROBE_SYMBOL(arch_remove_kprobe); 134 + 135 + static void save_previous_kprobe(struct kprobe_ctlblk *kcb) 136 + { 137 + kcb->prev_kprobe.kp = kprobe_running(); 138 + kcb->prev_kprobe.status = kcb->kprobe_status; 139 + } 140 + NOKPROBE_SYMBOL(save_previous_kprobe); 141 + 142 + static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) 143 + { 144 + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 145 + kcb->kprobe_status = kcb->prev_kprobe.status; 146 + } 147 + NOKPROBE_SYMBOL(restore_previous_kprobe); 148 + 149 + static void set_current_kprobe(struct kprobe *p) 150 + { 151 + 
__this_cpu_write(current_kprobe, p); 152 + } 153 + NOKPROBE_SYMBOL(set_current_kprobe); 154 + 155 + /* 156 + * Interrupts need to be disabled before single-step mode is set, 157 + * and not reenabled until after single-step mode ends. 158 + * Without disabling interrupt on local CPU, there is a chance of 159 + * interrupt occurrence in the period of exception return and start 160 + * of out-of-line single-step, that result in wrongly single stepping 161 + * into the interrupt handler. 162 + */ 163 + static void save_local_irqflag(struct kprobe_ctlblk *kcb, 164 + struct pt_regs *regs) 165 + { 166 + kcb->saved_status = regs->csr_prmd; 167 + regs->csr_prmd &= ~CSR_PRMD_PIE; 168 + } 169 + NOKPROBE_SYMBOL(save_local_irqflag); 170 + 171 + static void restore_local_irqflag(struct kprobe_ctlblk *kcb, 172 + struct pt_regs *regs) 173 + { 174 + regs->csr_prmd = kcb->saved_status; 175 + } 176 + NOKPROBE_SYMBOL(restore_local_irqflag); 177 + 178 + static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, 179 + struct pt_regs *regs) 180 + { 181 + /* return addr restore if non-branching insn */ 182 + if (cur->ainsn.restore != 0) 183 + instruction_pointer_set(regs, cur->ainsn.restore); 184 + 185 + /* restore back original saved kprobe variables and continue */ 186 + if (kcb->kprobe_status == KPROBE_REENTER) { 187 + restore_previous_kprobe(kcb); 188 + preempt_enable_no_resched(); 189 + return; 190 + } 191 + 192 + /* 193 + * update the kcb status even if the cur->post_handler is 194 + * not set because reset_curent_kprobe() doesn't update kcb. 
195 + */ 196 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 197 + if (cur->post_handler) 198 + cur->post_handler(cur, regs, 0); 199 + 200 + reset_current_kprobe(); 201 + preempt_enable_no_resched(); 202 + } 203 + NOKPROBE_SYMBOL(post_kprobe_handler); 204 + 205 + static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, 206 + struct kprobe_ctlblk *kcb, int reenter) 207 + { 208 + if (reenter) { 209 + save_previous_kprobe(kcb); 210 + set_current_kprobe(p); 211 + kcb->kprobe_status = KPROBE_REENTER; 212 + } else { 213 + kcb->kprobe_status = KPROBE_HIT_SS; 214 + } 215 + 216 + if (p->ainsn.insn) { 217 + /* IRQs and single stepping do not mix well */ 218 + save_local_irqflag(kcb, regs); 219 + /* set ip register to prepare for single stepping */ 220 + regs->csr_era = (unsigned long)p->ainsn.insn; 221 + } else { 222 + /* simulate single steping */ 223 + arch_simulate_insn(p, regs); 224 + /* now go for post processing */ 225 + post_kprobe_handler(p, kcb, regs); 226 + } 227 + } 228 + NOKPROBE_SYMBOL(setup_singlestep); 229 + 230 + static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs, 231 + struct kprobe_ctlblk *kcb) 232 + { 233 + switch (kcb->kprobe_status) { 234 + case KPROBE_HIT_SS: 235 + case KPROBE_HIT_SSDONE: 236 + case KPROBE_HIT_ACTIVE: 237 + kprobes_inc_nmissed_count(p); 238 + setup_singlestep(p, regs, kcb, 1); 239 + break; 240 + case KPROBE_REENTER: 241 + pr_warn("Failed to recover from reentered kprobes.\n"); 242 + dump_kprobe(p); 243 + WARN_ON_ONCE(1); 244 + break; 245 + default: 246 + WARN_ON(1); 247 + return false; 248 + } 249 + 250 + return true; 251 + } 252 + NOKPROBE_SYMBOL(reenter_kprobe); 253 + 254 + bool kprobe_breakpoint_handler(struct pt_regs *regs) 255 + { 256 + struct kprobe_ctlblk *kcb; 257 + struct kprobe *p, *cur_kprobe; 258 + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era; 259 + 260 + /* 261 + * We don't want to be preempted for the entire 262 + * duration of kprobe processing. 
263 + */ 264 + preempt_disable(); 265 + kcb = get_kprobe_ctlblk(); 266 + cur_kprobe = kprobe_running(); 267 + 268 + p = get_kprobe(addr); 269 + if (p) { 270 + if (cur_kprobe) { 271 + if (reenter_kprobe(p, regs, kcb)) 272 + return true; 273 + } else { 274 + /* Probe hit */ 275 + set_current_kprobe(p); 276 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 277 + 278 + /* 279 + * If we have no pre-handler or it returned 0, we 280 + * continue with normal processing. If we have a 281 + * pre-handler and it returned non-zero, it will 282 + * modify the execution path and no need to single 283 + * stepping. Let's just reset current kprobe and exit. 284 + * 285 + * pre_handler can hit a breakpoint and can step thru 286 + * before return. 287 + */ 288 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 289 + setup_singlestep(p, regs, kcb, 0); 290 + } else { 291 + reset_current_kprobe(); 292 + preempt_enable_no_resched(); 293 + } 294 + return true; 295 + } 296 + } 297 + 298 + if (addr->word != breakpoint_insn.word) { 299 + /* 300 + * The breakpoint instruction was removed right 301 + * after we hit it. Another cpu has removed 302 + * either a probepoint or a debugger breakpoint 303 + * at this address. In either case, no further 304 + * handling of this interrupt is appropriate. 305 + * Return back to original instruction, and continue. 
306 + */ 307 + regs->csr_era = (unsigned long)addr; 308 + preempt_enable_no_resched(); 309 + return true; 310 + } 311 + 312 + preempt_enable_no_resched(); 313 + return false; 314 + } 315 + NOKPROBE_SYMBOL(kprobe_breakpoint_handler); 316 + 317 + bool kprobe_singlestep_handler(struct pt_regs *regs) 318 + { 319 + struct kprobe *cur = kprobe_running(); 320 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 321 + unsigned long addr = instruction_pointer(regs); 322 + 323 + if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && 324 + ((unsigned long)&cur->ainsn.insn[1] == addr)) { 325 + restore_local_irqflag(kcb, regs); 326 + post_kprobe_handler(cur, kcb, regs); 327 + return true; 328 + } 329 + 330 + preempt_enable_no_resched(); 331 + return false; 332 + } 333 + NOKPROBE_SYMBOL(kprobe_singlestep_handler); 334 + 335 + bool kprobe_fault_handler(struct pt_regs *regs, int trapnr) 336 + { 337 + struct kprobe *cur = kprobe_running(); 338 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 339 + 340 + switch (kcb->kprobe_status) { 341 + case KPROBE_HIT_SS: 342 + case KPROBE_REENTER: 343 + /* 344 + * We are here because the instruction being single 345 + * stepped caused a page fault. We reset the current 346 + * kprobe and the ip points back to the probe address 347 + * and allow the page fault handler to continue as a 348 + * normal page fault. 349 + */ 350 + regs->csr_era = (unsigned long)cur->addr; 351 + WARN_ON_ONCE(!instruction_pointer(regs)); 352 + 353 + if (kcb->kprobe_status == KPROBE_REENTER) { 354 + restore_previous_kprobe(kcb); 355 + } else { 356 + restore_local_irqflag(kcb, regs); 357 + reset_current_kprobe(); 358 + } 359 + preempt_enable_no_resched(); 360 + break; 361 + } 362 + return false; 363 + } 364 + NOKPROBE_SYMBOL(kprobe_fault_handler); 365 + 366 + /* 367 + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. 368 + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). 
369 + */ 370 + int __init arch_populate_kprobe_blacklist(void) 371 + { 372 + return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, 373 + (unsigned long)__irqentry_text_end); 374 + } 375 + 376 + int __init arch_init_kprobes(void) 377 + { 378 + return 0; 379 + } 380 + 381 + /* ASM function that handles the kretprobes must not be probed */ 382 + NOKPROBE_SYMBOL(__kretprobe_trampoline); 383 + 384 + /* Called from __kretprobe_trampoline */ 385 + void __used *trampoline_probe_handler(struct pt_regs *regs) 386 + { 387 + return (void *)kretprobe_trampoline_handler(regs, NULL); 388 + } 389 + NOKPROBE_SYMBOL(trampoline_probe_handler); 390 + 391 + void arch_prepare_kretprobe(struct kretprobe_instance *ri, 392 + struct pt_regs *regs) 393 + { 394 + ri->ret_addr = (kprobe_opcode_t *)regs->regs[1]; 395 + ri->fp = NULL; 396 + 397 + /* Replace the return addr with trampoline addr */ 398 + regs->regs[1] = (unsigned long)&__kretprobe_trampoline; 399 + } 400 + NOKPROBE_SYMBOL(arch_prepare_kretprobe); 401 + 402 + int arch_trampoline_kprobe(struct kprobe *p) 403 + { 404 + return 0; 405 + } 406 + NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+96
arch/loongarch/kernel/kprobes_trampoline.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + #include <linux/linkage.h> 3 + #include <asm/stackframe.h> 4 + 5 + .text 6 + 7 + .macro save_all_base_regs 8 + cfi_st ra, PT_R1 9 + cfi_st tp, PT_R2 10 + cfi_st a0, PT_R4 11 + cfi_st a1, PT_R5 12 + cfi_st a2, PT_R6 13 + cfi_st a3, PT_R7 14 + cfi_st a4, PT_R8 15 + cfi_st a5, PT_R9 16 + cfi_st a6, PT_R10 17 + cfi_st a7, PT_R11 18 + cfi_st t0, PT_R12 19 + cfi_st t1, PT_R13 20 + cfi_st t2, PT_R14 21 + cfi_st t3, PT_R15 22 + cfi_st t4, PT_R16 23 + cfi_st t5, PT_R17 24 + cfi_st t6, PT_R18 25 + cfi_st t7, PT_R19 26 + cfi_st t8, PT_R20 27 + cfi_st u0, PT_R21 28 + cfi_st fp, PT_R22 29 + cfi_st s0, PT_R23 30 + cfi_st s1, PT_R24 31 + cfi_st s2, PT_R25 32 + cfi_st s3, PT_R26 33 + cfi_st s4, PT_R27 34 + cfi_st s5, PT_R28 35 + cfi_st s6, PT_R29 36 + cfi_st s7, PT_R30 37 + cfi_st s8, PT_R31 38 + csrrd t0, LOONGARCH_CSR_CRMD 39 + andi t0, t0, 0x7 /* extract bit[1:0] PLV, bit[2] IE */ 40 + LONG_S t0, sp, PT_CRMD 41 + .endm 42 + 43 + .macro restore_all_base_regs 44 + cfi_ld tp, PT_R2 45 + cfi_ld a0, PT_R4 46 + cfi_ld a1, PT_R5 47 + cfi_ld a2, PT_R6 48 + cfi_ld a3, PT_R7 49 + cfi_ld a4, PT_R8 50 + cfi_ld a5, PT_R9 51 + cfi_ld a6, PT_R10 52 + cfi_ld a7, PT_R11 53 + cfi_ld t0, PT_R12 54 + cfi_ld t1, PT_R13 55 + cfi_ld t2, PT_R14 56 + cfi_ld t3, PT_R15 57 + cfi_ld t4, PT_R16 58 + cfi_ld t5, PT_R17 59 + cfi_ld t6, PT_R18 60 + cfi_ld t7, PT_R19 61 + cfi_ld t8, PT_R20 62 + cfi_ld u0, PT_R21 63 + cfi_ld fp, PT_R22 64 + cfi_ld s0, PT_R23 65 + cfi_ld s1, PT_R24 66 + cfi_ld s2, PT_R25 67 + cfi_ld s3, PT_R26 68 + cfi_ld s4, PT_R27 69 + cfi_ld s5, PT_R28 70 + cfi_ld s6, PT_R29 71 + cfi_ld s7, PT_R30 72 + cfi_ld s8, PT_R31 73 + LONG_L t0, sp, PT_CRMD 74 + li.d t1, 0x7 /* mask bit[1:0] PLV, bit[2] IE */ 75 + csrxchg t0, t1, LOONGARCH_CSR_CRMD 76 + .endm 77 + 78 + SYM_CODE_START(__kretprobe_trampoline) 79 + addi.d sp, sp, -PT_SIZE 80 + save_all_base_regs 81 + 82 + addi.d t0, sp, PT_SIZE 83 + LONG_S t0, sp, PT_R3 84 + 85 + move a0, sp /* pt_regs */ 
86 + 87 + bl trampoline_probe_handler 88 + 89 + /* use the result as the return-address */ 90 + move ra, a0 91 + 92 + restore_all_base_regs 93 + addi.d sp, sp, PT_SIZE 94 + 95 + jr ra 96 + SYM_CODE_END(__kretprobe_trampoline)
+7
arch/loongarch/kernel/process.c
··· 18 18 #include <linux/sched/debug.h> 19 19 #include <linux/sched/task.h> 20 20 #include <linux/sched/task_stack.h> 21 + #include <linux/hw_breakpoint.h> 21 22 #include <linux/mm.h> 22 23 #include <linux/stddef.h> 23 24 #include <linux/unistd.h> ··· 95 94 clear_used_math(); 96 95 regs->csr_era = pc; 97 96 regs->regs[3] = sp; 97 + } 98 + 99 + void flush_thread(void) 100 + { 101 + flush_ptrace_hw_breakpoint(current); 98 102 } 99 103 100 104 void exit_thread(struct task_struct *tsk) ··· 187 181 childregs->regs[2] = tls; 188 182 189 183 out: 184 + ptrace_hw_copy_thread(p); 190 185 clear_tsk_thread_flag(p, TIF_USEDFPU); 191 186 clear_tsk_thread_flag(p, TIF_USEDSIMD); 192 187 clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+472
arch/loongarch/kernel/ptrace.c
··· 20 20 #include <linux/context_tracking.h> 21 21 #include <linux/elf.h> 22 22 #include <linux/errno.h> 23 + #include <linux/hw_breakpoint.h> 23 24 #include <linux/mm.h> 25 + #include <linux/nospec.h> 24 26 #include <linux/ptrace.h> 25 27 #include <linux/regset.h> 26 28 #include <linux/sched.h> ··· 31 29 #include <linux/smp.h> 32 30 #include <linux/stddef.h> 33 31 #include <linux/seccomp.h> 32 + #include <linux/thread_info.h> 34 33 #include <linux/uaccess.h> 35 34 36 35 #include <asm/byteorder.h> ··· 42 39 #include <asm/page.h> 43 40 #include <asm/pgtable.h> 44 41 #include <asm/processor.h> 42 + #include <asm/ptrace.h> 45 43 #include <asm/reg.h> 46 44 #include <asm/syscall.h> 47 45 ··· 250 246 return 0; 251 247 } 252 248 249 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 250 + 251 + /* 252 + * Handle hitting a HW-breakpoint. 253 + */ 254 + static void ptrace_hbptriggered(struct perf_event *bp, 255 + struct perf_sample_data *data, 256 + struct pt_regs *regs) 257 + { 258 + int i; 259 + struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); 260 + 261 + for (i = 0; i < LOONGARCH_MAX_BRP; ++i) 262 + if (current->thread.hbp_break[i] == bp) 263 + break; 264 + 265 + for (i = 0; i < LOONGARCH_MAX_WRP; ++i) 266 + if (current->thread.hbp_watch[i] == bp) 267 + break; 268 + 269 + force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address); 270 + } 271 + 272 + static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, 273 + struct task_struct *tsk, 274 + unsigned long idx) 275 + { 276 + struct perf_event *bp; 277 + 278 + switch (note_type) { 279 + case NT_LOONGARCH_HW_BREAK: 280 + if (idx >= LOONGARCH_MAX_BRP) 281 + return ERR_PTR(-EINVAL); 282 + idx = array_index_nospec(idx, LOONGARCH_MAX_BRP); 283 + bp = tsk->thread.hbp_break[idx]; 284 + break; 285 + case NT_LOONGARCH_HW_WATCH: 286 + if (idx >= LOONGARCH_MAX_WRP) 287 + return ERR_PTR(-EINVAL); 288 + idx = array_index_nospec(idx, LOONGARCH_MAX_WRP); 289 + bp = tsk->thread.hbp_watch[idx]; 290 + break; 291 + } 292 + 293 + 
return bp; 294 + } 295 + 296 + static int ptrace_hbp_set_event(unsigned int note_type, 297 + struct task_struct *tsk, 298 + unsigned long idx, 299 + struct perf_event *bp) 300 + { 301 + switch (note_type) { 302 + case NT_LOONGARCH_HW_BREAK: 303 + if (idx >= LOONGARCH_MAX_BRP) 304 + return -EINVAL; 305 + idx = array_index_nospec(idx, LOONGARCH_MAX_BRP); 306 + tsk->thread.hbp_break[idx] = bp; 307 + break; 308 + case NT_LOONGARCH_HW_WATCH: 309 + if (idx >= LOONGARCH_MAX_WRP) 310 + return -EINVAL; 311 + idx = array_index_nospec(idx, LOONGARCH_MAX_WRP); 312 + tsk->thread.hbp_watch[idx] = bp; 313 + break; 314 + } 315 + 316 + return 0; 317 + } 318 + 319 + static struct perf_event *ptrace_hbp_create(unsigned int note_type, 320 + struct task_struct *tsk, 321 + unsigned long idx) 322 + { 323 + int err, type; 324 + struct perf_event *bp; 325 + struct perf_event_attr attr; 326 + 327 + switch (note_type) { 328 + case NT_LOONGARCH_HW_BREAK: 329 + type = HW_BREAKPOINT_X; 330 + break; 331 + case NT_LOONGARCH_HW_WATCH: 332 + type = HW_BREAKPOINT_RW; 333 + break; 334 + default: 335 + return ERR_PTR(-EINVAL); 336 + } 337 + 338 + ptrace_breakpoint_init(&attr); 339 + 340 + /* 341 + * Initialise fields to sane defaults 342 + * (i.e. values that will pass validation). 
343 + */ 344 + attr.bp_addr = 0; 345 + attr.bp_len = HW_BREAKPOINT_LEN_4; 346 + attr.bp_type = type; 347 + attr.disabled = 1; 348 + 349 + bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); 350 + if (IS_ERR(bp)) 351 + return bp; 352 + 353 + err = ptrace_hbp_set_event(note_type, tsk, idx, bp); 354 + if (err) 355 + return ERR_PTR(err); 356 + 357 + return bp; 358 + } 359 + 360 + static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, 361 + struct arch_hw_breakpoint_ctrl ctrl, 362 + struct perf_event_attr *attr) 363 + { 364 + int err, len, type, offset; 365 + 366 + err = arch_bp_generic_fields(ctrl, &len, &type, &offset); 367 + if (err) 368 + return err; 369 + 370 + switch (note_type) { 371 + case NT_LOONGARCH_HW_BREAK: 372 + if ((type & HW_BREAKPOINT_X) != type) 373 + return -EINVAL; 374 + break; 375 + case NT_LOONGARCH_HW_WATCH: 376 + if ((type & HW_BREAKPOINT_RW) != type) 377 + return -EINVAL; 378 + break; 379 + default: 380 + return -EINVAL; 381 + } 382 + 383 + attr->bp_len = len; 384 + attr->bp_type = type; 385 + attr->bp_addr += offset; 386 + 387 + return 0; 388 + } 389 + 390 + static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info) 391 + { 392 + u8 num; 393 + u16 reg = 0; 394 + 395 + switch (note_type) { 396 + case NT_LOONGARCH_HW_BREAK: 397 + num = hw_breakpoint_slots(TYPE_INST); 398 + break; 399 + case NT_LOONGARCH_HW_WATCH: 400 + num = hw_breakpoint_slots(TYPE_DATA); 401 + break; 402 + default: 403 + return -EINVAL; 404 + } 405 + 406 + *info = reg | num; 407 + 408 + return 0; 409 + } 410 + 411 + static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, 412 + struct task_struct *tsk, 413 + unsigned long idx) 414 + { 415 + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 416 + 417 + if (!bp) 418 + bp = ptrace_hbp_create(note_type, tsk, idx); 419 + 420 + return bp; 421 + } 422 + 423 + static int ptrace_hbp_get_ctrl(unsigned int note_type, 424 + struct task_struct *tsk, 425 + 
unsigned long idx, u32 *ctrl) 426 + { 427 + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 428 + 429 + if (IS_ERR(bp)) 430 + return PTR_ERR(bp); 431 + 432 + *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; 433 + 434 + return 0; 435 + } 436 + 437 + static int ptrace_hbp_get_mask(unsigned int note_type, 438 + struct task_struct *tsk, 439 + unsigned long idx, u64 *mask) 440 + { 441 + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 442 + 443 + if (IS_ERR(bp)) 444 + return PTR_ERR(bp); 445 + 446 + *mask = bp ? counter_arch_bp(bp)->mask : 0; 447 + 448 + return 0; 449 + } 450 + 451 + static int ptrace_hbp_get_addr(unsigned int note_type, 452 + struct task_struct *tsk, 453 + unsigned long idx, u64 *addr) 454 + { 455 + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 456 + 457 + if (IS_ERR(bp)) 458 + return PTR_ERR(bp); 459 + 460 + *addr = bp ? counter_arch_bp(bp)->address : 0; 461 + 462 + return 0; 463 + } 464 + 465 + static int ptrace_hbp_set_ctrl(unsigned int note_type, 466 + struct task_struct *tsk, 467 + unsigned long idx, u32 uctrl) 468 + { 469 + int err; 470 + struct perf_event *bp; 471 + struct perf_event_attr attr; 472 + struct arch_hw_breakpoint_ctrl ctrl; 473 + 474 + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 475 + if (IS_ERR(bp)) 476 + return PTR_ERR(bp); 477 + 478 + attr = bp->attr; 479 + decode_ctrl_reg(uctrl, &ctrl); 480 + err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 481 + if (err) 482 + return err; 483 + 484 + return modify_user_hw_breakpoint(bp, &attr); 485 + } 486 + 487 + static int ptrace_hbp_set_mask(unsigned int note_type, 488 + struct task_struct *tsk, 489 + unsigned long idx, u64 mask) 490 + { 491 + struct perf_event *bp; 492 + struct perf_event_attr attr; 493 + struct arch_hw_breakpoint *info; 494 + 495 + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 496 + if (IS_ERR(bp)) 497 + return PTR_ERR(bp); 498 + 499 + attr = bp->attr; 500 + info = 
counter_arch_bp(bp); 501 + info->mask = mask; 502 + 503 + return modify_user_hw_breakpoint(bp, &attr); 504 + } 505 + 506 + static int ptrace_hbp_set_addr(unsigned int note_type, 507 + struct task_struct *tsk, 508 + unsigned long idx, u64 addr) 509 + { 510 + struct perf_event *bp; 511 + struct perf_event_attr attr; 512 + 513 + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 514 + if (IS_ERR(bp)) 515 + return PTR_ERR(bp); 516 + 517 + attr = bp->attr; 518 + attr.bp_addr = addr; 519 + 520 + return modify_user_hw_breakpoint(bp, &attr); 521 + } 522 + 523 + #define PTRACE_HBP_CTRL_SZ sizeof(u32) 524 + #define PTRACE_HBP_ADDR_SZ sizeof(u64) 525 + #define PTRACE_HBP_MASK_SZ sizeof(u64) 526 + 527 + static int hw_break_get(struct task_struct *target, 528 + const struct user_regset *regset, 529 + struct membuf to) 530 + { 531 + u16 info; 532 + u32 ctrl; 533 + u64 addr, mask; 534 + int ret, idx = 0; 535 + unsigned int note_type = regset->core_note_type; 536 + 537 + /* Resource info */ 538 + ret = ptrace_hbp_get_resource_info(note_type, &info); 539 + if (ret) 540 + return ret; 541 + 542 + membuf_write(&to, &info, sizeof(info)); 543 + 544 + /* (address, ctrl) registers */ 545 + while (to.left) { 546 + ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); 547 + if (ret) 548 + return ret; 549 + 550 + ret = ptrace_hbp_get_mask(note_type, target, idx, &mask); 551 + if (ret) 552 + return ret; 553 + 554 + ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); 555 + if (ret) 556 + return ret; 557 + 558 + membuf_store(&to, addr); 559 + membuf_store(&to, mask); 560 + membuf_store(&to, ctrl); 561 + idx++; 562 + } 563 + 564 + return 0; 565 + } 566 + 567 + static int hw_break_set(struct task_struct *target, 568 + const struct user_regset *regset, 569 + unsigned int pos, unsigned int count, 570 + const void *kbuf, const void __user *ubuf) 571 + { 572 + u32 ctrl; 573 + u64 addr, mask; 574 + int ret, idx = 0, offset, limit; 575 + unsigned int note_type = regset->core_note_type; 
576 + 577 + /* Resource info */ 578 + offset = offsetof(struct user_watch_state, dbg_regs); 579 + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); 580 + 581 + /* (address, ctrl) registers */ 582 + limit = regset->n * regset->size; 583 + while (count && offset < limit) { 584 + if (count < PTRACE_HBP_ADDR_SZ) 585 + return -EINVAL; 586 + 587 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 588 + offset, offset + PTRACE_HBP_ADDR_SZ); 589 + if (ret) 590 + return ret; 591 + 592 + ret = ptrace_hbp_set_addr(note_type, target, idx, addr); 593 + if (ret) 594 + return ret; 595 + offset += PTRACE_HBP_ADDR_SZ; 596 + 597 + if (!count) 598 + break; 599 + 600 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, 601 + offset, offset + PTRACE_HBP_ADDR_SZ); 602 + if (ret) 603 + return ret; 604 + 605 + ret = ptrace_hbp_set_mask(note_type, target, idx, mask); 606 + if (ret) 607 + return ret; 608 + offset += PTRACE_HBP_MASK_SZ; 609 + 610 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask, 611 + offset, offset + PTRACE_HBP_MASK_SZ); 612 + if (ret) 613 + return ret; 614 + 615 + ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); 616 + if (ret) 617 + return ret; 618 + offset += PTRACE_HBP_CTRL_SZ; 619 + idx++; 620 + } 621 + 622 + return 0; 623 + } 624 + 625 + #endif 626 + 253 627 struct pt_regs_offset { 254 628 const char *name; 255 629 int offset; ··· 701 319 REGSET_GPR, 702 320 REGSET_FPR, 703 321 REGSET_CPUCFG, 322 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 323 + REGSET_HW_BREAK, 324 + REGSET_HW_WATCH, 325 + #endif 704 326 }; 705 327 706 328 static const struct user_regset loongarch64_regsets[] = { ··· 732 346 .regset_get = cfg_get, 733 347 .set = cfg_set, 734 348 }, 349 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 350 + [REGSET_HW_BREAK] = { 351 + .core_note_type = NT_LOONGARCH_HW_BREAK, 352 + .n = sizeof(struct user_watch_state) / sizeof(u32), 353 + .size = sizeof(u32), 354 + .align = sizeof(u32), 355 + .regset_get = hw_break_get, 356 + .set = 
hw_break_set, 357 + }, 358 + [REGSET_HW_WATCH] = { 359 + .core_note_type = NT_LOONGARCH_HW_WATCH, 360 + .n = sizeof(struct user_watch_state) / sizeof(u32), 361 + .size = sizeof(u32), 362 + .align = sizeof(u32), 363 + .regset_get = hw_break_get, 364 + .set = hw_break_set, 365 + }, 366 + #endif 735 367 }; 736 368 737 369 static const struct user_regset_view user_loongarch64_view = { ··· 835 431 836 432 return ret; 837 433 } 434 + 435 + #ifdef CONFIG_HAVE_HW_BREAKPOINT 436 + static void ptrace_triggered(struct perf_event *bp, 437 + struct perf_sample_data *data, struct pt_regs *regs) 438 + { 439 + struct perf_event_attr attr; 440 + 441 + attr = bp->attr; 442 + attr.disabled = true; 443 + modify_user_hw_breakpoint(bp, &attr); 444 + } 445 + 446 + static int set_single_step(struct task_struct *tsk, unsigned long addr) 447 + { 448 + struct perf_event *bp; 449 + struct perf_event_attr attr; 450 + struct arch_hw_breakpoint *info; 451 + struct thread_struct *thread = &tsk->thread; 452 + 453 + bp = thread->hbp_break[0]; 454 + if (!bp) { 455 + ptrace_breakpoint_init(&attr); 456 + 457 + attr.bp_addr = addr; 458 + attr.bp_len = HW_BREAKPOINT_LEN_8; 459 + attr.bp_type = HW_BREAKPOINT_X; 460 + 461 + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, 462 + NULL, tsk); 463 + if (IS_ERR(bp)) 464 + return PTR_ERR(bp); 465 + 466 + thread->hbp_break[0] = bp; 467 + } else { 468 + int err; 469 + 470 + attr = bp->attr; 471 + attr.bp_addr = addr; 472 + 473 + /* Reenable breakpoint */ 474 + attr.disabled = false; 475 + err = modify_user_hw_breakpoint(bp, &attr); 476 + if (unlikely(err)) 477 + return err; 478 + 479 + csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR); 480 + } 481 + info = counter_arch_bp(bp); 482 + info->mask = TASK_SIZE - 1; 483 + 484 + return 0; 485 + } 486 + 487 + /* ptrace API */ 488 + void user_enable_single_step(struct task_struct *task) 489 + { 490 + struct thread_info *ti = task_thread_info(task); 491 + 492 + set_single_step(task, task_pt_regs(task)->csr_era); 
493 + task->thread.single_step = task_pt_regs(task)->csr_era; 494 + set_ti_thread_flag(ti, TIF_SINGLESTEP); 495 + } 496 + 497 + void user_disable_single_step(struct task_struct *task) 498 + { 499 + clear_tsk_thread_flag(task, TIF_SINGLESTEP); 500 + } 501 + #endif
+242
arch/loongarch/kernel/relocate.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Support for Kernel relocation at boot time 4 + * 5 + * Copyright (C) 2023 Loongson Technology Corporation Limited 6 + */ 7 + 8 + #include <linux/elf.h> 9 + #include <linux/kernel.h> 10 + #include <linux/printk.h> 11 + #include <linux/panic_notifier.h> 12 + #include <linux/start_kernel.h> 13 + #include <asm/bootinfo.h> 14 + #include <asm/early_ioremap.h> 15 + #include <asm/inst.h> 16 + #include <asm/sections.h> 17 + #include <asm/setup.h> 18 + 19 + #define RELOCATED(x) ((void *)((long)x + reloc_offset)) 20 + #define RELOCATED_KASLR(x) ((void *)((long)x + random_offset)) 21 + 22 + static unsigned long reloc_offset; 23 + 24 + static inline void __init relocate_relative(void) 25 + { 26 + Elf64_Rela *rela, *rela_end; 27 + rela = (Elf64_Rela *)&__rela_dyn_begin; 28 + rela_end = (Elf64_Rela *)&__rela_dyn_end; 29 + 30 + for ( ; rela < rela_end; rela++) { 31 + Elf64_Addr addr = rela->r_offset; 32 + Elf64_Addr relocated_addr = rela->r_addend; 33 + 34 + if (rela->r_info != R_LARCH_RELATIVE) 35 + continue; 36 + 37 + if (relocated_addr >= VMLINUX_LOAD_ADDRESS) 38 + relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr); 39 + 40 + *(Elf64_Addr *)RELOCATED(addr) = relocated_addr; 41 + } 42 + } 43 + 44 + static inline void __init relocate_absolute(long random_offset) 45 + { 46 + void *begin, *end; 47 + struct rela_la_abs *p; 48 + 49 + begin = RELOCATED_KASLR(&__la_abs_begin); 50 + end = RELOCATED_KASLR(&__la_abs_end); 51 + 52 + for (p = begin; (void *)p < end; p++) { 53 + long v = p->symvalue; 54 + uint32_t lu12iw, ori, lu32id, lu52id; 55 + union loongarch_instruction *insn = (void *)p - p->offset; 56 + 57 + lu12iw = (v >> 12) & 0xfffff; 58 + ori = v & 0xfff; 59 + lu32id = (v >> 32) & 0xfffff; 60 + lu52id = v >> 52; 61 + 62 + insn[0].reg1i20_format.immediate = lu12iw; 63 + insn[1].reg2i12_format.immediate = ori; 64 + insn[2].reg1i20_format.immediate = lu32id; 65 + insn[3].reg2i12_format.immediate = lu52id; 66 + } 67 + } 68 + 
69 + #ifdef CONFIG_RANDOMIZE_BASE 70 + static inline __init unsigned long rotate_xor(unsigned long hash, 71 + const void *area, size_t size) 72 + { 73 + size_t i, diff; 74 + const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash)); 75 + 76 + diff = (void *)ptr - area; 77 + if (size < diff + sizeof(hash)) 78 + return hash; 79 + 80 + size = ALIGN_DOWN(size - diff, sizeof(hash)); 81 + 82 + for (i = 0; i < size / sizeof(hash); i++) { 83 + /* Rotate by odd number of bits and XOR. */ 84 + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); 85 + hash ^= ptr[i]; 86 + } 87 + 88 + return hash; 89 + } 90 + 91 + static inline __init unsigned long get_random_boot(void) 92 + { 93 + unsigned long hash = 0; 94 + unsigned long entropy = random_get_entropy(); 95 + 96 + /* Attempt to create a simple but unpredictable starting entropy. */ 97 + hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); 98 + 99 + /* Add in any runtime entropy we can get */ 100 + hash = rotate_xor(hash, &entropy, sizeof(entropy)); 101 + 102 + return hash; 103 + } 104 + 105 + static inline __init bool kaslr_disabled(void) 106 + { 107 + char *str; 108 + const char *builtin_cmdline = CONFIG_CMDLINE; 109 + 110 + str = strstr(builtin_cmdline, "nokaslr"); 111 + if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' ')) 112 + return true; 113 + 114 + str = strstr(boot_command_line, "nokaslr"); 115 + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) 116 + return true; 117 + 118 + return false; 119 + } 120 + 121 + /* Choose a new address for the kernel */ 122 + static inline void __init *determine_relocation_address(void) 123 + { 124 + unsigned long kernel_length; 125 + unsigned long random_offset; 126 + void *destination = _text; 127 + 128 + if (kaslr_disabled()) 129 + return destination; 130 + 131 + kernel_length = (long)_end - (long)_text; 132 + 133 + random_offset = get_random_boot() << 16; 134 + random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); 135 
+ if (random_offset < kernel_length) 136 + random_offset += ALIGN(kernel_length, 0xffff); 137 + 138 + return RELOCATED_KASLR(destination); 139 + } 140 + 141 + static inline int __init relocation_addr_valid(void *location_new) 142 + { 143 + if ((unsigned long)location_new & 0x00000ffff) 144 + return 0; /* Inappropriately aligned new location */ 145 + 146 + if ((unsigned long)location_new < (unsigned long)_end) 147 + return 0; /* New location overlaps original kernel */ 148 + 149 + return 1; 150 + } 151 + #endif 152 + 153 + static inline void __init update_reloc_offset(unsigned long *addr, long random_offset) 154 + { 155 + unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr); 156 + 157 + *new_addr = (unsigned long)reloc_offset; 158 + } 159 + 160 + void * __init relocate_kernel(void) 161 + { 162 + unsigned long kernel_length; 163 + unsigned long random_offset = 0; 164 + void *location_new = _text; /* Default to original kernel start */ 165 + void *kernel_entry = start_kernel; /* Default to original kernel entry point */ 166 + char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ 167 + 168 + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); 169 + 170 + #ifdef CONFIG_RANDOMIZE_BASE 171 + location_new = determine_relocation_address(); 172 + 173 + /* Sanity check relocation address */ 174 + if (relocation_addr_valid(location_new)) 175 + random_offset = (unsigned long)location_new - (unsigned long)(_text); 176 + #endif 177 + reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS; 178 + 179 + if (random_offset) { 180 + kernel_length = (long)(_end) - (long)(_text); 181 + 182 + /* Copy the kernel to it's new location */ 183 + memcpy(location_new, _text, kernel_length); 184 + 185 + /* Sync the caches ready for execution of new kernel */ 186 + __asm__ __volatile__ ( 187 + "ibar 0 \t\n" 188 + "dbar 0 \t\n" 189 + ::: "memory"); 190 + 191 + reloc_offset += random_offset; 192 + 193 + /* Return the new kernel's 
entry point */ 194 + kernel_entry = RELOCATED_KASLR(start_kernel); 195 + 196 + /* The current thread is now within the relocated kernel */ 197 + __current_thread_info = RELOCATED_KASLR(__current_thread_info); 198 + 199 + update_reloc_offset(&reloc_offset, random_offset); 200 + } 201 + 202 + if (reloc_offset) 203 + relocate_relative(); 204 + 205 + relocate_absolute(random_offset); 206 + 207 + return kernel_entry; 208 + } 209 + 210 + /* 211 + * Show relocation information on panic. 212 + */ 213 + static void show_kernel_relocation(const char *level) 214 + { 215 + if (reloc_offset > 0) { 216 + printk(level); 217 + pr_cont("Kernel relocated by 0x%lx\n", reloc_offset); 218 + pr_cont(" .text @ 0x%px\n", _text); 219 + pr_cont(" .data @ 0x%px\n", _sdata); 220 + pr_cont(" .bss @ 0x%px\n", __bss_start); 221 + } 222 + } 223 + 224 + static int kernel_location_notifier_fn(struct notifier_block *self, 225 + unsigned long v, void *p) 226 + { 227 + show_kernel_relocation(KERN_EMERG); 228 + return NOTIFY_DONE; 229 + } 230 + 231 + static struct notifier_block kernel_location_notifier = { 232 + .notifier_call = kernel_location_notifier_fn 233 + }; 234 + 235 + static int __init register_kernel_offset_dumper(void) 236 + { 237 + atomic_notifier_chain_register(&panic_notifier_list, 238 + &kernel_location_notifier); 239 + return 0; 240 + } 241 + 242 + arch_initcall(register_kernel_offset_dumper);
+11 -3
arch/loongarch/kernel/setup.c
··· 234 234 #endif 235 235 } 236 236 237 + /* 2MB alignment for crash kernel regions */ 238 + #define CRASH_ALIGN SZ_2M 239 + #define CRASH_ADDR_MAX SZ_4G 240 + 237 241 static void __init arch_parse_crashkernel(void) 238 242 { 239 243 #ifdef CONFIG_KEXEC 240 244 int ret; 241 - unsigned long long start; 242 245 unsigned long long total_mem; 243 246 unsigned long long crash_base, crash_size; 244 247 ··· 250 247 if (ret < 0 || crash_size <= 0) 251 248 return; 252 249 253 - start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size); 254 - if (start != crash_base) { 250 + if (crash_base <= 0) { 251 + crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX); 252 + if (!crash_base) { 253 + pr_warn("crashkernel reservation failed - No suitable area found.\n"); 254 + return; 255 + } 256 + } else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) { 255 257 pr_warn("Invalid memory region reserved for crash kernel\n"); 256 258 return; 257 259 }
+6 -5
arch/loongarch/kernel/time.c
··· 140 140 141 141 int constant_clockevent_init(void) 142 142 { 143 - int irq; 144 143 unsigned int cpu = smp_processor_id(); 145 144 unsigned long min_delta = 0x600; 146 145 unsigned long max_delta = (1UL << 48) - 1; 147 146 struct clock_event_device *cd; 148 - static int timer_irq_installed = 0; 147 + static int irq = 0, timer_irq_installed = 0; 149 148 150 - irq = get_timer_irq(); 151 - if (irq < 0) 152 - pr_err("Failed to map irq %d (timer)\n", irq); 149 + if (!timer_irq_installed) { 150 + irq = get_timer_irq(); 151 + if (irq < 0) 152 + pr_err("Failed to map irq %d (timer)\n", irq); 153 + } 153 154 154 155 cd = &per_cpu(constant_clockevent_device, cpu); 155 156
+60 -8
arch/loongarch/kernel/traps.c
··· 371 371 372 372 asmlinkage void noinstr do_ale(struct pt_regs *regs) 373 373 { 374 - unsigned int *pc; 375 374 irqentry_state_t state = irqentry_enter(regs); 375 + 376 + #ifndef CONFIG_ARCH_STRICT_ALIGN 377 + die_if_kernel("Kernel ale access", regs); 378 + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 379 + #else 380 + unsigned int *pc; 376 381 377 382 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); 378 383 ··· 402 397 sigbus: 403 398 die_if_kernel("Kernel ale access", regs); 404 399 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 405 - 406 400 out: 401 + #endif 407 402 irqentry_exit(regs, state); 408 403 } 409 404 ··· 437 432 unsigned long era = exception_era(regs); 438 433 irqentry_state_t state = irqentry_enter(regs); 439 434 440 - local_irq_enable(); 435 + if (regs->csr_prmd & CSR_PRMD_PIE) 436 + local_irq_enable(); 437 + 441 438 current->thread.trap_nr = read_csr_excode(); 442 439 if (__get_inst(&opcode, (u32 *)era, user)) 443 440 goto out_sigsegv; ··· 452 445 */ 453 446 switch (bcode) { 454 447 case BRK_KPROBE_BP: 455 - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode, 456 - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) 448 + if (kprobe_breakpoint_handler(regs)) 457 449 goto out; 458 450 else 459 451 break; 460 452 case BRK_KPROBE_SSTEPBP: 461 - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode, 462 - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) 453 + if (kprobe_singlestep_handler(regs)) 463 454 goto out; 464 455 else 465 456 break; ··· 500 495 } 501 496 502 497 out: 503 - local_irq_disable(); 498 + if (regs->csr_prmd & CSR_PRMD_PIE) 499 + local_irq_disable(); 500 + 504 501 irqentry_exit(regs, state); 505 502 return; 506 503 ··· 513 506 514 507 asmlinkage void noinstr do_watch(struct pt_regs *regs) 515 508 { 509 + irqentry_state_t state = irqentry_enter(regs); 510 + 511 + #ifndef CONFIG_HAVE_HW_BREAKPOINT 516 512 pr_warn("Hardware watch point handler not 
implemented!\n"); 513 + #else 514 + if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) { 515 + int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1); 516 + unsigned long pc = instruction_pointer(regs); 517 + union loongarch_instruction *ip = (union loongarch_instruction *)pc; 518 + 519 + if (llbit) { 520 + /* 521 + * When the ll-sc combo is encountered, it is regarded as a single 522 + * instruction. So don't clear llbit and reset CSR.FWPS.Skip until 523 + * the llsc execution is completed. 524 + */ 525 + csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS); 526 + csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL); 527 + goto out; 528 + } 529 + 530 + if (pc == current->thread.single_step) { 531 + /* 532 + * Certain insns are occasionally not skipped when CSR.FWPS.Skip is 533 + * set, such as fld.d/fst.d. So singlestep needs to compare whether 534 + * the csr_era is equal to the value of singlestep set last time. 535 + */ 536 + if (!is_self_loop_ins(ip, regs)) { 537 + /* 538 + * Check if the target pc of the given instruction is equal to the 539 + * current pc. If yes, then we should not set the CSR.FWPS.SKIP 540 + * bit to break the original instruction stream. 541 + */ 542 + csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS); 543 + goto out; 544 + } 545 + } 546 + } else { 547 + breakpoint_handler(regs); 548 + watchpoint_handler(regs); 549 + } 550 + 551 + force_sig(SIGTRAP); 552 + out: 553 + #endif 554 + irqentry_exit(regs, state); 517 555 } 518 556 519 557 asmlinkage void noinstr do_ri(struct pt_regs *regs)
+18 -2
arch/loongarch/kernel/vmlinux.lds.S
··· 65 65 __alt_instructions_end = .; 66 66 } 67 67 68 + #ifdef CONFIG_RELOCATABLE 69 + . = ALIGN(8); 70 + .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) { 71 + __la_abs_begin = .; 72 + *(.la_abs) 73 + __la_abs_end = .; 74 + } 75 + #endif 76 + 68 77 .got : ALIGN(16) { *(.got) } 69 78 .plt : ALIGN(16) { *(.plt) } 70 79 .got.plt : ALIGN(16) { *(.got.plt) } 80 + 81 + .data.rel : { *(.data.rel*) } 71 82 72 83 . = ALIGN(PECOFF_SEGMENT_ALIGN); 73 84 __init_begin = .; ··· 103 92 PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT) 104 93 #endif 105 94 106 - .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) } 107 - 108 95 .init.bss : { 109 96 *(.init.bss) 110 97 } ··· 114 105 _sdata = .; 115 106 RO_DATA(4096) 116 107 RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE) 108 + 109 + .rela.dyn : ALIGN(8) { 110 + __rela_dyn_begin = .; 111 + *(.rela.dyn) *(.rela*) 112 + __rela_dyn_end = .; 113 + } 117 114 118 115 .sdata : { 119 116 *(.sdata) ··· 147 132 148 133 DISCARDS 149 134 /DISCARD/ : { 135 + *(.dynamic .dynsym .dynstr .hash .gnu.hash) 150 136 *(.gnu.attributes) 151 137 *(.options) 152 138 *(.eh_frame)
+3
arch/loongarch/lib/memcpy.S
··· 17 17 ALTERNATIVE "b __memcpy_generic", \ 18 18 "b __memcpy_fast", CPU_FEATURE_UAL 19 19 SYM_FUNC_END(memcpy) 20 + _ASM_NOKPROBE(memcpy) 20 21 21 22 EXPORT_SYMBOL(memcpy) 22 23 ··· 42 41 2: move a0, a3 43 42 jr ra 44 43 SYM_FUNC_END(__memcpy_generic) 44 + _ASM_NOKPROBE(__memcpy_generic) 45 45 46 46 /* 47 47 * void *__memcpy_fast(void *dst, const void *src, size_t n) ··· 95 93 3: move a0, a3 96 94 jr ra 97 95 SYM_FUNC_END(__memcpy_fast) 96 + _ASM_NOKPROBE(__memcpy_fast)
+4
arch/loongarch/lib/memmove.S
··· 29 29 b rmemcpy 30 30 4: b __rmemcpy_generic 31 31 SYM_FUNC_END(memmove) 32 + _ASM_NOKPROBE(memmove) 32 33 33 34 EXPORT_SYMBOL(memmove) 34 35 ··· 40 39 ALTERNATIVE "b __rmemcpy_generic", \ 41 40 "b __rmemcpy_fast", CPU_FEATURE_UAL 42 41 SYM_FUNC_END(rmemcpy) 42 + _ASM_NOKPROBE(rmemcpy) 43 43 44 44 /* 45 45 * void *__rmemcpy_generic(void *dst, const void *src, size_t n) ··· 66 64 2: move a0, a3 67 65 jr ra 68 66 SYM_FUNC_END(__rmemcpy_generic) 67 + _ASM_NOKPROBE(__rmemcpy_generic) 69 68 70 69 /* 71 70 * void *__rmemcpy_fast(void *dst, const void *src, size_t n) ··· 122 119 3: move a0, a3 123 120 jr ra 124 121 SYM_FUNC_END(__rmemcpy_fast) 122 + _ASM_NOKPROBE(__rmemcpy_fast)
+3
arch/loongarch/lib/memset.S
··· 23 23 ALTERNATIVE "b __memset_generic", \ 24 24 "b __memset_fast", CPU_FEATURE_UAL 25 25 SYM_FUNC_END(memset) 26 + _ASM_NOKPROBE(memset) 26 27 27 28 EXPORT_SYMBOL(memset) 28 29 ··· 46 45 2: move a0, a3 47 46 jr ra 48 47 SYM_FUNC_END(__memset_generic) 48 + _ASM_NOKPROBE(__memset_generic) 49 49 50 50 /* 51 51 * void *__memset_fast(void *s, int c, size_t n) ··· 91 89 3: move a0, a3 92 90 jr ra 93 91 SYM_FUNC_END(__memset_fast) 92 + _ASM_NOKPROBE(__memset_fast)
+3
arch/loongarch/mm/fault.c
··· 135 135 struct vm_area_struct *vma = NULL; 136 136 vm_fault_t fault; 137 137 138 + if (kprobe_page_fault(regs, current->thread.trap_nr)) 139 + return; 140 + 138 141 /* 139 142 * We fault-in kernel-space virtual memory on-demand. The 140 143 * 'reference' page table is init_mm.pgd.
+8 -9
arch/loongarch/mm/tlbex.S
··· 24 24 move a0, sp 25 25 REG_S a2, sp, PT_BVADDR 26 26 li.w a1, \write 27 - la.abs t0, do_page_fault 28 - jirl ra, t0, 0 27 + bl do_page_fault 29 28 RESTORE_ALL_AND_RET 30 29 SYM_FUNC_END(tlb_do_page_fault_\write) 31 30 .endm ··· 39 40 move a1, zero 40 41 csrrd a2, LOONGARCH_CSR_BADV 41 42 REG_S a2, sp, PT_BVADDR 42 - la.abs t0, do_page_fault 43 + la_abs t0, do_page_fault 43 44 jirl ra, t0, 0 44 45 RESTORE_ALL_AND_RET 45 46 SYM_FUNC_END(handle_tlb_protect) ··· 115 116 116 117 #ifdef CONFIG_64BIT 117 118 vmalloc_load: 118 - la.abs t1, swapper_pg_dir 119 + la_abs t1, swapper_pg_dir 119 120 b vmalloc_done_load 120 121 #endif 121 122 ··· 186 187 nopage_tlb_load: 187 188 dbar 0 188 189 csrrd ra, EXCEPTION_KS2 189 - la.abs t0, tlb_do_page_fault_0 190 + la_abs t0, tlb_do_page_fault_0 190 191 jr t0 191 192 SYM_FUNC_END(handle_tlb_load) 192 193 ··· 262 263 263 264 #ifdef CONFIG_64BIT 264 265 vmalloc_store: 265 - la.abs t1, swapper_pg_dir 266 + la_abs t1, swapper_pg_dir 266 267 b vmalloc_done_store 267 268 #endif 268 269 ··· 335 336 nopage_tlb_store: 336 337 dbar 0 337 338 csrrd ra, EXCEPTION_KS2 338 - la.abs t0, tlb_do_page_fault_1 339 + la_abs t0, tlb_do_page_fault_1 339 340 jr t0 340 341 SYM_FUNC_END(handle_tlb_store) 341 342 ··· 410 411 411 412 #ifdef CONFIG_64BIT 412 413 vmalloc_modify: 413 - la.abs t1, swapper_pg_dir 414 + la_abs t1, swapper_pg_dir 414 415 b vmalloc_done_modify 415 416 #endif 416 417 ··· 482 483 nopage_tlb_modify: 483 484 dbar 0 484 485 csrrd ra, EXCEPTION_KS2 485 - la.abs t0, tlb_do_page_fault_1 486 + la_abs t0, tlb_do_page_fault_1 486 487 jr t0 487 488 SYM_FUNC_END(handle_tlb_modify) 488 489
+2 -3
arch/loongarch/power/suspend_asm.S
··· 78 78 li.d t0, CSR_DMW1_INIT # CA, PLV0 79 79 csrwr t0, LOONGARCH_CSR_DMWIN1 80 80 81 - la.abs t0, 0f 82 - jr t0 83 - 0: 81 + JUMP_VIRT_ADDR t0, t1 82 + 84 83 la.pcrel t0, acpi_saved_sp 85 84 ld.d sp, t0, 0 86 85 SETUP_WAKEUP
+2
include/uapi/linux/elf.h
··· 445 445 #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ 446 446 #define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */ 447 447 #define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */ 448 + #define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */ 449 + #define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */ 448 450 449 451 /* Note types with note name "GNU" */ 450 452 #define NT_GNU_PROPERTY_TYPE_0 5
+8
samples/kprobes/kprobe_example.c
··· 55 55 pr_info("<%s> p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n", 56 56 p->symbol_name, p->addr, regs->psw.addr, regs->flags); 57 57 #endif 58 + #ifdef CONFIG_LOONGARCH 59 + pr_info("<%s> p->addr = 0x%p, era = 0x%lx, estat = 0x%lx\n", 60 + p->symbol_name, p->addr, regs->csr_era, regs->csr_estat); 61 + #endif 58 62 59 63 /* A dump_stack() here will give a stack backtrace */ 60 64 return 0; ··· 95 91 #ifdef CONFIG_S390 96 92 pr_info("<%s> p->addr, 0x%p, flags = 0x%lx\n", 97 93 p->symbol_name, p->addr, regs->flags); 94 + #endif 95 + #ifdef CONFIG_LOONGARCH 96 + pr_info("<%s> p->addr = 0x%p, estat = 0x%lx\n", 97 + p->symbol_name, p->addr, regs->csr_estat); 98 98 #endif 99 99 } 100 100
+9
tools/arch/loongarch/include/uapi/asm/bitsperlong.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef __ASM_LOONGARCH_BITSPERLONG_H 3 + #define __ASM_LOONGARCH_BITSPERLONG_H 4 + 5 + #define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) 6 + 7 + #include <asm-generic/bitsperlong.h> 8 + 9 + #endif /* __ASM_LOONGARCH_BITSPERLONG_H */
+10 -1
tools/scripts/Makefile.arch
··· 5 5 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 6 6 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 7 7 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ 8 - -e s/riscv.*/riscv/) 8 + -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/) 9 9 10 10 ifndef ARCH 11 11 ARCH := $(HOSTARCH) ··· 32 32 # Additional ARCH settings for sh 33 33 ifeq ($(ARCH),sh64) 34 34 SRCARCH := sh 35 + endif 36 + 37 + # Additional ARCH settings for loongarch 38 + ifeq ($(ARCH),loongarch32) 39 + SRCARCH := loongarch 40 + endif 41 + 42 + ifeq ($(ARCH),loongarch64) 43 + SRCARCH := loongarch 35 44 endif 36 45 37 46 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+3
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
··· 28 28 mips*) 29 29 ARG1=%r4 30 30 ;; 31 + loongarch*) 32 + ARG1=%r4 33 + ;; 31 34 *) 32 35 echo "Please implement other architecture here" 33 36 exit_untested
+4
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
··· 40 40 GOODREG=%r4 41 41 BADREG=%r12 42 42 ;; 43 + loongarch*) 44 + GOODREG=%r4 45 + BADREG=%r12 46 + ;; 43 47 *) 44 48 echo "Please implement other architecture here" 45 49 exit_untested
+6
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 128 128 # define __NR_seccomp 277 129 129 # elif defined(__csky__) 130 130 # define __NR_seccomp 277 131 + # elif defined(__loongarch__) 132 + # define __NR_seccomp 277 131 133 # elif defined(__hppa__) 132 134 # define __NR_seccomp 338 133 135 # elif defined(__powerpc__) ··· 1757 1755 NT_ARM_SYSTEM_CALL, &__v)); \ 1758 1756 } while (0) 1759 1757 # define SYSCALL_RET(_regs) (_regs).regs[0] 1758 + #elif defined(__loongarch__) 1759 + # define ARCH_REGS struct user_pt_regs 1760 + # define SYSCALL_NUM(_regs) (_regs).regs[11] 1761 + # define SYSCALL_RET(_regs) (_regs).regs[4] 1760 1762 #elif defined(__riscv) && __riscv_xlen == 64 1761 1763 # define ARCH_REGS struct user_regs_struct 1762 1764 # define SYSCALL_NUM(_regs) (_regs).a7