Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nds32-for-linus-4.19-tag1' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux

Pull nds32 updates from Greentime Hu:
"Contained in here are the bug fixes, building error fixes and ftrace
support for nds32"

* tag 'nds32-for-linus-4.19-tag1' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux:
nds32: linker script: GCOV kernel may refer to data in __exit
nds32: fix build error because of wrong semicolon
nds32: Fix a kernel panic issue because of wrong frame pointer access.
nds32: Only print one page of stack when dying to prevent printing too much information.
nds32: Add macro definition for offset of lp register on stack
nds32: Remove the deprecated ABI implementation
nds32/stack: Get real return address by using ftrace_graph_ret_addr
nds32/ftrace: Support dynamic function graph tracer
nds32/ftrace: Support dynamic function tracer
nds32/ftrace: Add RECORD_MCOUNT support
nds32/ftrace: Support static function graph tracer
nds32/ftrace: Support static function tracer
nds32: Extract the checking and getting pointer to a macro
nds32: Clean up the coding style
nds32: Fix get_user/put_user macro expand pointer problem
nds32: Fix empty call trace
nds32: add NULL entry to the end of the of_device_id array
nds32: fix logic for module

+527 -152
+4
arch/nds32/Kconfig
··· 40 40 select NO_IOPORT_MAP 41 41 select RTC_LIB 42 42 select THREAD_INFO_IN_TASK 43 + select HAVE_FUNCTION_TRACER 44 + select HAVE_FUNCTION_GRAPH_TRACER 45 + select HAVE_FTRACE_MCOUNT_RECORD 46 + select HAVE_DYNAMIC_FTRACE 43 47 help 44 48 Andes(nds32) Linux support. 45 49
+4
arch/nds32/Makefile
··· 5 5 6 6 comma = , 7 7 8 + ifdef CONFIG_FUNCTION_TRACER 9 + arch-y += -malways-save-lp -mno-relax 10 + endif 11 + 8 12 KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) 9 13 KBUILD_CFLAGS += -mcmodel=large 10 14
+2 -2
arch/nds32/include/asm/elf.h
··· 121 121 */ 122 122 #define ELF_CLASS ELFCLASS32 123 123 #ifdef __NDS32_EB__ 124 - #define ELF_DATA ELFDATA2MSB; 124 + #define ELF_DATA ELFDATA2MSB 125 125 #else 126 - #define ELF_DATA ELFDATA2LSB; 126 + #define ELF_DATA ELFDATA2LSB 127 127 #endif 128 128 #define ELF_ARCH EM_NDS32 129 129 #define USE_ELF_CORE_DUMP
+46
arch/nds32/include/asm/ftrace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_NDS32_FTRACE_H 4 + #define __ASM_NDS32_FTRACE_H 5 + 6 + #ifdef CONFIG_FUNCTION_TRACER 7 + 8 + #define HAVE_FUNCTION_GRAPH_FP_TEST 9 + 10 + #define MCOUNT_ADDR ((unsigned long)(_mcount)) 11 + /* mcount call is composed of three instructions: 12 + * sethi + ori + jral 13 + */ 14 + #define MCOUNT_INSN_SIZE 12 15 + 16 + extern void _mcount(unsigned long parent_ip); 17 + 18 + #ifdef CONFIG_DYNAMIC_FTRACE 19 + 20 + #define FTRACE_ADDR ((unsigned long)_ftrace_caller) 21 + 22 + #ifdef __NDS32_EL__ 23 + #define INSN_NOP 0x09000040 24 + #define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2) 25 + #define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046) 26 + #define ENDIAN_CONVERT(insn) be32_to_cpu(insn) 27 + #else /* __NDS32_EB__ */ 28 + #define INSN_NOP 0x40000009 29 + #define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2) 30 + #define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000) 31 + #define ENDIAN_CONVERT(insn) (insn) 32 + #endif 33 + 34 + extern void _ftrace_caller(unsigned long parent_ip); 35 + static inline unsigned long ftrace_call_adjust(unsigned long addr) 36 + { 37 + return addr; 38 + } 39 + struct dyn_arch_ftrace { 40 + }; 41 + 42 + #endif /* CONFIG_DYNAMIC_FTRACE */ 43 + 44 + #endif /* CONFIG_FUNCTION_TRACER */ 45 + 46 + #endif /* __ASM_NDS32_FTRACE_H */
+1
arch/nds32/include/asm/nds32.h
··· 17 17 #else 18 18 #define FP_OFFSET (-2) 19 19 #endif 20 + #define LP_OFFSET (-1) 20 21 21 22 extern void __init early_trap_init(void); 22 23 static inline void GIE_ENABLE(void)
+119 -110
arch/nds32/include/asm/uaccess.h
··· 38 38 extern int fixup_exception(struct pt_regs *regs); 39 39 40 40 #define KERNEL_DS ((mm_segment_t) { ~0UL }) 41 - #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) 41 + #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) 42 42 43 43 #define get_ds() (KERNEL_DS) 44 44 #define get_fs() (current_thread_info()->addr_limit) ··· 49 49 current_thread_info()->addr_limit = fs; 50 50 } 51 51 52 - #define segment_eq(a, b) ((a) == (b)) 52 + #define segment_eq(a, b) ((a) == (b)) 53 53 54 54 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) 55 55 56 - #define access_ok(type, addr, size) \ 56 + #define access_ok(type, addr, size) \ 57 57 __range_ok((unsigned long)addr, (unsigned long)size) 58 58 /* 59 59 * Single-value transfer routines. They automatically use the right ··· 75 75 * versions are void (ie, don't return a value as such). 76 76 */ 77 77 78 - #define get_user(x,p) \ 79 - ({ \ 80 - long __e = -EFAULT; \ 81 - if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \ 82 - __e = __get_user(x,p); \ 83 - } else \ 84 - x = 0; \ 85 - __e; \ 86 - }) 87 - #define __get_user(x,ptr) \ 78 + #define get_user __get_user \ 79 + 80 + #define __get_user(x, ptr) \ 88 81 ({ \ 89 82 long __gu_err = 0; \ 90 - __get_user_err((x),(ptr),__gu_err); \ 83 + __get_user_check((x), (ptr), __gu_err); \ 91 84 __gu_err; \ 92 85 }) 93 86 94 - #define __get_user_error(x,ptr,err) \ 87 + #define __get_user_error(x, ptr, err) \ 95 88 ({ \ 96 - __get_user_err((x),(ptr),err); \ 97 - (void) 0; \ 89 + __get_user_check((x), (ptr), (err)); \ 90 + (void)0; \ 98 91 }) 99 92 100 - #define __get_user_err(x,ptr,err) \ 93 + #define __get_user_check(x, ptr, err) \ 94 + ({ \ 95 + const __typeof__(*(ptr)) __user *__p = (ptr); \ 96 + might_fault(); \ 97 + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ 98 + __get_user_err((x), __p, (err)); \ 99 + } else { \ 100 + (x) = 0; (err) = -EFAULT; \ 101 + } \ 102 + }) 103 + 104 + #define __get_user_err(x, ptr, err) \ 101 105 do { \ 102 - unsigned long 
__gu_addr = (unsigned long)(ptr); \ 103 106 unsigned long __gu_val; \ 104 107 __chk_user_ptr(ptr); \ 105 108 switch (sizeof(*(ptr))) { \ 106 109 case 1: \ 107 - __get_user_asm("lbi",__gu_val,__gu_addr,err); \ 110 + __get_user_asm("lbi", __gu_val, (ptr), (err)); \ 108 111 break; \ 109 112 case 2: \ 110 - __get_user_asm("lhi",__gu_val,__gu_addr,err); \ 113 + __get_user_asm("lhi", __gu_val, (ptr), (err)); \ 111 114 break; \ 112 115 case 4: \ 113 - __get_user_asm("lwi",__gu_val,__gu_addr,err); \ 116 + __get_user_asm("lwi", __gu_val, (ptr), (err)); \ 114 117 break; \ 115 118 case 8: \ 116 - __get_user_asm_dword(__gu_val,__gu_addr,err); \ 119 + __get_user_asm_dword(__gu_val, (ptr), (err)); \ 117 120 break; \ 118 121 default: \ 119 122 BUILD_BUG(); \ 120 123 break; \ 121 124 } \ 122 - (x) = (__typeof__(*(ptr)))__gu_val; \ 125 + (x) = (__force __typeof__(*(ptr)))__gu_val; \ 123 126 } while (0) 124 127 125 - #define __get_user_asm(inst,x,addr,err) \ 126 - asm volatile( \ 127 - "1: "inst" %1,[%2]\n" \ 128 - "2:\n" \ 129 - " .section .fixup,\"ax\"\n" \ 130 - " .align 2\n" \ 131 - "3: move %0, %3\n" \ 132 - " move %1, #0\n" \ 133 - " b 2b\n" \ 134 - " .previous\n" \ 135 - " .section __ex_table,\"a\"\n" \ 136 - " .align 3\n" \ 137 - " .long 1b, 3b\n" \ 138 - " .previous" \ 139 - : "+r" (err), "=&r" (x) \ 140 - : "r" (addr), "i" (-EFAULT) \ 141 - : "cc") 128 + #define __get_user_asm(inst, x, addr, err) \ 129 + __asm__ __volatile__ ( \ 130 + "1: "inst" %1,[%2]\n" \ 131 + "2:\n" \ 132 + " .section .fixup,\"ax\"\n" \ 133 + " .align 2\n" \ 134 + "3: move %0, %3\n" \ 135 + " move %1, #0\n" \ 136 + " b 2b\n" \ 137 + " .previous\n" \ 138 + " .section __ex_table,\"a\"\n" \ 139 + " .align 3\n" \ 140 + " .long 1b, 3b\n" \ 141 + " .previous" \ 142 + : "+r" (err), "=&r" (x) \ 143 + : "r" (addr), "i" (-EFAULT) \ 144 + : "cc") 142 145 143 146 #ifdef __NDS32_EB__ 144 147 #define __gu_reg_oper0 "%H1" ··· 152 149 #endif 153 150 154 151 #define __get_user_asm_dword(x, addr, err) \ 155 - asm 
volatile( \ 156 - "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ 157 - "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ 158 - "3:\n" \ 159 - " .section .fixup,\"ax\"\n" \ 160 - " .align 2\n" \ 161 - "4: move %0, %3\n" \ 162 - " b 3b\n" \ 163 - " .previous\n" \ 164 - " .section __ex_table,\"a\"\n" \ 165 - " .align 3\n" \ 166 - " .long 1b, 4b\n" \ 167 - " .long 2b, 4b\n" \ 168 - " .previous" \ 169 - : "+r"(err), "=&r"(x) \ 170 - : "r"(addr), "i"(-EFAULT) \ 171 - : "cc") 172 - #define put_user(x,p) \ 173 - ({ \ 174 - long __e = -EFAULT; \ 175 - if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ 176 - __e = __put_user(x,p); \ 177 - } \ 178 - __e; \ 179 - }) 180 - #define __put_user(x,ptr) \ 152 + __asm__ __volatile__ ( \ 153 + "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ 154 + "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ 155 + "3:\n" \ 156 + " .section .fixup,\"ax\"\n" \ 157 + " .align 2\n" \ 158 + "4: move %0, %3\n" \ 159 + " b 3b\n" \ 160 + " .previous\n" \ 161 + " .section __ex_table,\"a\"\n" \ 162 + " .align 3\n" \ 163 + " .long 1b, 4b\n" \ 164 + " .long 2b, 4b\n" \ 165 + " .previous" \ 166 + : "+r"(err), "=&r"(x) \ 167 + : "r"(addr), "i"(-EFAULT) \ 168 + : "cc") 169 + 170 + #define put_user __put_user \ 171 + 172 + #define __put_user(x, ptr) \ 181 173 ({ \ 182 174 long __pu_err = 0; \ 183 - __put_user_err((x),(ptr),__pu_err); \ 175 + __put_user_err((x), (ptr), __pu_err); \ 184 176 __pu_err; \ 185 177 }) 186 178 187 - #define __put_user_error(x,ptr,err) \ 179 + #define __put_user_error(x, ptr, err) \ 188 180 ({ \ 189 - __put_user_err((x),(ptr),err); \ 190 - (void) 0; \ 181 + __put_user_err((x), (ptr), (err)); \ 182 + (void)0; \ 191 183 }) 192 184 193 - #define __put_user_err(x,ptr,err) \ 185 + #define __put_user_check(x, ptr, err) \ 186 + ({ \ 187 + __typeof__(*(ptr)) __user *__p = (ptr); \ 188 + might_fault(); \ 189 + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ 190 + __put_user_err((x), __p, (err)); \ 191 + } else { \ 192 + (err) = -EFAULT; \ 193 + } \ 194 + }) 195 + 196 + 
#define __put_user_err(x, ptr, err) \ 194 197 do { \ 195 - unsigned long __pu_addr = (unsigned long)(ptr); \ 196 198 __typeof__(*(ptr)) __pu_val = (x); \ 197 199 __chk_user_ptr(ptr); \ 198 200 switch (sizeof(*(ptr))) { \ 199 201 case 1: \ 200 - __put_user_asm("sbi",__pu_val,__pu_addr,err); \ 202 + __put_user_asm("sbi", __pu_val, (ptr), (err)); \ 201 203 break; \ 202 204 case 2: \ 203 - __put_user_asm("shi",__pu_val,__pu_addr,err); \ 205 + __put_user_asm("shi", __pu_val, (ptr), (err)); \ 204 206 break; \ 205 207 case 4: \ 206 - __put_user_asm("swi",__pu_val,__pu_addr,err); \ 208 + __put_user_asm("swi", __pu_val, (ptr), (err)); \ 207 209 break; \ 208 210 case 8: \ 209 - __put_user_asm_dword(__pu_val,__pu_addr,err); \ 211 + __put_user_asm_dword(__pu_val, (ptr), (err)); \ 210 212 break; \ 211 213 default: \ 212 214 BUILD_BUG(); \ ··· 219 211 } \ 220 212 } while (0) 221 213 222 - #define __put_user_asm(inst,x,addr,err) \ 223 - asm volatile( \ 224 - "1: "inst" %1,[%2]\n" \ 225 - "2:\n" \ 226 - " .section .fixup,\"ax\"\n" \ 227 - " .align 2\n" \ 228 - "3: move %0, %3\n" \ 229 - " b 2b\n" \ 230 - " .previous\n" \ 231 - " .section __ex_table,\"a\"\n" \ 232 - " .align 3\n" \ 233 - " .long 1b, 3b\n" \ 234 - " .previous" \ 235 - : "+r" (err) \ 236 - : "r" (x), "r" (addr), "i" (-EFAULT) \ 237 - : "cc") 214 + #define __put_user_asm(inst, x, addr, err) \ 215 + __asm__ __volatile__ ( \ 216 + "1: "inst" %1,[%2]\n" \ 217 + "2:\n" \ 218 + " .section .fixup,\"ax\"\n" \ 219 + " .align 2\n" \ 220 + "3: move %0, %3\n" \ 221 + " b 2b\n" \ 222 + " .previous\n" \ 223 + " .section __ex_table,\"a\"\n" \ 224 + " .align 3\n" \ 225 + " .long 1b, 3b\n" \ 226 + " .previous" \ 227 + : "+r" (err) \ 228 + : "r" (x), "r" (addr), "i" (-EFAULT) \ 229 + : "cc") 238 230 239 231 #ifdef __NDS32_EB__ 240 232 #define __pu_reg_oper0 "%H2" ··· 245 237 #endif 246 238 247 239 #define __put_user_asm_dword(x, addr, err) \ 248 - asm volatile( \ 249 - "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ 250 - "\n2:\tswi " 
__pu_reg_oper1 ",[%1+4]\n" \ 251 - "3:\n" \ 252 - " .section .fixup,\"ax\"\n" \ 253 - " .align 2\n" \ 254 - "4: move %0, %3\n" \ 255 - " b 3b\n" \ 256 - " .previous\n" \ 257 - " .section __ex_table,\"a\"\n" \ 258 - " .align 3\n" \ 259 - " .long 1b, 4b\n" \ 260 - " .long 2b, 4b\n" \ 261 - " .previous" \ 262 - : "+r"(err) \ 263 - : "r"(addr), "r"(x), "i"(-EFAULT) \ 264 - : "cc") 240 + __asm__ __volatile__ ( \ 241 + "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ 242 + "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ 243 + "3:\n" \ 244 + " .section .fixup,\"ax\"\n" \ 245 + " .align 2\n" \ 246 + "4: move %0, %3\n" \ 247 + " b 3b\n" \ 248 + " .previous\n" \ 249 + " .section __ex_table,\"a\"\n" \ 250 + " .align 3\n" \ 251 + " .long 1b, 4b\n" \ 252 + " .long 2b, 4b\n" \ 253 + " .previous" \ 254 + : "+r"(err) \ 255 + : "r"(addr), "r"(x), "i"(-EFAULT) \ 256 + : "cc") 257 + 265 258 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); 266 259 extern long strncpy_from_user(char *dest, const char __user * src, long count); 267 260 extern __must_check long strlen_user(const char __user * str);
+6
arch/nds32/kernel/Makefile
··· 21 21 22 22 23 23 obj-y += vdso/ 24 + 25 + obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 26 + 27 + ifdef CONFIG_FUNCTION_TRACER 28 + CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) 29 + endif
+2 -1
arch/nds32/kernel/atl2c.c
··· 9 9 10 10 void __iomem *atl2c_base; 11 11 static const struct of_device_id atl2c_ids[] __initconst = { 12 - {.compatible = "andestech,atl2c",} 12 + {.compatible = "andestech,atl2c",}, 13 + {} 13 14 }; 14 15 15 16 static int __init atl2c_of_init(void)
+1 -1
arch/nds32/kernel/ex-entry.S
··· 118 118 /* interrupt */ 119 119 2: 120 120 #ifdef CONFIG_TRACE_IRQFLAGS 121 - jal trace_hardirqs_off 121 + jal __trace_hardirqs_off 122 122 #endif 123 123 move $r0, $sp 124 124 sethi $lp, hi20(ret_from_intr)
+2 -2
arch/nds32/kernel/ex-exit.S
··· 138 138 #ifdef CONFIG_TRACE_IRQFLAGS 139 139 lwi $p0, [$sp+(#IPSW_OFFSET)] 140 140 andi $p0, $p0, #0x1 141 - la $r10, trace_hardirqs_off 142 - la $r9, trace_hardirqs_on 141 + la $r10, __trace_hardirqs_off 142 + la $r9, __trace_hardirqs_on 143 143 cmovz $r9, $p0, $r10 144 144 jral $r9 145 145 #endif
+309
arch/nds32/kernel/ftrace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/ftrace.h> 4 + #include <linux/uaccess.h> 5 + #include <asm/cacheflush.h> 6 + 7 + #ifndef CONFIG_DYNAMIC_FTRACE 8 + extern void (*ftrace_trace_function)(unsigned long, unsigned long, 9 + struct ftrace_ops*, struct pt_regs*); 10 + extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 11 + extern void ftrace_graph_caller(void); 12 + 13 + noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, 14 + struct ftrace_ops *op, struct pt_regs *regs) 15 + { 16 + __asm__ (""); /* avoid to optimize as pure function */ 17 + } 18 + 19 + noinline void _mcount(unsigned long parent_ip) 20 + { 21 + /* save all state by the compiler prologue */ 22 + 23 + unsigned long ip = (unsigned long)__builtin_return_address(0); 24 + 25 + if (ftrace_trace_function != ftrace_stub) 26 + ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip, 27 + NULL, NULL); 28 + 29 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 30 + if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub 31 + || ftrace_graph_entry != ftrace_graph_entry_stub) 32 + ftrace_graph_caller(); 33 + #endif 34 + 35 + /* restore all state by the compiler epilogue */ 36 + } 37 + EXPORT_SYMBOL(_mcount); 38 + 39 + #else /* CONFIG_DYNAMIC_FTRACE */ 40 + 41 + noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, 42 + struct ftrace_ops *op, struct pt_regs *regs) 43 + { 44 + __asm__ (""); /* avoid to optimize as pure function */ 45 + } 46 + 47 + noinline void __naked _mcount(unsigned long parent_ip) 48 + { 49 + __asm__ (""); /* avoid to optimize as pure function */ 50 + } 51 + EXPORT_SYMBOL(_mcount); 52 + 53 + #define XSTR(s) STR(s) 54 + #define STR(s) #s 55 + void _ftrace_caller(unsigned long parent_ip) 56 + { 57 + /* save all state needed by the compiler prologue */ 58 + 59 + /* 60 + * prepare arguments for real tracing function 61 + * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE 62 + * second arg : parent_ip 63 
+ */ 64 + __asm__ __volatile__ ( 65 + "move $r1, %0 \n\t" 66 + "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t" 67 + : 68 + : "r" (parent_ip), "r" (__builtin_return_address(0))); 69 + 70 + /* a placeholder for the call to a real tracing function */ 71 + __asm__ __volatile__ ( 72 + "ftrace_call: \n\t" 73 + "nop \n\t" 74 + "nop \n\t" 75 + "nop \n\t"); 76 + 77 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 78 + /* a placeholder for the call to ftrace_graph_caller */ 79 + __asm__ __volatile__ ( 80 + "ftrace_graph_call: \n\t" 81 + "nop \n\t" 82 + "nop \n\t" 83 + "nop \n\t"); 84 + #endif 85 + /* restore all state needed by the compiler epilogue */ 86 + } 87 + 88 + int __init ftrace_dyn_arch_init(void) 89 + { 90 + return 0; 91 + } 92 + 93 + int ftrace_arch_code_modify_prepare(void) 94 + { 95 + set_all_modules_text_rw(); 96 + return 0; 97 + } 98 + 99 + int ftrace_arch_code_modify_post_process(void) 100 + { 101 + set_all_modules_text_ro(); 102 + return 0; 103 + } 104 + 105 + static unsigned long gen_sethi_insn(unsigned long addr) 106 + { 107 + unsigned long opcode = 0x46000000; 108 + unsigned long imm = addr >> 12; 109 + unsigned long rt_num = 0xf << 20; 110 + 111 + return ENDIAN_CONVERT(opcode | rt_num | imm); 112 + } 113 + 114 + static unsigned long gen_ori_insn(unsigned long addr) 115 + { 116 + unsigned long opcode = 0x58000000; 117 + unsigned long imm = addr & 0x0000fff; 118 + unsigned long rt_num = 0xf << 20; 119 + unsigned long ra_num = 0xf << 15; 120 + 121 + return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm); 122 + } 123 + 124 + static unsigned long gen_jral_insn(unsigned long addr) 125 + { 126 + unsigned long opcode = 0x4a000001; 127 + unsigned long rt_num = 0x1e << 20; 128 + unsigned long rb_num = 0xf << 10; 129 + 130 + return ENDIAN_CONVERT(opcode | rt_num | rb_num); 131 + } 132 + 133 + static void ftrace_gen_call_insn(unsigned long *call_insns, 134 + unsigned long addr) 135 + { 136 + call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */ 137 + call_insns[1] = 
gen_ori_insn(addr); /* ori $r15, $r15, imm15u */ 138 + call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */ 139 + } 140 + 141 + static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn, 142 + unsigned long *new_insn, bool validate) 143 + { 144 + unsigned long orig_insn[3]; 145 + 146 + if (validate) { 147 + if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE)) 148 + return -EFAULT; 149 + if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE)) 150 + return -EINVAL; 151 + } 152 + 153 + if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE)) 154 + return -EPERM; 155 + 156 + return 0; 157 + } 158 + 159 + static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn, 160 + unsigned long *new_insn, bool validate) 161 + { 162 + int ret; 163 + 164 + ret = __ftrace_modify_code(pc, old_insn, new_insn, validate); 165 + if (ret) 166 + return ret; 167 + 168 + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); 169 + 170 + return ret; 171 + } 172 + 173 + int ftrace_update_ftrace_func(ftrace_func_t func) 174 + { 175 + unsigned long pc = (unsigned long)&ftrace_call; 176 + unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 177 + unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 178 + 179 + if (func != ftrace_stub) 180 + ftrace_gen_call_insn(new_insn, (unsigned long)func); 181 + 182 + return ftrace_modify_code(pc, old_insn, new_insn, false); 183 + } 184 + 185 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 186 + { 187 + unsigned long pc = rec->ip; 188 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 189 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 190 + 191 + ftrace_gen_call_insn(call_insn, addr); 192 + 193 + return ftrace_modify_code(pc, nop_insn, call_insn, true); 194 + } 195 + 196 + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 197 + unsigned long addr) 198 + { 199 + unsigned long pc = rec->ip; 200 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, 
INSN_NOP}; 201 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 202 + 203 + ftrace_gen_call_insn(call_insn, addr); 204 + 205 + return ftrace_modify_code(pc, call_insn, nop_insn, true); 206 + } 207 + #endif /* CONFIG_DYNAMIC_FTRACE */ 208 + 209 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 210 + void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 211 + unsigned long frame_pointer) 212 + { 213 + unsigned long return_hooker = (unsigned long)&return_to_handler; 214 + struct ftrace_graph_ent trace; 215 + unsigned long old; 216 + int err; 217 + 218 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 219 + return; 220 + 221 + old = *parent; 222 + 223 + trace.func = self_addr; 224 + trace.depth = current->curr_ret_stack + 1; 225 + 226 + /* Only trace if the calling function expects to */ 227 + if (!ftrace_graph_entry(&trace)) 228 + return; 229 + 230 + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 231 + frame_pointer, NULL); 232 + 233 + if (err == -EBUSY) 234 + return; 235 + 236 + *parent = return_hooker; 237 + } 238 + 239 + noinline void ftrace_graph_caller(void) 240 + { 241 + unsigned long *parent_ip = 242 + (unsigned long *)(__builtin_frame_address(2) - 4); 243 + 244 + unsigned long selfpc = 245 + (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE); 246 + 247 + unsigned long frame_pointer = 248 + (unsigned long)__builtin_frame_address(3); 249 + 250 + prepare_ftrace_return(parent_ip, selfpc, frame_pointer); 251 + } 252 + 253 + extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); 254 + void __naked return_to_handler(void) 255 + { 256 + __asm__ __volatile__ ( 257 + /* save state needed by the ABI */ 258 + "smw.adm $r0,[$sp],$r1,#0x0 \n\t" 259 + 260 + /* get original return address */ 261 + "move $r0, $fp \n\t" 262 + "bal ftrace_return_to_handler\n\t" 263 + "move $lp, $r0 \n\t" 264 + 265 + /* restore state nedded by the ABI */ 266 + "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); 267 + } 268 + 269 + 
#ifdef CONFIG_DYNAMIC_FTRACE 270 + extern unsigned long ftrace_graph_call; 271 + 272 + static int ftrace_modify_graph_caller(bool enable) 273 + { 274 + unsigned long pc = (unsigned long)&ftrace_graph_call; 275 + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 276 + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; 277 + 278 + ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller); 279 + 280 + if (enable) 281 + return ftrace_modify_code(pc, nop_insn, call_insn, true); 282 + else 283 + return ftrace_modify_code(pc, call_insn, nop_insn, true); 284 + } 285 + 286 + int ftrace_enable_ftrace_graph_caller(void) 287 + { 288 + return ftrace_modify_graph_caller(true); 289 + } 290 + 291 + int ftrace_disable_ftrace_graph_caller(void) 292 + { 293 + return ftrace_modify_graph_caller(false); 294 + } 295 + #endif /* CONFIG_DYNAMIC_FTRACE */ 296 + 297 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 298 + 299 + 300 + #ifdef CONFIG_TRACE_IRQFLAGS 301 + noinline void __trace_hardirqs_off(void) 302 + { 303 + trace_hardirqs_off(); 304 + } 305 + noinline void __trace_hardirqs_on(void) 306 + { 307 + trace_hardirqs_on(); 308 + } 309 + #endif /* CONFIG_TRACE_IRQFLAGS */
+2 -2
arch/nds32/kernel/module.c
··· 40 40 41 41 tmp2 = tmp & loc_mask; 42 42 if (partial_in_place) { 43 - tmp &= (!loc_mask); 43 + tmp &= (~loc_mask); 44 44 tmp = 45 45 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 46 46 } else { ··· 70 70 71 71 tmp2 = tmp & loc_mask; 72 72 if (partial_in_place) { 73 - tmp &= (!loc_mask); 73 + tmp &= (~loc_mask); 74 74 tmp = 75 75 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 76 76 } else {
+5 -1
arch/nds32/kernel/stacktrace.c
··· 4 4 #include <linux/sched/debug.h> 5 5 #include <linux/sched/task_stack.h> 6 6 #include <linux/stacktrace.h> 7 + #include <linux/ftrace.h> 7 8 8 9 void save_stack_trace(struct stack_trace *trace) 9 10 { ··· 17 16 unsigned long *fpn; 18 17 int skip = trace->skip; 19 18 int savesched; 19 + int graph_idx = 0; 20 20 21 21 if (tsk == current) { 22 22 __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); ··· 31 29 && (fpn >= (unsigned long *)TASK_SIZE)) { 32 30 unsigned long lpp, fpp; 33 31 34 - lpp = fpn[-1]; 32 + lpp = fpn[LP_OFFSET]; 35 33 fpp = fpn[FP_OFFSET]; 36 34 if (!__kernel_text_address(lpp)) 37 35 break; 36 + else 37 + lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); 38 38 39 39 if (savesched || !in_sched_functions(lpp)) { 40 40 if (skip) {
+9 -33
arch/nds32/kernel/traps.c
··· 8 8 #include <linux/kdebug.h> 9 9 #include <linux/sched/task_stack.h> 10 10 #include <linux/uaccess.h> 11 + #include <linux/ftrace.h> 11 12 12 13 #include <asm/proc-fns.h> 13 14 #include <asm/unistd.h> ··· 95 94 set_fs(fs); 96 95 } 97 96 98 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 99 - #include <linux/ftrace.h> 100 - static void 101 - get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) 102 - { 103 - if (*addr == (unsigned long)return_to_handler) { 104 - int index = tsk->curr_ret_stack; 105 - 106 - if (tsk->ret_stack && index >= *graph) { 107 - index -= *graph; 108 - *addr = tsk->ret_stack[index].ret; 109 - (*graph)++; 110 - } 111 - } 112 - } 113 - #else 114 - static inline void 115 - get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) 116 - { 117 - } 118 - #endif 119 - 120 97 #define LOOP_TIMES (100) 121 98 static void __dump(struct task_struct *tsk, unsigned long *base_reg) 122 99 { ··· 105 126 while (!kstack_end(base_reg)) { 106 127 ret_addr = *base_reg++; 107 128 if (__kernel_text_address(ret_addr)) { 108 - get_real_ret_addr(&ret_addr, tsk, &graph); 129 + ret_addr = ftrace_graph_ret_addr( 130 + tsk, &graph, ret_addr, NULL); 109 131 print_ip_sym(ret_addr); 110 132 } 111 133 if (--cnt < 0) ··· 117 137 !((unsigned long)base_reg & 0x3) && 118 138 ((unsigned long)base_reg >= TASK_SIZE)) { 119 139 unsigned long next_fp; 120 - #if !defined(NDS32_ABI_2) 121 - ret_addr = base_reg[0]; 122 - next_fp = base_reg[1]; 123 - #else 124 - ret_addr = base_reg[-1]; 140 + ret_addr = base_reg[LP_OFFSET]; 125 141 next_fp = base_reg[FP_OFFSET]; 126 - #endif 127 142 if (__kernel_text_address(ret_addr)) { 128 - get_real_ret_addr(&ret_addr, tsk, &graph); 143 + 144 + ret_addr = ftrace_graph_ret_addr( 145 + tsk, &graph, ret_addr, NULL); 129 146 print_ip_sym(ret_addr); 130 147 } 131 148 if (--cnt < 0) ··· 173 196 pr_emerg("CPU: %i\n", smp_processor_id()); 174 197 show_regs(regs); 175 198 pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", 
176 - tsk->comm, tsk->pid, task_thread_info(tsk) + 1); 199 + tsk->comm, tsk->pid, end_of_stack(tsk)); 177 200 178 201 if (!user_mode(regs) || in_interrupt()) { 179 - dump_mem("Stack: ", regs->sp, 180 - THREAD_SIZE + (unsigned long)task_thread_info(tsk)); 202 + dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK); 181 203 dump_instr(regs); 182 204 dump_stack(); 183 205 }
+12
arch/nds32/kernel/vmlinux.lds.S
··· 13 13 ENTRY(_stext_lma) 14 14 jiffies = jiffies_64; 15 15 16 + #if defined(CONFIG_GCOV_KERNEL) 17 + #define NDS32_EXIT_KEEP(x) x 18 + #else 19 + #define NDS32_EXIT_KEEP(x) 20 + #endif 21 + 16 22 SECTIONS 17 23 { 18 24 _stext_lma = TEXTADDR - LOAD_OFFSET; 19 25 . = TEXTADDR; 20 26 __init_begin = .; 21 27 HEAD_TEXT_SECTION 28 + .exit.text : { 29 + NDS32_EXIT_KEEP(EXIT_TEXT) 30 + } 22 31 INIT_TEXT_SECTION(PAGE_SIZE) 23 32 INIT_DATA_SECTION(16) 33 + .exit.data : { 34 + NDS32_EXIT_KEEP(EXIT_DATA) 35 + } 24 36 PERCPU_SECTION(L1_CACHE_BYTES) 25 37 __init_end = .; 26 38
+3
scripts/recordmcount.pl
··· 389 389 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; 390 390 $type = ".quad"; 391 391 $alignment = 2; 392 + } elsif ($arch eq "nds32") { 393 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; 394 + $alignment = 2; 392 395 } else { 393 396 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 394 397 }