Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
"Dynamic ftrace support by Sven Schnelle and a header guard fix by
Denis Efremov"

* 'parisc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: asm: psw.h: missing header guard
parisc: add dynamic ftrace
compiler.h: add CC_USING_PATCHABLE_FUNCTION_ENTRY
parisc: use pr_debug() in kernel/module.c
parisc: add WARN_ON() to clear_fixmap
parisc: add spinlock to patch function
parisc: add support for patching multiple words

+358 -60
+2
arch/parisc/Kconfig
··· 59 59 select HAVE_ARCH_KGDB 60 60 select HAVE_KPROBES 61 61 select HAVE_KRETPROBES 62 + select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1) 63 + select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE 62 64 63 65 help 64 66 The PA-RISC microprocessor is designed by Hewlett-Packard and used
+18
arch/parisc/Makefile
··· 47 47 endif 48 48 endif 49 49 50 + ifdef CONFIG_DYNAMIC_FTRACE 51 + ifdef CONFIG_64BIT 52 + NOP_COUNT := 8 53 + else 54 + NOP_COUNT := 5 55 + endif 56 + 57 + export CC_USING_RECORD_MCOUNT:=1 58 + export CC_USING_PATCHABLE_FUNCTION_ENTRY:=1 59 + 60 + KBUILD_AFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 61 + KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \ 62 + -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT) 63 + 64 + CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1))) 65 + KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/parisc/kernel/module.lds 66 + endif 67 + 50 68 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S 51 69 52 70 cflags-y := -pipe
+13 -2
arch/parisc/include/asm/ftrace.h
··· 5 5 #ifndef __ASSEMBLY__ 6 6 extern void mcount(void); 7 7 8 - #define MCOUNT_INSN_SIZE 4 9 - 8 + #define MCOUNT_ADDR ((unsigned long)mcount) 9 + #define MCOUNT_INSN_SIZE 4 10 + #define CC_USING_NOP_MCOUNT 10 11 extern unsigned long sys_call_table[]; 11 12 12 13 extern unsigned long return_address(unsigned int); 14 + 15 + #ifdef CONFIG_DYNAMIC_FTRACE 16 + extern void ftrace_caller(void); 17 + 18 + struct dyn_arch_ftrace { 19 + }; 20 + 21 + unsigned long ftrace_call_adjust(unsigned long addr); 22 + 23 + #endif 13 24 14 25 #define ftrace_return_address(n) return_address(n) 15 26
+3 -1
arch/parisc/include/asm/patch.h
··· 4 4 5 5 /* stop machine and patch kernel text */ 6 6 void patch_text(void *addr, unsigned int insn); 7 + void patch_text_multiple(void *addr, u32 *insn, unsigned int len); 7 8 8 9 /* patch kernel text with machine already stopped (e.g. in kgdb) */ 9 - void __patch_text(void *addr, unsigned int insn); 10 + void __patch_text(void *addr, u32 insn); 11 + void __patch_text_multiple(void *addr, u32 *insn, unsigned int len); 10 12 11 13 #endif
+1 -1
arch/parisc/include/asm/psw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef _PARISC_PSW_H 3 - 3 + #define _PARISC_PSW_H 4 4 5 5 #define PSW_I 0x00000001 6 6 #define PSW_D 0x00000002
+5 -4
arch/parisc/kernel/Makefile
··· 14 14 15 15 ifdef CONFIG_FUNCTION_TRACER 16 16 # Do not profile debug and lowlevel utilities 17 - CFLAGS_REMOVE_ftrace.o = -pg 18 - CFLAGS_REMOVE_cache.o = -pg 19 - CFLAGS_REMOVE_perf.o = -pg 20 - CFLAGS_REMOVE_unwind.o = -pg 17 + CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) 18 + CFLAGS_REMOVE_cache.o = $(CC_FLAGS_FTRACE) 19 + CFLAGS_REMOVE_perf.o = $(CC_FLAGS_FTRACE) 20 + CFLAGS_REMOVE_unwind.o = $(CC_FLAGS_FTRACE) 21 + CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) 21 22 endif 22 23 23 24 obj-$(CONFIG_SMP) += smp.o
+64
arch/parisc/kernel/entry.S
··· 2012 2012 #endif 2013 2013 ENDPROC_CFI(mcount) 2014 2014 2015 + #ifdef CONFIG_DYNAMIC_FTRACE 2016 + 2017 + #ifdef CONFIG_64BIT 2018 + #define FTRACE_FRAME_SIZE (2*FRAME_SIZE) 2019 + #else 2020 + #define FTRACE_FRAME_SIZE FRAME_SIZE 2021 + #endif 2022 + ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) 2023 + ftrace_caller: 2024 + .global ftrace_caller 2025 + 2026 + STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp) 2027 + ldo -FTRACE_FRAME_SIZE(%sp), %r3 2028 + STREG %rp, -RP_OFFSET(%r3) 2029 + 2030 + /* Offset 0 is already allocated for %r1 */ 2031 + STREG %r23, 2*REG_SZ(%r3) 2032 + STREG %r24, 3*REG_SZ(%r3) 2033 + STREG %r25, 4*REG_SZ(%r3) 2034 + STREG %r26, 5*REG_SZ(%r3) 2035 + STREG %r28, 6*REG_SZ(%r3) 2036 + STREG %r29, 7*REG_SZ(%r3) 2037 + #ifdef CONFIG_64BIT 2038 + STREG %r19, 8*REG_SZ(%r3) 2039 + STREG %r20, 9*REG_SZ(%r3) 2040 + STREG %r21, 10*REG_SZ(%r3) 2041 + STREG %r22, 11*REG_SZ(%r3) 2042 + STREG %r27, 12*REG_SZ(%r3) 2043 + STREG %r31, 13*REG_SZ(%r3) 2044 + loadgp 2045 + ldo -16(%sp),%r29 2046 + #endif 2047 + LDREG 0(%r3), %r25 2048 + copy %rp, %r26 2049 + ldo -8(%r25), %r25 2050 + b,l ftrace_function_trampoline, %rp 2051 + copy %r3, %r24 2052 + 2053 + LDREG -RP_OFFSET(%r3), %rp 2054 + LDREG 2*REG_SZ(%r3), %r23 2055 + LDREG 3*REG_SZ(%r3), %r24 2056 + LDREG 4*REG_SZ(%r3), %r25 2057 + LDREG 5*REG_SZ(%r3), %r26 2058 + LDREG 6*REG_SZ(%r3), %r28 2059 + LDREG 7*REG_SZ(%r3), %r29 2060 + #ifdef CONFIG_64BIT 2061 + LDREG 8*REG_SZ(%r3), %r19 2062 + LDREG 9*REG_SZ(%r3), %r20 2063 + LDREG 10*REG_SZ(%r3), %r21 2064 + LDREG 11*REG_SZ(%r3), %r22 2065 + LDREG 12*REG_SZ(%r3), %r27 2066 + LDREG 13*REG_SZ(%r3), %r31 2067 + #endif 2068 + LDREG 1*REG_SZ(%r3), %r3 2069 + 2070 + LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 2071 + /* Adjust return point to jump back to beginning of traced function */ 2072 + ldo -4(%r1), %r1 2073 + bv,n (%r1) 2074 + 2075 + ENDPROC_CFI(ftrace_caller) 2076 + 2077 + #endif 2078 + 2015 2079 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 2016 2080 .align 8 2017 2081 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
+120 -9
arch/parisc/kernel/ftrace.c
··· 7 7 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 8 8 * 9 9 * future possible enhancements: 10 - * - add CONFIG_DYNAMIC_FTRACE 11 10 * - add CONFIG_STACK_TRACER 12 11 */ 13 12 14 13 #include <linux/init.h> 15 14 #include <linux/ftrace.h> 15 + #include <linux/uaccess.h> 16 16 17 17 #include <asm/assembly.h> 18 18 #include <asm/sections.h> 19 19 #include <asm/ftrace.h> 20 - 20 + #include <asm/patch.h> 21 21 22 22 #define __hot __attribute__ ((__section__ (".text.hot"))) 23 23 ··· 50 50 unsigned long self_addr, 51 51 unsigned long org_sp_gr3) 52 52 { 53 - extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */ 54 - 55 - if (ftrace_trace_function != ftrace_stub) { 56 - /* struct ftrace_ops *op, struct pt_regs *regs); */ 57 - ftrace_trace_function(parent, self_addr, NULL, NULL); 58 - return; 59 - } 53 + #ifndef CONFIG_DYNAMIC_FTRACE 54 + extern ftrace_func_t ftrace_trace_function; 55 + #endif 56 + if (ftrace_trace_function != ftrace_stub) 57 + ftrace_trace_function(self_addr, parent, NULL, NULL); 60 58 61 59 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 62 60 if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub || ··· 73 75 #endif 74 76 } 75 77 78 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 79 + int ftrace_enable_ftrace_graph_caller(void) 80 + { 81 + return 0; 82 + } 83 + 84 + int ftrace_disable_ftrace_graph_caller(void) 85 + { 86 + return 0; 87 + } 88 + #endif 89 + 90 + #ifdef CONFIG_DYNAMIC_FTRACE 91 + 92 + int __init ftrace_dyn_arch_init(void) 93 + { 94 + return 0; 95 + } 96 + int ftrace_update_ftrace_func(ftrace_func_t func) 97 + { 98 + return 0; 99 + } 100 + 101 + unsigned long ftrace_call_adjust(unsigned long addr) 102 + { 103 + return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4; 104 + } 105 + 106 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 107 + { 108 + u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; 109 + u32 *tramp; 110 + int size, ret, i; 111 + void *ip; 112 + 113 + #ifdef CONFIG_64BIT 114 + unsigned long addr2 = 115 + (unsigned long)dereference_function_descriptor((void *)addr); 116 + 117 + u32 ftrace_trampoline[] = { 118 + 0x73c10208, /* std,ma r1,100(sp) */ 119 + 0x0c2110c1, /* ldd -10(r1),r1 */ 120 + 0xe820d002, /* bve,n (r1) */ 121 + addr2 >> 32, 122 + addr2 & 0xffffffff, 123 + 0xe83f1fd7, /* b,l,n .-14,r1 */ 124 + }; 125 + 126 + u32 ftrace_trampoline_unaligned[] = { 127 + addr2 >> 32, 128 + addr2 & 0xffffffff, 129 + 0x37de0200, /* ldo 100(sp),sp */ 130 + 0x73c13e01, /* std r1,-100(sp) */ 131 + 0x34213ff9, /* ldo -4(r1),r1 */ 132 + 0x50213fc1, /* ldd -20(r1),r1 */ 133 + 0xe820d002, /* bve,n (r1) */ 134 + 0xe83f1fcf, /* b,l,n .-20,r1 */ 135 + }; 136 + 137 + BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) > 138 + FTRACE_PATCHABLE_FUNCTION_SIZE); 139 + #else 140 + u32 ftrace_trampoline[] = { 141 + (u32)addr, 142 + 0x6fc10080, /* stw,ma r1,40(sp) */ 143 + 0x48213fd1, /* ldw -18(r1),r1 */ 144 + 0xe820c002, /* bv,n r0(r1) */ 145 + 0xe83f1fdf, /* b,l,n .-c,r1 */ 146 + }; 147 + #endif 148 + 149 + BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) > 150 + FTRACE_PATCHABLE_FUNCTION_SIZE); 151 + 152 + size = sizeof(ftrace_trampoline); 153 + tramp = ftrace_trampoline; 154 + 155 + #ifdef CONFIG_64BIT 156 + if (rec->ip & 0x4) { 157 + size = sizeof(ftrace_trampoline_unaligned); 158 + tramp = ftrace_trampoline_unaligned; 159 + } 160 + #endif 161 + 162 + ip = (void *)(rec->ip + 4 - size); 163 + 164 + ret = probe_kernel_read(insn, ip, size); 165 + if (ret) 166 + return ret; 167 + 168 + for (i = 0; i < size / 4; i++) { 169 + if (insn[i] != INSN_NOP) 170 + return -EINVAL; 171 + } 172 + 173 + __patch_text_multiple(ip, tramp, size); 174 + return 0; 175 + } 176 + 177 + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 178 + unsigned long addr) 179 + { 180 + u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE]; 181 + int i; 182 + 183 + for (i = 0; i < ARRAY_SIZE(insn); i++) 184 + insn[i] = INSN_NOP; 185 + 186 + __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn), 187 + insn, sizeof(insn)); 188 + return 0; 189 + } 190 + #endif
+38 -26
arch/parisc/kernel/module.c
··· 33 33 * However, SEGREL32 is used only for PARISC unwind entries, and we want 34 34 * those entries to have an absolute address, and not just an offset. 35 35 * 36 - * The unwind table mechanism has the ability to specify an offset for 36 + * The unwind table mechanism has the ability to specify an offset for 37 37 * the unwind table; however, because we split off the init functions into 38 - * a different piece of memory, it is not possible to do this using a 38 + * a different piece of memory, it is not possible to do this using a 39 39 * single offset. Instead, we use the above hack for now. 40 40 */ 41 41 ··· 52 52 #include <asm/pgtable.h> 53 53 #include <asm/unwind.h> 54 54 #include <asm/sections.h> 55 - 56 - #if 0 57 - #define DEBUGP printk 58 - #else 59 - #define DEBUGP(fmt...) 60 - #endif 61 55 62 56 #define RELOC_REACHABLE(val, bits) \ 63 57 (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \ ··· 294 300 * sizeof(struct stub_entry); 295 301 } 296 302 297 - #define CONST 303 + #define CONST 298 304 int module_frob_arch_sections(CONST Elf_Ehdr *hdr, 299 305 CONST Elf_Shdr *sechdrs, 300 306 CONST char *secstrings, ··· 380 386 381 387 got[i].addr = value; 382 388 out: 383 - DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry), 389 + pr_debug("GOT ENTRY %d[%lx] val %lx\n", i, i*sizeof(struct got_entry), 384 390 value); 385 391 return i * sizeof(struct got_entry); 386 392 } ··· 533 539 //unsigned long dp = (unsigned long)$global$; 534 540 register unsigned long dp asm ("r27"); 535 541 536 - DEBUGP("Applying relocate section %u to %u\n", relsec, 542 + pr_debug("Applying relocate section %u to %u\n", relsec, 537 543 targetsec); 538 544 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 539 545 /* This is where to make the change */ ··· 557 563 558 564 #if 0 559 565 #define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t : 560 - DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n", 566 + pr_debug("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n", 561 567 strtab + sym->st_name, 562 568 (uint32_t)loc, val, addend, 563 569 r(R_PARISC_PLABEL32) ··· 598 604 /* See note about special handling of SEGREL32 at 599 605 * the beginning of this file. 600 606 */ 601 - *loc = fsel(val, addend); 607 + *loc = fsel(val, addend); 602 608 break; 603 609 case R_PARISC_SECREL32: 604 610 /* 32-bit section relative address. */ ··· 677 683 Elf_Addr loc0; 678 684 unsigned int targetsec = sechdrs[relsec].sh_info; 679 685 680 - DEBUGP("Applying relocate section %u to %u\n", relsec, 686 + pr_debug("Applying relocate section %u to %u\n", relsec, 681 687 targetsec); 682 688 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 683 689 /* This is where to make the change */ ··· 719 725 case R_PARISC_LTOFF21L: 720 726 /* LT-relative; left 21 bits */ 721 727 val = get_got(me, val, addend); 722 - DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n", 728 + pr_debug("LTOFF21L Symbol %s loc %p val %llx\n", 723 729 strtab + sym->st_name, 724 730 loc, val); 725 731 val = lrsel(val, 0); ··· 730 736 /* LT-relative; right 14 bits */ 731 737 val = get_got(me, val, addend); 732 738 val = rrsel(val, 0); 733 - DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n", 739 + pr_debug("LTOFF14R Symbol %s loc %p val %llx\n", 734 740 strtab + sym->st_name, 735 741 loc, val); 736 742 *loc = mask(*loc, 14) | reassemble_14(val); 737 743 break; 738 744 case R_PARISC_PCREL22F: 739 745 /* PC-relative; 22 bits */ 740 - DEBUGP("PCREL22F Symbol %s loc %p val %lx\n", 746 + pr_debug("PCREL22F Symbol %s loc %p val %llx\n", 741 747 strtab + sym->st_name, 742 748 loc, val); 743 749 val += addend; ··· 769 775 val = get_stub(me, val, addend, ELF_STUB_GOT, 770 776 loc0, targetsec); 771 777 } 772 - DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n", 778 + pr_debug("STUB FOR %s loc %px, val %llx+%llx at %llx\n", 773 779 strtab + sym->st_name, loc, sym->st_value, 774 780 addend, val); 775 781 val = (val - dot - 8)/4; ··· 793 799 /* See note about special handling of SEGREL32 at 794 800 * the beginning of this file. 795 801 */ 796 - *loc = fsel(val, addend); 802 + *loc = fsel(val, addend); 797 803 break; 798 804 case R_PARISC_SECREL32: 799 805 /* 32-bit section relative address. */ ··· 803 809 /* 64-bit function address */ 804 810 if(in_local(me, (void *)(val + addend))) { 805 811 *loc64 = get_fdesc(me, val+addend); 806 - DEBUGP("FDESC for %s at %p points to %lx\n", 812 + pr_debug("FDESC for %s at %llx points to %llx\n", 807 813 strtab + sym->st_name, *loc64, 808 814 ((Elf_Fdesc *)*loc64)->addr); 809 815 } else { 810 816 /* if the symbol is not local to this 811 817 * module then val+addend is a pointer 812 818 * to the function descriptor */ 813 - DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n", 819 + pr_debug("Non local FPTR64 Symbol %s loc %p val %llx\n", 814 820 strtab + sym->st_name, 815 821 loc, val); 816 822 *loc64 = val + addend; ··· 841 847 end = table + sechdrs[me->arch.unwind_section].sh_size; 842 848 gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; 843 849 844 - DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", 850 + pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", 845 851 me->arch.unwind_section, table, end, gp); 846 852 me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end); 847 853 } ··· 862 868 const char *strtab = NULL; 863 869 const Elf_Shdr *s; 864 870 char *secstrings; 871 + int err, symindex = -1; 865 872 Elf_Sym *newptr, *oldptr; 866 873 Elf_Shdr *symhdr = NULL; 867 874 #ifdef DEBUG ··· 889 894 if(sechdrs[i].sh_type == SHT_SYMTAB 890 895 && (sechdrs[i].sh_flags & SHF_ALLOC)) { 891 896 int strindex = sechdrs[i].sh_link; 897 + symindex = i; 892 898 /* FIXME: AWFUL HACK 893 899 * The cast is to drop the const from 894 900 * the sechdrs pointer */ ··· 899 903 } 900 904 901 905 902 - DEBUGP("module %s: strtab %p, symhdr %p\n", 906 + pr_debug("module %s: strtab %p, symhdr %p\n", 903 907 me->name, strtab, symhdr); 904 908 905 909 if(me->arch.got_count > MAX_GOTS) { ··· 918 922 oldptr = (void *)symhdr->sh_addr; 919 923 newptr = oldptr + 1; /* we start counting at 1 */ 920 924 nsyms = symhdr->sh_size / sizeof(Elf_Sym); 921 - DEBUGP("OLD num_symtab %lu\n", nsyms); 925 + pr_debug("OLD num_symtab %lu\n", nsyms); 922 926 923 927 for (i = 1; i < nsyms; i++) { 924 928 oldptr++; /* note, count starts at 1 so preincrement */ ··· 933 937 934 938 } 935 939 nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; 936 - DEBUGP("NEW num_symtab %lu\n", nsyms); 940 + pr_debug("NEW num_symtab %lu\n", nsyms); 937 941 symhdr->sh_size = nsyms * sizeof(Elf_Sym); 938 942 939 943 /* find .altinstructions section */ ··· 945 949 if (!strcmp(".altinstructions", secname)) 946 950 /* patch .altinstructions */ 947 951 apply_alternatives(aseg, aseg + s->sh_size, me->name); 948 - } 949 952 953 + /* For 32 bit kernels we're compiling modules with 954 + * -ffunction-sections so we must relocate the addresses in the 955 + *__mcount_loc section. 956 + */ 957 + if (symindex != -1 && !strcmp(secname, "__mcount_loc")) { 958 + if (s->sh_type == SHT_REL) 959 + err = apply_relocate((Elf_Shdr *)sechdrs, 960 + strtab, symindex, 961 + s - sechdrs, me); 962 + else if (s->sh_type == SHT_RELA) 963 + err = apply_relocate_add((Elf_Shdr *)sechdrs, 964 + strtab, symindex, 965 + s - sechdrs, me); 966 + if (err) 967 + return err; 968 + } 969 + } 950 970 return 0; 951 971 952 972
+7
arch/parisc/kernel/module.lds
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + SECTIONS { 4 + __mcount_loc : { 5 + *(__patchable_function_entries) 6 + } 7 + }
+71 -15
arch/parisc/kernel/patch.c
··· 17 17 18 18 struct patch { 19 19 void *addr; 20 - unsigned int insn; 20 + u32 *insn; 21 + unsigned int len; 21 22 }; 22 23 23 - static void __kprobes *patch_map(void *addr, int fixmap) 24 + static DEFINE_RAW_SPINLOCK(patch_lock); 25 + 26 + static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, 27 + int *need_unmap) 24 28 { 25 29 unsigned long uintaddr = (uintptr_t) addr; 26 30 bool module = !core_kernel_text(uintaddr); 27 31 struct page *page; 28 32 33 + *need_unmap = 0; 29 34 if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) 30 35 page = vmalloc_to_page(addr); 31 36 else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) ··· 38 33 else 39 34 return addr; 40 35 36 + *need_unmap = 1; 41 37 set_fixmap(fixmap, page_to_phys(page)); 38 + if (flags) 39 + raw_spin_lock_irqsave(&patch_lock, *flags); 40 + else 41 + __acquire(&patch_lock); 42 42 43 43 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); 44 44 } 45 45 46 - static void __kprobes patch_unmap(int fixmap) 46 + static void __kprobes patch_unmap(int fixmap, unsigned long *flags) 47 47 { 48 48 clear_fixmap(fixmap); 49 + 50 + if (flags) 51 + raw_spin_unlock_irqrestore(&patch_lock, *flags); 52 + else 53 + __release(&patch_lock); 49 54 } 50 55 51 - void __kprobes __patch_text(void *addr, unsigned int insn) 56 + void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) 52 57 { 53 - void *waddr = addr; 54 - int size; 58 + unsigned long start = (unsigned long)addr; 59 + unsigned long end = (unsigned long)addr + len; 60 + unsigned long flags; 61 + u32 *p, *fixmap; 62 + int mapped; 55 63 56 - waddr = patch_map(addr, FIX_TEXT_POKE0); 57 - *(u32 *)waddr = insn; 58 - size = sizeof(u32); 59 - flush_kernel_vmap_range(waddr, size); 60 - patch_unmap(FIX_TEXT_POKE0); 61 - flush_icache_range((uintptr_t)(addr), 62 - (uintptr_t)(addr) + size); 64 + /* Make sure we don't have any aliases in cache */ 65 + flush_kernel_vmap_range(addr, len); 66 + flush_icache_range(start, end); 67 + 68 + p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); 69 + 70 + while (len >= 4) { 71 + *p++ = *insn++; 72 + addr += sizeof(u32); 73 + len -= sizeof(u32); 74 + if (len && offset_in_page(addr) == 0) { 75 + /* 76 + * We're crossing a page boundary, so 77 + * need to remap 78 + */ 79 + flush_kernel_vmap_range((void *)fixmap, 80 + (p-fixmap) * sizeof(*p)); 81 + if (mapped) 82 + patch_unmap(FIX_TEXT_POKE0, &flags); 83 + p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, 84 + &mapped); 85 + } 86 + } 87 + 88 + flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); 89 + if (mapped) 90 + patch_unmap(FIX_TEXT_POKE0, &flags); 91 + flush_icache_range(start, end); 92 + } 93 + 94 + void __kprobes __patch_text(void *addr, u32 insn) 95 + { 96 + __patch_text_multiple(addr, &insn, sizeof(insn)); 63 97 } 64 98 65 99 static int __kprobes patch_text_stop_machine(void *data) 66 100 { 67 101 struct patch *patch = data; 68 102 69 - __patch_text(patch->addr, patch->insn); 70 - 103 + __patch_text_multiple(patch->addr, patch->insn, patch->len); 71 104 return 0; 72 105 } 73 106 ··· 113 70 { 114 71 struct patch patch = { 115 72 .addr = addr, 73 + .insn = &insn, 74 + .len = sizeof(insn), 75 + }; 76 + 77 + stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL); 78 + } 79 + 80 + void __kprobes patch_text_multiple(void *addr, u32 *insn, unsigned int len) 81 + { 82 + 83 + struct patch patch = { 84 + .addr = addr, 116 85 .insn = insn, 86 + .len = len 117 87 }; 118 88 119 89 stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
+2
arch/parisc/kernel/vmlinux.lds.S
··· 18 18 *(.data..vm0.pgd) \ 19 19 *(.data..vm0.pte) 20 20 21 + #define CC_USING_PATCHABLE_FUNCTION_ENTRY 22 + 21 23 #include <asm-generic/vmlinux.lds.h> 22 24 23 25 /* needed for the processor specific cache alignment size */
+5 -2
arch/parisc/mm/fixmap.c
··· 10 10 #include <asm/cacheflush.h> 11 11 #include <asm/fixmap.h> 12 12 13 - void set_fixmap(enum fixed_addresses idx, phys_addr_t phys) 13 + void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) 14 14 { 15 15 unsigned long vaddr = __fix_to_virt(idx); 16 16 pgd_t *pgd = pgd_offset_k(vaddr); ··· 28 28 flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); 29 29 } 30 30 31 - void clear_fixmap(enum fixed_addresses idx) 31 + void notrace clear_fixmap(enum fixed_addresses idx) 32 32 { 33 33 unsigned long vaddr = __fix_to_virt(idx); 34 34 pgd_t *pgd = pgd_offset_k(vaddr); 35 35 pmd_t *pmd = pmd_offset(pgd, vaddr); 36 36 pte_t *pte = pte_offset_kernel(pmd, vaddr); 37 + 38 + if (WARN_ON(pte_none(*pte))) 39 + return; 37 40 38 41 pte_clear(&init_mm, vaddr, pte); 39 42
+7
include/asm-generic/vmlinux.lds.h
··· 110 110 #endif 111 111 112 112 #ifdef CONFIG_FTRACE_MCOUNT_RECORD 113 + #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY 114 + #define MCOUNT_REC() . = ALIGN(8); \ 115 + __start_mcount_loc = .; \ 116 + KEEP(*(__patchable_function_entries)) \ 117 + __stop_mcount_loc = .; 118 + #else 113 119 #define MCOUNT_REC() . = ALIGN(8); \ 114 120 __start_mcount_loc = .; \ 115 121 KEEP(*(__mcount_loc)) \ 116 122 __stop_mcount_loc = .; 123 + #endif 117 124 #else 118 125 #define MCOUNT_REC() 119 126 #endif
+2
include/linux/compiler_types.h
··· 112 112 113 113 #if defined(CC_USING_HOTPATCH) 114 114 #define notrace __attribute__((hotpatch(0, 0))) 115 + #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) 116 + #define notrace __attribute__((patchable_function_entry(0, 0))) 115 117 #else 116 118 #define notrace __attribute__((__no_instrument_function__)) 117 119 #endif