Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'loongarch-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

- Fix objtool about do_syscall() and Clang

- Enable generic CPU vulnerabilities support

- Enable ACPI BGRT handling

- Rework CPU feature probe from CPUCFG/IOCSR

- Add ARCH_HAS_SET_MEMORY support

- Add ARCH_HAS_SET_DIRECT_MAP support

- Improve hardware page table walker

- Simplify _percpu_read() and _percpu_write()

- Add advanced extended IRQ model documentation

- Some bug fixes and other small changes

* tag 'loongarch-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
Docs/LoongArch: Add advanced extended IRQ model description
LoongArch: Remove posix_types.h include from sigcontext.h
LoongArch: Fix memleak in pci_acpi_scan_root()
LoongArch: Simplify _percpu_read() and _percpu_write()
LoongArch: Improve hardware page table walker
LoongArch: Add ARCH_HAS_SET_DIRECT_MAP support
LoongArch: Add ARCH_HAS_SET_MEMORY support
LoongArch: Rework CPU feature probe from CPUCFG/IOCSR
LoongArch: Enable ACPI BGRT handling
LoongArch: Enable generic CPU vulnerabilites support
LoongArch: Remove STACK_FRAME_NON_STANDARD(do_syscall)
LoongArch: Set AS_HAS_THIN_ADD_SUB as y if AS_IS_LLVM
LoongArch: Enable objtool for Clang
objtool: Handle frame pointer related instructions

+571 -199
+32
Documentation/arch/loongarch/irq-chip-model.rst
··· 85 85 | Devices | 86 86 +---------+ 87 87 88 + Advanced Extended IRQ model 89 + =========================== 90 + 91 + In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupt go 92 + to CPUINTC directly, CPU UARTS interrupts go to LIOINTC, PCH-MSI interrupts go 93 + to AVECINTC, and then go to CPUINTC directly, while all other devices interrupts 94 + go to PCH-PIC/PCH-LPC and gathered by EIOINTC, and then go to CPUINTC directly:: 95 + 96 + +-----+ +-----------------------+ +-------+ 97 + | IPI | --> | CPUINTC | <-- | Timer | 98 + +-----+ +-----------------------+ +-------+ 99 + ^ ^ ^ 100 + | | | 101 + +---------+ +----------+ +---------+ +-------+ 102 + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | 103 + +---------+ +----------+ +---------+ +-------+ 104 + ^ ^ 105 + | | 106 + +---------+ +---------+ 107 + | PCH-PIC | | PCH-MSI | 108 + +---------+ +---------+ 109 + ^ ^ ^ 110 + | | | 111 + +---------+ +---------+ +---------+ 112 + | Devices | | PCH-LPC | | Devices | 113 + +---------+ +---------+ +---------+ 114 + ^ 115 + | 116 + +---------+ 117 + | Devices | 118 + +---------+ 119 + 88 120 ACPI-related definitions 89 121 ======================== 90 122
+32
Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst
··· 87 87 | Devices | 88 88 +---------+ 89 89 90 + 高级扩展IRQ模型 91 + =============== 92 + 93 + 在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC, 94 + CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接 95 + 送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC 96 + 统一收集,再直接到达CPUINTC:: 97 + 98 + +-----+ +-----------------------+ +-------+ 99 + | IPI | --> | CPUINTC | <-- | Timer | 100 + +-----+ +-----------------------+ +-------+ 101 + ^ ^ ^ 102 + | | | 103 + +---------+ +----------+ +---------+ +-------+ 104 + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | 105 + +---------+ +----------+ +---------+ +-------+ 106 + ^ ^ 107 + | | 108 + +---------+ +---------+ 109 + | PCH-PIC | | PCH-MSI | 110 + +---------+ +---------+ 111 + ^ ^ ^ 112 + | | | 113 + +---------+ +---------+ +---------+ 114 + | Devices | | PCH-LPC | | Devices | 115 + +---------+ +---------+ +---------+ 116 + ^ 117 + | 118 + +---------+ 119 + | Devices | 120 + +---------+ 121 + 90 122 ACPI相关的定义 91 123 ============== 92 124
+5 -2
arch/loongarch/Kconfig
··· 25 25 select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 26 26 select ARCH_HAS_PTE_DEVMAP 27 27 select ARCH_HAS_PTE_SPECIAL 28 + select ARCH_HAS_SET_MEMORY 29 + select ARCH_HAS_SET_DIRECT_MAP 28 30 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 29 31 select ARCH_INLINE_READ_LOCK if !PREEMPTION 30 32 select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION ··· 84 82 select GENERIC_CMOS_UPDATE 85 83 select GENERIC_CPU_AUTOPROBE 86 84 select GENERIC_CPU_DEVICES 85 + select GENERIC_CPU_VULNERABILITIES 87 86 select GENERIC_ENTRY 88 87 select GENERIC_GETTIMEOFDAY 89 88 select GENERIC_IOREMAP if !ARCH_IOREMAP ··· 150 147 select HAVE_LIVEPATCH 151 148 select HAVE_MOD_ARCH_SPECIFIC 152 149 select HAVE_NMI 153 - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG 150 + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB 154 151 select HAVE_PCI 155 152 select HAVE_PERF_EVENTS 156 153 select HAVE_PERF_REGS ··· 270 267 def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) 271 268 272 269 config AS_HAS_THIN_ADD_SUB 273 - def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) 270 + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM 274 271 275 272 config AS_HAS_LSX_EXTENSION 276 273 def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
+2
arch/loongarch/include/asm/atomic.h
··· 15 15 #define __LL "ll.w " 16 16 #define __SC "sc.w " 17 17 #define __AMADD "amadd.w " 18 + #define __AMOR "amor.w " 18 19 #define __AMAND_DB "amand_db.w " 19 20 #define __AMOR_DB "amor_db.w " 20 21 #define __AMXOR_DB "amxor_db.w " ··· 23 22 #define __LL "ll.d " 24 23 #define __SC "sc.d " 25 24 #define __AMADD "amadd.d " 25 + #define __AMOR "amor.d " 26 26 #define __AMAND_DB "amand_db.d " 27 27 #define __AMOR_DB "amor_db.d " 28 28 #define __AMXOR_DB "amxor_db.d "
+2
arch/loongarch/include/asm/cpu-features.h
··· 51 51 #define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS) 52 52 #define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips) 53 53 #define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR) 54 + #define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR) 54 55 #define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB) 55 56 #define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH) 56 57 #define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT) ··· 66 65 #define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) 67 66 #define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) 68 67 #define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) 68 + #define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW) 69 69 #define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT) 70 70 71 71 #endif /* __ASM_CPU_FEATURES_H */
+17 -13
arch/loongarch/include/asm/cpu.h
··· 87 87 #define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */ 88 88 #define CPU_FEATURE_TLB 13 /* CPU has TLB */ 89 89 #define CPU_FEATURE_CSR 14 /* CPU has CSR */ 90 - #define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */ 91 - #define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */ 92 - #define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */ 93 - #define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */ 94 - #define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */ 95 - #define CPU_FEATURE_PMP 20 /* CPU has perfermance counter */ 96 - #define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */ 97 - #define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */ 98 - #define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */ 99 - #define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ 100 - #define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ 101 - #define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ 102 - #define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */ 90 + #define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */ 91 + #define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */ 92 + #define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */ 93 + #define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */ 94 + #define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */ 95 + #define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */ 96 + #define CPU_FEATURE_PMP 21 /* CPU has perfermance counter */ 97 + #define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */ 98 + #define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */ 99 + #define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */ 100 + #define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */ 101 + #define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */ 102 + #define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */ 103 + #define CPU_FEATURE_LSPW 28 /* 
CPU has LSPW (lddir/ldpte instructions) */ 104 + #define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */ 103 105 104 106 #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) 105 107 #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) ··· 117 115 #define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM) 118 116 #define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS) 119 117 #define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB) 118 + #define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR) 120 119 #define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR) 121 120 #define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH) 122 121 #define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT) ··· 131 128 #define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) 132 129 #define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) 133 130 #define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) 131 + #define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW) 134 132 #define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT) 135 133 136 134 #endif /* _ASM_CPU_H */
+1
arch/loongarch/include/asm/loongarch.h
··· 62 62 #define LOONGARCH_CPUCFG1 0x1 63 63 #define CPUCFG1_ISGR32 BIT(0) 64 64 #define CPUCFG1_ISGR64 BIT(1) 65 + #define CPUCFG1_ISA GENMASK(1, 0) 65 66 #define CPUCFG1_PAGING BIT(2) 66 67 #define CPUCFG1_IOCSR BIT(3) 67 68 #define CPUCFG1_PABITS GENMASK(11, 4)
+27 -8
arch/loongarch/include/asm/mmu_context.h
··· 49 49 50 50 /* Normal, classic get_new_mmu_context */ 51 51 static inline void 52 - get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) 52 + get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) 53 53 { 54 54 u64 asid = asid_cache(cpu); 55 55 56 56 if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) 57 - local_flush_tlb_user(); /* start new asid cycle */ 57 + *need_flush = true; /* start new asid cycle */ 58 58 59 59 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 60 60 } ··· 74 74 return 0; 75 75 } 76 76 77 + static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl) 78 + { 79 + __asm__ __volatile__( 80 + "csrwr %[pgdl_val], %[pgdl_reg] \n\t" 81 + "csrwr %[asid_val], %[asid_reg] \n\t" 82 + : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl) 83 + : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL) 84 + : "memory" 85 + ); 86 + } 87 + 77 88 static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, 78 89 struct task_struct *tsk) 79 90 { 91 + bool need_flush = false; 80 92 unsigned int cpu = smp_processor_id(); 81 93 82 94 /* Check if our ASID is of an older version and thus invalid */ 83 95 if (!asid_valid(next, cpu)) 84 - get_new_mmu_context(next, cpu); 85 - 86 - write_csr_asid(cpu_asid(cpu, next)); 96 + get_new_mmu_context(next, cpu, &need_flush); 87 97 88 98 if (next != &init_mm) 89 - csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); 99 + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd); 90 100 else 91 - csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); 101 + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir); 102 + 103 + if (need_flush) 104 + local_flush_tlb_user(); /* Flush tlb after update ASID */ 92 105 93 106 /* 94 107 * Mark current->active_mm as not "active" anymore. 
··· 148 135 asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data); 149 136 150 137 if (asid == cpu_asid(cpu, mm)) { 138 + bool need_flush = false; 139 + 151 140 if (!current->mm || (current->mm == mm)) { 152 - get_new_mmu_context(mm, cpu); 141 + get_new_mmu_context(mm, cpu, &need_flush); 142 + 153 143 write_csr_asid(cpu_asid(cpu, mm)); 144 + if (need_flush) 145 + local_flush_tlb_user(); /* Flush tlb after update ASID */ 146 + 154 147 goto out; 155 148 } 156 149 }
+35 -89
arch/loongarch/include/asm/percpu.h
··· 68 68 PERCPU_OP(or, or, |) 69 69 #undef PERCPU_OP 70 70 71 - static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size) 72 - { 73 - unsigned long ret; 74 - 75 - switch (size) { 76 - case 1: 77 - __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n" 78 - : [ret] "=&r"(ret) 79 - : [ptr] "r"(ptr) 80 - : "memory"); 81 - break; 82 - case 2: 83 - __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n" 84 - : [ret] "=&r"(ret) 85 - : [ptr] "r"(ptr) 86 - : "memory"); 87 - break; 88 - case 4: 89 - __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n" 90 - : [ret] "=&r"(ret) 91 - : [ptr] "r"(ptr) 92 - : "memory"); 93 - break; 94 - case 8: 95 - __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n" 96 - : [ret] "=&r"(ret) 97 - : [ptr] "r"(ptr) 98 - : "memory"); 99 - break; 100 - default: 101 - ret = 0; 102 - BUILD_BUG(); 103 - } 104 - 105 - return ret; 106 - } 107 - 108 - static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size) 109 - { 110 - switch (size) { 111 - case 1: 112 - __asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n" 113 - : 114 - : [val] "r" (val), [ptr] "r" (ptr) 115 - : "memory"); 116 - break; 117 - case 2: 118 - __asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n" 119 - : 120 - : [val] "r" (val), [ptr] "r" (ptr) 121 - : "memory"); 122 - break; 123 - case 4: 124 - __asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n" 125 - : 126 - : [val] "r" (val), [ptr] "r" (ptr) 127 - : "memory"); 128 - break; 129 - case 8: 130 - __asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n" 131 - : 132 - : [val] "r" (val), [ptr] "r" (ptr) 133 - : "memory"); 134 - break; 135 - default: 136 - BUILD_BUG(); 137 - } 138 - } 139 - 140 71 static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) 141 72 { 142 73 switch (size) { ··· 88 157 return 0; 89 158 } 90 159 160 + #define __pcpu_op_1(op) op ".b " 161 + #define __pcpu_op_2(op) op ".h " 162 + #define __pcpu_op_4(op) op ".w " 163 + #define 
__pcpu_op_8(op) op ".d " 164 + 165 + #define _percpu_read(size, _pcp) \ 166 + ({ \ 167 + typeof(_pcp) __pcp_ret; \ 168 + \ 169 + __asm__ __volatile__( \ 170 + __pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \ 171 + : [ret] "=&r"(__pcp_ret) \ 172 + : [ptr] "r"(&(_pcp)) \ 173 + : "memory"); \ 174 + \ 175 + __pcp_ret; \ 176 + }) 177 + 178 + #define _percpu_write(size, _pcp, _val) \ 179 + do { \ 180 + __asm__ __volatile__( \ 181 + __pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \ 182 + : \ 183 + : [val] "r"(_val), [ptr] "r"(&(_pcp)) \ 184 + : "memory"); \ 185 + } while (0) 186 + 91 187 /* this_cpu_cmpxchg */ 92 188 #define _protect_cmpxchg_local(pcp, o, n) \ 93 189 ({ \ ··· 124 166 preempt_enable_notrace(); \ 125 167 __ret; \ 126 168 }) 127 - 128 - #define _percpu_read(pcp) \ 129 - ({ \ 130 - typeof(pcp) __retval; \ 131 - __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \ 132 - __retval; \ 133 - }) 134 - 135 - #define _percpu_write(pcp, val) \ 136 - do { \ 137 - __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \ 138 - } while (0) \ 139 169 140 170 #define _pcp_protect(operation, pcp, val) \ 141 171 ({ \ ··· 161 215 #define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) 162 216 #define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) 163 217 164 - #define this_cpu_read_1(pcp) _percpu_read(pcp) 165 - #define this_cpu_read_2(pcp) _percpu_read(pcp) 166 - #define this_cpu_read_4(pcp) _percpu_read(pcp) 167 - #define this_cpu_read_8(pcp) _percpu_read(pcp) 218 + #define this_cpu_read_1(pcp) _percpu_read(1, pcp) 219 + #define this_cpu_read_2(pcp) _percpu_read(2, pcp) 220 + #define this_cpu_read_4(pcp) _percpu_read(4, pcp) 221 + #define this_cpu_read_8(pcp) _percpu_read(8, pcp) 168 222 169 - #define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) 170 - #define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) 171 - #define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) 172 - #define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) 223 + #define 
this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val) 224 + #define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val) 225 + #define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val) 226 + #define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val) 173 227 174 228 #define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) 175 229 #define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
+12 -18
arch/loongarch/include/asm/pgtable.h
··· 331 331 * Make sure the buddy is global too (if it's !none, 332 332 * it better already be global) 333 333 */ 334 + if (pte_none(ptep_get(buddy))) { 334 335 #ifdef CONFIG_SMP 335 - /* 336 - * For SMP, multiple CPUs can race, so we need to do 337 - * this atomically. 338 - */ 339 - unsigned long page_global = _PAGE_GLOBAL; 340 - unsigned long tmp; 336 + /* 337 + * For SMP, multiple CPUs can race, so we need 338 + * to do this atomically. 339 + */ 340 + __asm__ __volatile__( 341 + __AMOR "$zero, %[global], %[buddy] \n" 342 + : [buddy] "+ZB" (buddy->pte) 343 + : [global] "r" (_PAGE_GLOBAL) 344 + : "memory"); 341 345 342 - __asm__ __volatile__ ( 343 - "1:" __LL "%[tmp], %[buddy] \n" 344 - " bnez %[tmp], 2f \n" 345 - " or %[tmp], %[tmp], %[global] \n" 346 - __SC "%[tmp], %[buddy] \n" 347 - " beqz %[tmp], 1b \n" 348 - " nop \n" 349 - "2: \n" 350 - __WEAK_LLSC_MB 351 - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 352 - : [global] "r" (page_global)); 346 + DBAR(0b11000); /* o_wrw = 0b11000 */ 353 347 #else /* !CONFIG_SMP */ 354 - if (pte_none(ptep_get(buddy))) 355 348 WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); 356 349 #endif /* CONFIG_SMP */ 350 + } 357 351 } 358 352 } 359 353
+21
arch/loongarch/include/asm/set_memory.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2024 Loongson Technology Corporation Limited 4 + */ 5 + 6 + #ifndef _ASM_LOONGARCH_SET_MEMORY_H 7 + #define _ASM_LOONGARCH_SET_MEMORY_H 8 + 9 + /* 10 + * Functions to change memory attributes. 11 + */ 12 + int set_memory_x(unsigned long addr, int numpages); 13 + int set_memory_nx(unsigned long addr, int numpages); 14 + int set_memory_ro(unsigned long addr, int numpages); 15 + int set_memory_rw(unsigned long addr, int numpages); 16 + 17 + bool kernel_page_present(struct page *page); 18 + int set_direct_map_default_noflush(struct page *page); 19 + int set_direct_map_invalid_noflush(struct page *page); 20 + 21 + #endif /* _ASM_LOONGARCH_SET_MEMORY_H */
+1
arch/loongarch/include/uapi/asm/hwcap.h
··· 17 17 #define HWCAP_LOONGARCH_LBT_ARM (1 << 11) 18 18 #define HWCAP_LOONGARCH_LBT_MIPS (1 << 12) 19 19 #define HWCAP_LOONGARCH_PTW (1 << 13) 20 + #define HWCAP_LOONGARCH_LSPW (1 << 14) 20 21 21 22 #endif /* _UAPI_ASM_HWCAP_H */
-1
arch/loongarch/include/uapi/asm/sigcontext.h
··· 9 9 #define _UAPI_ASM_SIGCONTEXT_H 10 10 11 11 #include <linux/types.h> 12 - #include <linux/posix_types.h> 13 12 14 13 /* FP context was used */ 15 14 #define SC_USED_FP (1 << 0)
+4
arch/loongarch/kernel/acpi.c
··· 9 9 10 10 #include <linux/init.h> 11 11 #include <linux/acpi.h> 12 + #include <linux/efi-bgrt.h> 12 13 #include <linux/irq.h> 13 14 #include <linux/irqdomain.h> 14 15 #include <linux/memblock.h> ··· 212 211 213 212 /* Do not enable ACPI SPCR console by default */ 214 213 acpi_parse_spcr(earlycon_acpi_spcr_enable, false); 214 + 215 + if (IS_ENABLED(CONFIG_ACPI_BGRT)) 216 + acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); 215 217 216 218 return; 217 219
+79 -55
arch/loongarch/kernel/cpu-probe.c
··· 91 91 unsigned int config; 92 92 unsigned long asid_mask; 93 93 94 - c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | 95 - LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH; 94 + c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT; 96 95 97 96 elf_hwcap = HWCAP_LOONGARCH_CPUCFG; 98 97 99 98 config = read_cpucfg(LOONGARCH_CPUCFG1); 99 + 100 + switch (config & CPUCFG1_ISA) { 101 + case 0: 102 + set_isa(c, LOONGARCH_CPU_ISA_LA32R); 103 + break; 104 + case 1: 105 + set_isa(c, LOONGARCH_CPU_ISA_LA32S); 106 + break; 107 + case 2: 108 + set_isa(c, LOONGARCH_CPU_ISA_LA64); 109 + break; 110 + default: 111 + pr_warn("Warning: unknown ISA level\n"); 112 + } 113 + 114 + if (config & CPUCFG1_PAGING) 115 + c->options |= LOONGARCH_CPU_TLB; 116 + if (config & CPUCFG1_IOCSR) 117 + c->options |= LOONGARCH_CPU_IOCSR; 100 118 if (config & CPUCFG1_UAL) { 101 119 c->options |= LOONGARCH_CPU_UAL; 102 120 elf_hwcap |= HWCAP_LOONGARCH_UAL; ··· 157 139 c->options |= LOONGARCH_CPU_PTW; 158 140 elf_hwcap |= HWCAP_LOONGARCH_PTW; 159 141 } 142 + if (config & CPUCFG2_LSPW) { 143 + c->options |= LOONGARCH_CPU_LSPW; 144 + elf_hwcap |= HWCAP_LOONGARCH_LSPW; 145 + } 160 146 if (config & CPUCFG2_LVZP) { 161 147 c->options |= LOONGARCH_CPU_LVZ; 162 148 elf_hwcap |= HWCAP_LOONGARCH_LVZ; ··· 183 161 config = read_cpucfg(LOONGARCH_CPUCFG6); 184 162 if (config & CPUCFG6_PMP) 185 163 c->options |= LOONGARCH_CPU_PMP; 186 - 187 - config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); 188 - if (config & IOCSRF_CSRIPI) 189 - c->options |= LOONGARCH_CPU_CSRIPI; 190 - if (config & IOCSRF_EXTIOI) 191 - c->options |= LOONGARCH_CPU_EXTIOI; 192 - if (config & IOCSRF_FREQSCALE) 193 - c->options |= LOONGARCH_CPU_SCALEFREQ; 194 - if (config & IOCSRF_FLATMODE) 195 - c->options |= LOONGARCH_CPU_FLATMODE; 196 - if (config & IOCSRF_EIODECODE) 197 - c->options |= LOONGARCH_CPU_EIODECODE; 198 - if (config & IOCSRF_AVEC) 199 - c->options |= LOONGARCH_CPU_AVECINT; 200 - if (config & 
IOCSRF_VM) 201 - c->options |= LOONGARCH_CPU_HYPERVISOR; 202 164 203 165 config = csr_read32(LOONGARCH_CSR_ASID); 204 166 config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT; ··· 216 210 default: 217 211 pr_warn("Warning: unknown TLB type\n"); 218 212 } 213 + 214 + if (get_num_brps() + get_num_wrps()) 215 + c->options |= LOONGARCH_CPU_WATCH; 219 216 } 220 217 221 218 #define MAX_NAME_LEN 32 ··· 229 220 230 221 static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu) 231 222 { 223 + uint32_t config; 232 224 uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]); 233 225 uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]); 226 + const char *core_name = "Unknown"; 227 + 228 + switch (BIT(fls(c->isa_level) - 1)) { 229 + case LOONGARCH_CPU_ISA_LA32R: 230 + case LOONGARCH_CPU_ISA_LA32S: 231 + c->cputype = CPU_LOONGSON32; 232 + __cpu_family[cpu] = "Loongson-32bit"; 233 + break; 234 + case LOONGARCH_CPU_ISA_LA64: 235 + c->cputype = CPU_LOONGSON64; 236 + __cpu_family[cpu] = "Loongson-64bit"; 237 + break; 238 + } 239 + 240 + switch (c->processor_id & PRID_SERIES_MASK) { 241 + case PRID_SERIES_LA132: 242 + core_name = "LA132"; 243 + break; 244 + case PRID_SERIES_LA264: 245 + core_name = "LA264"; 246 + break; 247 + case PRID_SERIES_LA364: 248 + core_name = "LA364"; 249 + break; 250 + case PRID_SERIES_LA464: 251 + core_name = "LA464"; 252 + break; 253 + case PRID_SERIES_LA664: 254 + core_name = "LA664"; 255 + break; 256 + } 257 + 258 + pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name); 259 + 260 + if (!cpu_has_iocsr) 261 + return; 234 262 235 263 if (!__cpu_full_name[cpu]) 236 264 __cpu_full_name[cpu] = cpu_full_name; ··· 275 229 *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); 276 230 *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); 277 231 278 - switch (c->processor_id & PRID_SERIES_MASK) { 279 - case PRID_SERIES_LA132: 280 - c->cputype = CPU_LOONGSON32; 281 - set_isa(c, LOONGARCH_CPU_ISA_LA32S); 282 - 
__cpu_family[cpu] = "Loongson-32bit"; 283 - pr_info("32-bit Loongson Processor probed (LA132 Core)\n"); 284 - break; 285 - case PRID_SERIES_LA264: 286 - c->cputype = CPU_LOONGSON64; 287 - set_isa(c, LOONGARCH_CPU_ISA_LA64); 288 - __cpu_family[cpu] = "Loongson-64bit"; 289 - pr_info("64-bit Loongson Processor probed (LA264 Core)\n"); 290 - break; 291 - case PRID_SERIES_LA364: 292 - c->cputype = CPU_LOONGSON64; 293 - set_isa(c, LOONGARCH_CPU_ISA_LA64); 294 - __cpu_family[cpu] = "Loongson-64bit"; 295 - pr_info("64-bit Loongson Processor probed (LA364 Core)\n"); 296 - break; 297 - case PRID_SERIES_LA464: 298 - c->cputype = CPU_LOONGSON64; 299 - set_isa(c, LOONGARCH_CPU_ISA_LA64); 300 - __cpu_family[cpu] = "Loongson-64bit"; 301 - pr_info("64-bit Loongson Processor probed (LA464 Core)\n"); 302 - break; 303 - case PRID_SERIES_LA664: 304 - c->cputype = CPU_LOONGSON64; 305 - set_isa(c, LOONGARCH_CPU_ISA_LA64); 306 - __cpu_family[cpu] = "Loongson-64bit"; 307 - pr_info("64-bit Loongson Processor probed (LA664 Core)\n"); 308 - break; 309 - default: /* Default to 64 bit */ 310 - c->cputype = CPU_LOONGSON64; 311 - set_isa(c, LOONGARCH_CPU_ISA_LA64); 312 - __cpu_family[cpu] = "Loongson-64bit"; 313 - pr_info("64-bit Loongson Processor probed (Unknown Core)\n"); 314 - } 232 + config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); 233 + if (config & IOCSRF_CSRIPI) 234 + c->options |= LOONGARCH_CPU_CSRIPI; 235 + if (config & IOCSRF_EXTIOI) 236 + c->options |= LOONGARCH_CPU_EXTIOI; 237 + if (config & IOCSRF_FREQSCALE) 238 + c->options |= LOONGARCH_CPU_SCALEFREQ; 239 + if (config & IOCSRF_FLATMODE) 240 + c->options |= LOONGARCH_CPU_FLATMODE; 241 + if (config & IOCSRF_EIODECODE) 242 + c->options |= LOONGARCH_CPU_EIODECODE; 243 + if (config & IOCSRF_AVEC) 244 + c->options |= LOONGARCH_CPU_AVECINT; 245 + if (config & IOCSRF_VM) 246 + c->options |= LOONGARCH_CPU_HYPERVISOR; 315 247 } 316 248 317 249 #ifdef CONFIG_64BIT
+7 -3
arch/loongarch/kernel/proc.c
··· 31 31 static int show_cpuinfo(struct seq_file *m, void *v) 32 32 { 33 33 unsigned long n = (unsigned long) v - 1; 34 + unsigned int isa = cpu_data[n].isa_level; 34 35 unsigned int version = cpu_data[n].processor_id & 0xff; 35 36 unsigned int fp_version = cpu_data[n].fpu_vers; 36 37 struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args; ··· 65 64 cpu_pabits + 1, cpu_vabits + 1); 66 65 67 66 seq_printf(m, "ISA\t\t\t:"); 68 - if (cpu_has_loongarch32) 69 - seq_printf(m, " loongarch32"); 70 - if (cpu_has_loongarch64) 67 + if (isa & LOONGARCH_CPU_ISA_LA32R) 68 + seq_printf(m, " loongarch32r"); 69 + if (isa & LOONGARCH_CPU_ISA_LA32S) 70 + seq_printf(m, " loongarch32s"); 71 + if (isa & LOONGARCH_CPU_ISA_LA64) 71 72 seq_printf(m, " loongarch64"); 72 73 seq_printf(m, "\n"); 73 74 ··· 84 81 if (cpu_has_complex) seq_printf(m, " complex"); 85 82 if (cpu_has_crypto) seq_printf(m, " crypto"); 86 83 if (cpu_has_ptw) seq_printf(m, " ptw"); 84 + if (cpu_has_lspw) seq_printf(m, " lspw"); 87 85 if (cpu_has_lvz) seq_printf(m, " lvz"); 88 86 if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86"); 89 87 if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
-4
arch/loongarch/kernel/syscall.c
··· 79 79 80 80 syscall_exit_to_user_mode(regs); 81 81 } 82 - 83 - #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET 84 - STACK_FRAME_NON_STANDARD(do_syscall); 85 - #endif
+2 -1
arch/loongarch/mm/Makefile
··· 4 4 # 5 5 6 6 obj-y += init.o cache.o tlb.o tlbex.o extable.o \ 7 - fault.o ioremap.o maccess.o mmap.o pgtable.o page.o 7 + fault.o ioremap.o maccess.o mmap.o pgtable.o \ 8 + page.o pageattr.o 8 9 9 10 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10 11 obj-$(CONFIG_KASAN) += kasan_init.o
+41
arch/loongarch/mm/fault.c
··· 31 31 32 32 int show_unhandled_signals = 1; 33 33 34 + static int __kprobes spurious_fault(unsigned long write, unsigned long address) 35 + { 36 + pgd_t *pgd; 37 + p4d_t *p4d; 38 + pud_t *pud; 39 + pmd_t *pmd; 40 + pte_t *pte; 41 + 42 + if (!(address & __UA_LIMIT)) 43 + return 0; 44 + 45 + pgd = pgd_offset_k(address); 46 + if (!pgd_present(pgdp_get(pgd))) 47 + return 0; 48 + 49 + p4d = p4d_offset(pgd, address); 50 + if (!p4d_present(p4dp_get(p4d))) 51 + return 0; 52 + 53 + pud = pud_offset(p4d, address); 54 + if (!pud_present(pudp_get(pud))) 55 + return 0; 56 + 57 + pmd = pmd_offset(pud, address); 58 + if (!pmd_present(pmdp_get(pmd))) 59 + return 0; 60 + 61 + if (pmd_leaf(*pmd)) { 62 + return write ? pmd_write(pmdp_get(pmd)) : 1; 63 + } else { 64 + pte = pte_offset_kernel(pmd, address); 65 + if (!pte_present(ptep_get(pte))) 66 + return 0; 67 + 68 + return write ? pte_write(ptep_get(pte)) : 1; 69 + } 70 + } 71 + 34 72 static void __kprobes no_context(struct pt_regs *regs, 35 73 unsigned long write, unsigned long address) 36 74 { 37 75 const int field = sizeof(unsigned long) * 2; 76 + 77 + if (spurious_fault(write, address)) 78 + return; 38 79 39 80 /* Are we prepared to handle this kernel fault? */ 40 81 if (fixup_exception(regs))
+218
arch/loongarch/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 *
 * Change protection attributes of kernel page mappings
 * (ARCH_HAS_SET_MEMORY / ARCH_HAS_SET_DIRECT_MAP support).
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Attribute bits applied at every level of a page walk: bits in
 * set_mask are OR-ed into the entry, bits in clear_mask are cleared.
 */
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

/* Apply the walk's set/clear masks to a raw page-table entry value. */
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	unsigned long new_val = val;
	struct pageattr_masks *masks = walk->private;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

/* Rewrite a PGD-level leaf (huge) entry; non-leaf entries are descended into. */
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = pgdp_get(pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

/* Rewrite a P4D-level leaf (huge) entry; non-leaf entries are descended into. */
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = p4dp_get(p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

/* Rewrite a PUD-level leaf (huge) entry; non-leaf entries are descended into. */
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = pudp_get(pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

/* Rewrite a PMD-level leaf (huge) entry; non-leaf entries are descended into. */
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = pmdp_get(pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

/* PTEs are always leaves: unconditionally apply the masks. */
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = ptep_get(pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

/* Unmapped ranges have no entries to change; skip them without error. */
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Walk [addr, addr + numpages * PAGE_SIZE) in init_mm's page tables,
 * OR-ing set_mask into and clearing clear_mask from every mapped entry,
 * then flush the kernel TLB for the range.
 *
 * Returns 0 on success or a negative errno from the page walk.
 */
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	/* The walk mutates init_mm's tables, so serialize with the write lock. */
	mmap_write_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}

/*
 * Make [addr, addr + numpages pages) executable by clearing _PAGE_NO_EXEC.
 * Addresses below vm_map_base are skipped (presumably DMW direct-mapped
 * with no page tables to adjust — NOTE(review): confirm).
 */
int set_memory_x(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
}

/* Make the range non-executable by setting _PAGE_NO_EXEC. */
int set_memory_nx(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
}

/* Make the range read-only by clearing the write and dirty bits. */
int set_memory_ro(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
}

/* Make the range writable by setting the write and dirty bits. */
int set_memory_rw(unsigned long addr, int numpages)
{
	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
}

/*
 * Report whether @page is currently mapped in the kernel page tables.
 * Walks the levels by hand: a leaf (huge) entry at any level counts as
 * present; addresses below vm_map_base are always considered present.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(pgdp_get(pgd)))
		return false;
	if (pgd_leaf(pgdp_get(pgd)))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(p4dp_get(p4d)))
		return false;
	if (p4d_leaf(p4dp_get(p4d)))
		return true;

	pud = pud_offset(p4d, addr);
	if (pud_none(pudp_get(pud)))
		return false;
	if (pud_leaf(pudp_get(pud)))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(pmdp_get(pmd)))
		return false;
	if (pmd_leaf(pmdp_get(pmd)))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(ptep_get(pte));
}

/*
 * Restore @page's mapping to the default kernel protections (PAGE_KERNEL).
 * Despite the _noflush name, __set_memory also flushes the TLB for the
 * range — extra work that is harmless for this API's contract.
 */
int set_direct_map_default_noflush(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
}

/*
 * Invalidate @page's kernel mapping by clearing the present and valid
 * bits, so subsequent accesses through it fault.
 */
int set_direct_map_invalid_noflush(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (addr < vm_map_base)
		return 0;

	return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
}
+1
arch/loongarch/pci/acpi.c
··· 225 225 if (bus) { 226 226 memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window)); 227 227 kfree(info); 228 + kfree(root_ops); 228 229 } else { 229 230 struct pci_bus *child; 230 231
+1 -1
drivers/acpi/Kconfig
··· 451 451 452 452 config ACPI_BGRT 453 453 bool "Boottime Graphics Resource Table support" 454 - depends on EFI && (X86 || ARM64) 454 + depends on EFI && (X86 || ARM64 || LOONGARCH) 455 455 help 456 456 This driver adds support for exposing the ACPI Boottime Graphics 457 457 Resource Table, which allows the operating system to obtain
+10 -1
tools/objtool/arch/loongarch/decode.c
··· 122 122 switch (inst.reg2i12_format.opcode) { 123 123 case addid_op: 124 124 if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) { 125 - /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */ 125 + /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */ 126 126 insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); 127 127 ADD_OP(op) { 128 128 op->src.type = OP_SRC_ADD; ··· 131 131 op->dest.type = OP_DEST_REG; 132 132 op->dest.reg = inst.reg2i12_format.rd; 133 133 } 134 + } 135 + if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) { 136 + /* addi.d sp,fp,si12 */ 137 + struct symbol *func = find_func_containing(insn->sec, insn->offset); 138 + 139 + if (!func) 140 + return false; 141 + 142 + func->frame_pointer = true; 134 143 } 135 144 break; 136 145 case ldd_op:
+20 -3
tools/objtool/check.c
··· 3043 3043 break; 3044 3044 } 3045 3045 3046 - if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 3046 + if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP && 3047 + insn->sym->frame_pointer) { 3048 + /* addi.d fp,sp,imm on LoongArch */ 3049 + if (cfa->base == CFI_SP && cfa->offset == op->src.offset) { 3050 + cfa->base = CFI_BP; 3051 + cfa->offset = 0; 3052 + } 3053 + break; 3054 + } 3047 3055 3048 - /* lea disp(%rbp), %rsp */ 3049 - cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 3056 + if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 3057 + /* addi.d sp,fp,imm on LoongArch */ 3058 + if (cfa->base == CFI_BP && cfa->offset == 0) { 3059 + if (insn->sym->frame_pointer) { 3060 + cfa->base = CFI_SP; 3061 + cfa->offset = -op->src.offset; 3062 + } 3063 + } else { 3064 + /* lea disp(%rbp), %rsp */ 3065 + cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 3066 + } 3050 3067 break; 3051 3068 } 3052 3069
+1
tools/objtool/include/objtool/elf.h
··· 68 68 u8 warned : 1; 69 69 u8 embedded_insn : 1; 70 70 u8 local_label : 1; 71 + u8 frame_pointer : 1; 71 72 struct list_head pv_target; 72 73 struct reloc *relocs; 73 74 };