Merge tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
"Add a missing Kconfig option, fix some bugs in exception handlers,
memory management and KVM"

* tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: KVM: Fix PMU pass-through issue if VM exits to host finally
LoongArch: KVM: Fully clear some CSRs when VM reboot
LoongArch: KVM: Fix multiple typos of KVM code
LoongArch: Return NULL from huge_pte_offset() for invalid PMD
LoongArch: Remove a bogus reference to ZONE_DMA
LoongArch: Handle fp, lsx, lasx and lbt assembly symbols
LoongArch: Make do_xyz() exception handlers more robust
LoongArch: Make regs_irqs_disabled() more clear
LoongArch: Select ARCH_USE_MEMTEST

+65 -55
+1
arch/loongarch/Kconfig
···
 	select ARCH_SUPPORTS_RT
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_DEFAULT_BPF_JIT
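Selecting ARCH_USE_MEMTEST makes the generic CONFIG_MEMTEST option available on LoongArch, which in turn wires up the early "memtest=" boot parameter. A minimal sketch of how this would be exercised (the pattern count is arbitrary):

	CONFIG_MEMTEST=y	# in the kernel .config
	memtest=4		# on the kernel command line: scan free RAM with 4 patterns at early boot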
+20 -13
arch/loongarch/include/asm/fpu.h
···
 struct sigcontext;
 
 #define kernel_fpu_available() cpu_has_fpu
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
 
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
 
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
 
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
 
 static inline void enable_lsx(void);
 static inline void disable_lsx(void);
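With kernel_fpu_begin()/kernel_fpu_end() declared here, kernel-mode SIMD use follows the usual bracket pattern. A minimal sketch, assuming a hypothetical do_vector_work() caller:

	#include <asm/fpu.h>

	static void do_vector_work(void)
	{
		if (!kernel_fpu_available())
			return;			/* no FPU: use a scalar fallback */

		kernel_fpu_begin();		/* save the current owner's FPU state */
		/* ... FP/LSX/LASX instructions may be used here ... */
		kernel_fpu_end();		/* restore the saved state */
	}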
+7 -3
arch/loongarch/include/asm/lbt.h
···
 #include <asm/loongarch.h>
 #include <asm/processor.h>
 
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
 
 static inline int is_lbt_enabled(void)
 {
+2 -2
arch/loongarch/include/asm/ptrace.h
···
 	unsigned long __last[];
 } __aligned(8);
 
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
 {
-	return arch_irqs_disabled_flags(regs->csr_prmd);
+	return !(regs->csr_prmd & CSR_PRMD_PIE);
 }
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+6
arch/loongarch/kernel/fpu.S
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
 
 /*
  * a0: fpregs
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
 
 /*
  * a0: fpregs
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
 
 /*
  * a0: fpregs
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
 
 /*
  * a0: fpregs
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
 
 /*
  * a0: fpregs
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
 
 .L_fpu_fault:
 	li.w	a0, -EFAULT			# failure
+4
arch/loongarch/kernel/lbt.S
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
 
 /*
  * a0: scr
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
 
 /*
  * a0: ftop
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
 
 /*
  * a0: ftop
···
 	li.w	a0, 0				# success
 	jr	ra
 SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
 
 .L_lbt_fault:
 	li.w	a0, -EFAULT			# failure
-21
arch/loongarch/kernel/signal.c
···
 #define lock_lbt_owner()   ({ preempt_disable(); pagefault_disable(); })
 #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
 
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
 struct rt_sigframe {
 	struct siginfo rs_info;
 	struct ucontext rs_uctx;
+12 -8
arch/loongarch/kernel/traps.c
···
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 #else
+	bool pie = regs_irqs_disabled(regs);
 	unsigned int *pc;
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
···
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 #endif
 	irqentry_exit(regs, state);
···
 asmlinkage void noinstr do_bce(struct pt_regs *regs)
 {
 	bool user = user_mode(regs);
+	bool pie = regs_irqs_disabled(regs);
 	unsigned long era = exception_era(regs);
 	u64 badv = 0, lower = 0, upper = ULONG_MAX;
 	union loongarch_instruction insn;
 	irqentry_state_t state = irqentry_enter(regs);
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	current->thread.trap_nr = read_csr_excode();
···
 	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
···
 asmlinkage void noinstr do_bp(struct pt_regs *regs)
 {
 	bool user = user_mode(regs);
+	bool pie = regs_irqs_disabled(regs);
 	unsigned int opcode, bcode;
 	unsigned long era = exception_era(regs);
 	irqentry_state_t state = irqentry_enter(regs);
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	if (__get_inst(&opcode, (u32 *)era, user))
···
 	}
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
···
 
 asmlinkage void noinstr do_lbt(struct pt_regs *regs)
 {
+	bool pie = regs_irqs_disabled(regs);
 	irqentry_state_t state = irqentry_enter(regs);
 
 	/*
···
 	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
 	 * will not trigger the BTE), we need to check PRMD first.
 	 */
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	if (!cpu_has_lbt) {
···
 	preempt_enable();
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
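Distilled, the do_xyz() fix samples the interrupt-enable state once at entry and reuses that value at exit, so the enable/disable pair stays symmetric even if regs->csr_prmd is modified while the exception is handled. A sketch of the pattern (do_xyz() is a placeholder name taken from the shortlog):

	asmlinkage void noinstr do_xyz(struct pt_regs *regs)
	{
		bool pie = regs_irqs_disabled(regs);	/* sample once at entry */
		irqentry_state_t state = irqentry_enter(regs);

		if (!pie)
			local_irq_enable();	/* interrupts were on when we trapped */

		/* ... handle the exception ... */

		if (!pie)
			local_irq_disable();	/* return to the entry state */

		irqentry_exit(regs, state);
	}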
+2 -2
arch/loongarch/kvm/intc/ipi.c
···
 	ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (unlikely(ret)) {
-		kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
+		kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
 		return ret;
 	}
 	/* Construct the mask by scanning the bit 27-30 */
···
 	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (unlikely(ret))
-		kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
+		kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
 
 	return ret;
 }
+2 -2
arch/loongarch/kvm/main.c
···
 /*
  * Enable virtualization features granting guest direct control of
  * certain features:
- * GCI=2: Trap on init or unimplement cache instruction.
+ * GCI=2: Trap on init or unimplemented cache instruction.
  * TORU=0: Trap on Root Unimplement.
  * CACTRL=1: Root control cache.
- * TOP=0: Trap on Previlege.
+ * TOP=0: Trap on Privilege.
  * TOE=0: Trap on Exception.
  * TIT=0: Trap on Timer.
  */
+8
arch/loongarch/kvm/vcpu.c
···
 	vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
 
 	if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+		kvm_lose_pmu(vcpu);
 		/* make sure the vcpu mode has been written */
 		smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
 		local_irq_enable();
···
 		vcpu->arch.st.guest_addr = 0;
 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+		/*
+		 * When vCPU reset, clear the ESTAT and GINTC registers
+		 * Other CSR registers are cleared with function _kvm_setcsr().
+		 */
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
 		break;
 	default:
 		ret = -EINVAL;
+1 -1
arch/loongarch/mm/hugetlbpage.c
···
 			pmd = pmd_offset(pud, addr);
 		}
 	}
-	return (pte_t *) pmd;
+	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
 }
 
 uint64_t pmd_to_entrylo(unsigned long pmd_val)
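Callers of huge_pte_offset() treat a non-NULL return as "a mapping exists here", so handing back a pmd_none() slot could send them on to dereference an empty entry. A caller-side sketch (hypothetical caller, using the generic API):

	pte_t *ptep;

	ptep = huge_pte_offset(mm, addr, PMD_SIZE);
	if (!ptep)
		return;		/* no hugepage mapped at addr */
	/* safe: ptep points at a populated PMD-level entry */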
-3
arch/loongarch/mm/init.c
···
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
 #ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif