Merge tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
"Add a missing Kconfig option, fix some bugs in exception handlers,
memory management and KVM"

* tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: KVM: Fix PMU pass-through issue if VM exits to host finally
LoongArch: KVM: Fully clear some CSRs when VM reboot
LoongArch: KVM: Fix multiple typos of KVM code
LoongArch: Return NULL from huge_pte_offset() for invalid PMD
LoongArch: Remove a bogus reference to ZONE_DMA
LoongArch: Handle fp, lsx, lasx and lbt assembly symbols
LoongArch: Make do_xyz() exception handlers more robust
LoongArch: Make regs_irqs_disabled() more clear
LoongArch: Select ARCH_USE_MEMTEST

+65 -55
+1
arch/loongarch/Kconfig
··· 73 select ARCH_SUPPORTS_RT 74 select ARCH_USE_BUILTIN_BSWAP 75 select ARCH_USE_CMPXCHG_LOCKREF 76 select ARCH_USE_QUEUED_RWLOCKS 77 select ARCH_USE_QUEUED_SPINLOCKS 78 select ARCH_WANT_DEFAULT_BPF_JIT
··· 73 select ARCH_SUPPORTS_RT 74 select ARCH_USE_BUILTIN_BSWAP 75 select ARCH_USE_CMPXCHG_LOCKREF 76 + select ARCH_USE_MEMTEST 77 select ARCH_USE_QUEUED_RWLOCKS 78 select ARCH_USE_QUEUED_SPINLOCKS 79 select ARCH_WANT_DEFAULT_BPF_JIT
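Selecting ARCH_USE_MEMTEST adds no code of its own: it only makes the generic CONFIG_MEMTEST option in lib/Kconfig.debug selectable. The actual scrub is the early_memtest() pass, driven by the memtest= boot parameter, which the architecture invokes once memblock is set up. A rough sketch of that wiring on the arch side (the call site shown is illustrative, not the exact LoongArch one):

```c
/* Illustrative arch hook; assumes memblock is fully initialized here. */
#include <linux/init.h>
#include <linux/memblock.h>	/* early_memtest(), memblock_*_of_DRAM() */

static void __init arch_mem_init_late(void)
{
	/*
	 * With CONFIG_MEMTEST=y (now selectable thanks to ARCH_USE_MEMTEST)
	 * and "memtest=N" on the command line, this walks every free
	 * memblock range with N test patterns and reserves any region
	 * that reads back corrupted.
	 */
	early_memtest(memblock_start_of_DRAM(), memblock_end_of_DRAM());
}
```

Booting with e.g. memtest=4 then runs four pattern passes over free RAM before the buddy allocator takes over.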
+20 -13
arch/loongarch/include/asm/fpu.h
··· 22 struct sigcontext; 23 24 #define kernel_fpu_available() cpu_has_fpu 25 - extern void kernel_fpu_begin(void); 26 - extern void kernel_fpu_end(void); 27 28 - extern void _init_fpu(unsigned int); 29 - extern void _save_fp(struct loongarch_fpu *); 30 - extern void _restore_fp(struct loongarch_fpu *); 31 32 - extern void _save_lsx(struct loongarch_fpu *fpu); 33 - extern void _restore_lsx(struct loongarch_fpu *fpu); 34 - extern void _init_lsx_upper(void); 35 - extern void _restore_lsx_upper(struct loongarch_fpu *fpu); 36 37 - extern void _save_lasx(struct loongarch_fpu *fpu); 38 - extern void _restore_lasx(struct loongarch_fpu *fpu); 39 - extern void _init_lasx_upper(void); 40 - extern void _restore_lasx_upper(struct loongarch_fpu *fpu); 41 42 static inline void enable_lsx(void); 43 static inline void disable_lsx(void);
··· 22 struct sigcontext; 23 24 #define kernel_fpu_available() cpu_has_fpu 25 26 + void kernel_fpu_begin(void); 27 + void kernel_fpu_end(void); 28 29 + asmlinkage void _init_fpu(unsigned int); 30 + asmlinkage void _save_fp(struct loongarch_fpu *); 31 + asmlinkage void _restore_fp(struct loongarch_fpu *); 32 + asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); 33 + asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); 34 35 + asmlinkage void _save_lsx(struct loongarch_fpu *fpu); 36 + asmlinkage void _restore_lsx(struct loongarch_fpu *fpu); 37 + asmlinkage void _init_lsx_upper(void); 38 + asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu); 39 + asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 40 + asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 41 + 42 + asmlinkage void _save_lasx(struct loongarch_fpu *fpu); 43 + asmlinkage void _restore_lasx(struct loongarch_fpu *fpu); 44 + asmlinkage void _init_lasx_upper(void); 45 + asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu); 46 + asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 47 + asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 48 49 static inline void enable_lsx(void); 50 static inline void disable_lsx(void);
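The prototypes gain asmlinkage to mark that the bodies are implemented in assembly (fpu.S, shown below), and the *_context helpers formerly declared privately in signal.c now have a single authoritative declaration here. For kernel code that only needs the FPU or vector unit temporarily, the intended entry points remain the begin/end pair. A minimal sketch, where xor_blocks_lsx() is a placeholder for some assembly routine that clobbers FP/vector registers:

```c
#include <linux/types.h>
#include <asm/fpu.h>	/* kernel_fpu_available(), kernel_fpu_begin/end() */

/* Placeholder for an asm-implemented routine that uses FP/vector state. */
void xor_blocks_lsx(void *dst, const void *src, size_t bytes);

static void xor_blocks(void *dst, const void *src, size_t bytes,
		       void (*int_fallback)(void *, const void *, size_t))
{
	if (!kernel_fpu_available()) {
		int_fallback(dst, src, bytes);	/* integer-only path */
		return;
	}

	kernel_fpu_begin();		/* own the FPU for this region */
	xor_blocks_lsx(dst, src, bytes);
	kernel_fpu_end();		/* hand it back to the previous owner */
}
```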
+7 -3
arch/loongarch/include/asm/lbt.h
··· 12 #include <asm/loongarch.h> 13 #include <asm/processor.h> 14 15 - extern void _init_lbt(void); 16 - extern void _save_lbt(struct loongarch_lbt *); 17 - extern void _restore_lbt(struct loongarch_lbt *); 18 19 static inline int is_lbt_enabled(void) 20 {
··· 12 #include <asm/loongarch.h> 13 #include <asm/processor.h> 14 15 + asmlinkage void _init_lbt(void); 16 + asmlinkage void _save_lbt(struct loongarch_lbt *); 17 + asmlinkage void _restore_lbt(struct loongarch_lbt *); 18 + asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags); 19 + asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags); 20 + asmlinkage int _save_ftop_context(void __user *ftop); 21 + asmlinkage int _restore_ftop_context(void __user *ftop); 22 23 static inline int is_lbt_enabled(void) 24 {
+2 -2
arch/loongarch/include/asm/ptrace.h
··· 33 unsigned long __last[]; 34 } __aligned(8); 35 36 - static inline int regs_irqs_disabled(struct pt_regs *regs) 37 { 38 - return arch_irqs_disabled_flags(regs->csr_prmd); 39 } 40 41 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
··· 33 unsigned long __last[]; 34 } __aligned(8); 35 36 + static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) 37 { 38 + return !(regs->csr_prmd & CSR_PRMD_PIE); 39 } 40 41 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
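The helper now tests the PIE bit of the saved CSR.PRMD directly, returns a proper bool, and is __always_inline, so the noinstr exception handlers can use it without calling out of non-instrumentable code. The traps.c hunks further down apply it uniformly; the shape of that pattern is roughly:

```c
#include <linux/entry-common.h>	/* irqentry_enter()/irqentry_exit() */
#include <linux/irqflags.h>	/* local_irq_enable()/local_irq_disable() */
#include <linux/linkage.h>
#include <asm/ptrace.h>		/* struct pt_regs, regs_irqs_disabled() */

/* do_xyz is a placeholder for the pattern used by do_ale/do_bce/do_bp/do_lbt. */
asmlinkage void noinstr do_xyz(struct pt_regs *regs)
{
	bool pie = regs_irqs_disabled(regs);	/* IRQ state at exception time */
	irqentry_state_t state = irqentry_enter(regs);

	if (!pie)
		local_irq_enable();	/* only re-enable if the context had IRQs on */

	/* ... decode and handle the exception ... */

	if (!pie)
		local_irq_disable();	/* leave the interrupted context as found */

	irqentry_exit(regs, state);
}
```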
+6
arch/loongarch/kernel/fpu.S
··· 458 li.w a0, 0 # success 459 jr ra 460 SYM_FUNC_END(_save_fp_context) 461 462 /* 463 * a0: fpregs ··· 472 li.w a0, 0 # success 473 jr ra 474 SYM_FUNC_END(_restore_fp_context) 475 476 /* 477 * a0: fpregs ··· 486 li.w a0, 0 # success 487 jr ra 488 SYM_FUNC_END(_save_lsx_context) 489 490 /* 491 * a0: fpregs ··· 500 li.w a0, 0 # success 501 jr ra 502 SYM_FUNC_END(_restore_lsx_context) 503 504 /* 505 * a0: fpregs ··· 514 li.w a0, 0 # success 515 jr ra 516 SYM_FUNC_END(_save_lasx_context) 517 518 /* 519 * a0: fpregs ··· 528 li.w a0, 0 # success 529 jr ra 530 SYM_FUNC_END(_restore_lasx_context) 531 532 .L_fpu_fault: 533 li.w a0, -EFAULT # failure
··· 458 li.w a0, 0 # success 459 jr ra 460 SYM_FUNC_END(_save_fp_context) 461 + EXPORT_SYMBOL_GPL(_save_fp_context) 462 463 /* 464 * a0: fpregs ··· 471 li.w a0, 0 # success 472 jr ra 473 SYM_FUNC_END(_restore_fp_context) 474 + EXPORT_SYMBOL_GPL(_restore_fp_context) 475 476 /* 477 * a0: fpregs ··· 484 li.w a0, 0 # success 485 jr ra 486 SYM_FUNC_END(_save_lsx_context) 487 + EXPORT_SYMBOL_GPL(_save_lsx_context) 488 489 /* 490 * a0: fpregs ··· 497 li.w a0, 0 # success 498 jr ra 499 SYM_FUNC_END(_restore_lsx_context) 500 + EXPORT_SYMBOL_GPL(_restore_lsx_context) 501 502 /* 503 * a0: fpregs ··· 510 li.w a0, 0 # success 511 jr ra 512 SYM_FUNC_END(_save_lasx_context) 513 + EXPORT_SYMBOL_GPL(_save_lasx_context) 514 515 /* 516 * a0: fpregs ··· 523 li.w a0, 0 # success 524 jr ra 525 SYM_FUNC_END(_restore_lasx_context) 526 + EXPORT_SYMBOL_GPL(_restore_lasx_context) 527 528 .L_fpu_fault: 529 li.w a0, -EFAULT # failure
+4
arch/loongarch/kernel/lbt.S
··· 90 li.w a0, 0 # success 91 jr ra 92 SYM_FUNC_END(_save_lbt_context) 93 94 /* 95 * a0: scr ··· 111 li.w a0, 0 # success 112 jr ra 113 SYM_FUNC_END(_restore_lbt_context) 114 115 /* 116 * a0: ftop ··· 122 li.w a0, 0 # success 123 jr ra 124 SYM_FUNC_END(_save_ftop_context) 125 126 /* 127 * a0: ftop ··· 153 li.w a0, 0 # success 154 jr ra 155 SYM_FUNC_END(_restore_ftop_context) 156 157 .L_lbt_fault: 158 li.w a0, -EFAULT # failure
··· 90 li.w a0, 0 # success 91 jr ra 92 SYM_FUNC_END(_save_lbt_context) 93 + EXPORT_SYMBOL_GPL(_save_lbt_context) 94 95 /* 96 * a0: scr ··· 110 li.w a0, 0 # success 111 jr ra 112 SYM_FUNC_END(_restore_lbt_context) 113 + EXPORT_SYMBOL_GPL(_restore_lbt_context) 114 115 /* 116 * a0: ftop ··· 120 li.w a0, 0 # success 121 jr ra 122 SYM_FUNC_END(_save_ftop_context) 123 + EXPORT_SYMBOL_GPL(_save_ftop_context) 124 125 /* 126 * a0: ftop ··· 150 li.w a0, 0 # success 151 jr ra 152 SYM_FUNC_END(_restore_ftop_context) 153 + EXPORT_SYMBOL_GPL(_restore_ftop_context) 154 155 .L_lbt_fault: 156 li.w a0, -EFAULT # failure
-21
arch/loongarch/kernel/signal.c
··· 51 #define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); }) 52 #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); }) 53 54 - /* Assembly functions to move context to/from the FPU */ 55 - extern asmlinkage int 56 - _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); 57 - extern asmlinkage int 58 - _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); 59 - extern asmlinkage int 60 - _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 61 - extern asmlinkage int 62 - _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 63 - extern asmlinkage int 64 - _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 65 - extern asmlinkage int 66 - _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); 67 - 68 - #ifdef CONFIG_CPU_HAS_LBT 69 - extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags); 70 - extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags); 71 - extern asmlinkage int _save_ftop_context(void __user *ftop); 72 - extern asmlinkage int _restore_ftop_context(void __user *ftop); 73 - #endif 74 - 75 struct rt_sigframe { 76 struct siginfo rs_info; 77 struct ucontext rs_uctx;
··· 51 #define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); }) 52 #define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); }) 53 54 struct rt_sigframe { 55 struct siginfo rs_info; 56 struct ucontext rs_uctx;
+12 -8
arch/loongarch/kernel/traps.c
··· 553 die_if_kernel("Kernel ale access", regs); 554 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 555 #else 556 unsigned int *pc; 557 558 - if (regs->csr_prmd & CSR_PRMD_PIE) 559 local_irq_enable(); 560 561 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); ··· 583 die_if_kernel("Kernel ale access", regs); 584 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 585 out: 586 - if (regs->csr_prmd & CSR_PRMD_PIE) 587 local_irq_disable(); 588 #endif 589 irqentry_exit(regs, state); ··· 622 asmlinkage void noinstr do_bce(struct pt_regs *regs) 623 { 624 bool user = user_mode(regs); 625 unsigned long era = exception_era(regs); 626 u64 badv = 0, lower = 0, upper = ULONG_MAX; 627 union loongarch_instruction insn; 628 irqentry_state_t state = irqentry_enter(regs); 629 630 - if (regs->csr_prmd & CSR_PRMD_PIE) 631 local_irq_enable(); 632 633 current->thread.trap_nr = read_csr_excode(); ··· 694 force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper); 695 696 out: 697 - if (regs->csr_prmd & CSR_PRMD_PIE) 698 local_irq_disable(); 699 700 irqentry_exit(regs, state); ··· 712 asmlinkage void noinstr do_bp(struct pt_regs *regs) 713 { 714 bool user = user_mode(regs); 715 unsigned int opcode, bcode; 716 unsigned long era = exception_era(regs); 717 irqentry_state_t state = irqentry_enter(regs); 718 719 - if (regs->csr_prmd & CSR_PRMD_PIE) 720 local_irq_enable(); 721 722 if (__get_inst(&opcode, (u32 *)era, user)) ··· 783 } 784 785 out: 786 - if (regs->csr_prmd & CSR_PRMD_PIE) 787 local_irq_disable(); 788 789 irqentry_exit(regs, state); ··· 1018 1019 asmlinkage void noinstr do_lbt(struct pt_regs *regs) 1020 { 1021 irqentry_state_t state = irqentry_enter(regs); 1022 1023 /* ··· 1028 * (including the user using 'MOVGR2GCSR' to turn on TM, which 1029 * will not trigger the BTE), we need to check PRMD first. 1030 */ 1031 - if (regs->csr_prmd & CSR_PRMD_PIE) 1032 local_irq_enable(); 1033 1034 if (!cpu_has_lbt) { ··· 1042 preempt_enable(); 1043 1044 out: 1045 - if (regs->csr_prmd & CSR_PRMD_PIE) 1046 local_irq_disable(); 1047 1048 irqentry_exit(regs, state);
··· 553 die_if_kernel("Kernel ale access", regs); 554 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 555 #else 556 + bool pie = regs_irqs_disabled(regs); 557 unsigned int *pc; 558 559 + if (!pie) 560 local_irq_enable(); 561 562 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr); ··· 582 die_if_kernel("Kernel ale access", regs); 583 force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr); 584 out: 585 + if (!pie) 586 local_irq_disable(); 587 #endif 588 irqentry_exit(regs, state); ··· 621 asmlinkage void noinstr do_bce(struct pt_regs *regs) 622 { 623 bool user = user_mode(regs); 624 + bool pie = regs_irqs_disabled(regs); 625 unsigned long era = exception_era(regs); 626 u64 badv = 0, lower = 0, upper = ULONG_MAX; 627 union loongarch_instruction insn; 628 irqentry_state_t state = irqentry_enter(regs); 629 630 + if (!pie) 631 local_irq_enable(); 632 633 current->thread.trap_nr = read_csr_excode(); ··· 692 force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper); 693 694 out: 695 + if (!pie) 696 local_irq_disable(); 697 698 irqentry_exit(regs, state); ··· 710 asmlinkage void noinstr do_bp(struct pt_regs *regs) 711 { 712 bool user = user_mode(regs); 713 + bool pie = regs_irqs_disabled(regs); 714 unsigned int opcode, bcode; 715 unsigned long era = exception_era(regs); 716 irqentry_state_t state = irqentry_enter(regs); 717 718 + if (!pie) 719 local_irq_enable(); 720 721 if (__get_inst(&opcode, (u32 *)era, user)) ··· 780 } 781 782 out: 783 + if (!pie) 784 local_irq_disable(); 785 786 irqentry_exit(regs, state); ··· 1015 1016 asmlinkage void noinstr do_lbt(struct pt_regs *regs) 1017 { 1018 + bool pie = regs_irqs_disabled(regs); 1019 irqentry_state_t state = irqentry_enter(regs); 1020 1021 /* ··· 1024 * (including the user using 'MOVGR2GCSR' to turn on TM, which 1025 * will not trigger the BTE), we need to check PRMD first. 1026 */ 1027 + if (!pie) 1028 local_irq_enable(); 1029 1030 if (!cpu_has_lbt) { ··· 1038 preempt_enable(); 1039 1040 out: 1041 + if (!pie) 1042 local_irq_disable(); 1043 1044 irqentry_exit(regs, state);
+2 -2
arch/loongarch/kvm/intc/ipi.c
··· 111 ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 112 srcu_read_unlock(&vcpu->kvm->srcu, idx); 113 if (unlikely(ret)) { 114 - kvm_err("%s: : read date from addr %llx failed\n", __func__, addr); 115 return ret; 116 } 117 /* Construct the mask by scanning the bit 27-30 */ ··· 127 ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 128 srcu_read_unlock(&vcpu->kvm->srcu, idx); 129 if (unlikely(ret)) 130 - kvm_err("%s: : write date to addr %llx failed\n", __func__, addr); 131 132 return ret; 133 }
··· 111 ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 112 srcu_read_unlock(&vcpu->kvm->srcu, idx); 113 if (unlikely(ret)) { 114 + kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); 115 return ret; 116 } 117 /* Construct the mask by scanning the bit 27-30 */ ··· 127 ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); 128 srcu_read_unlock(&vcpu->kvm->srcu, idx); 129 if (unlikely(ret)) 130 + kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); 131 132 return ret; 133 }
+2 -2
arch/loongarch/kvm/main.c
··· 296 /* 297 * Enable virtualization features granting guest direct control of 298 * certain features: 299 - * GCI=2: Trap on init or unimplement cache instruction. 300 * TORU=0: Trap on Root Unimplement. 301 * CACTRL=1: Root control cache. 302 - * TOP=0: Trap on Previlege. 303 * TOE=0: Trap on Exception. 304 * TIT=0: Trap on Timer. 305 */
··· 296 /* 297 * Enable virtualization features granting guest direct control of 298 * certain features: 299 + * GCI=2: Trap on init or unimplemented cache instruction. 300 * TORU=0: Trap on Root Unimplement. 301 * CACTRL=1: Root control cache. 302 + * TOP=0: Trap on Privilege. 303 * TOE=0: Trap on Exception. 304 * TIT=0: Trap on Timer. 305 */
+8
arch/loongarch/kvm/vcpu.c
··· 294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; 295 296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { 297 /* make sure the vcpu mode has been written */ 298 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); 299 local_irq_enable(); ··· 903 vcpu->arch.st.guest_addr = 0; 904 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); 905 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); 906 break; 907 default: 908 ret = -EINVAL;
··· 294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; 295 296 if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { 297 + kvm_lose_pmu(vcpu); 298 /* make sure the vcpu mode has been written */ 299 smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); 300 local_irq_enable(); ··· 902 vcpu->arch.st.guest_addr = 0; 903 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); 904 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); 905 + 906 + /* 907 + * When vCPU reset, clear the ESTAT and GINTC registers 908 + * Other CSR registers are cleared with function _kvm_setcsr(). 909 + */ 910 + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0); 911 + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0); 912 break; 913 default: 914 ret = -EINVAL;
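Two separate fixes land here: kvm_lose_pmu() is now also called on the path that bails back to the host instead of entering the guest, so pass-through PMU state is handed back before the exit, and the vCPU reset case explicitly zeroes GINTC and ESTAT in the software CSR image (the remaining CSRs go through _kvm_setcsr(), as the new comment notes). kvm_write_sw_gcsr() only updates that software image; a hypothetical batch-clear helper built on the same call would be:

```c
/*
 * Hypothetical helper; it relies only on kvm_write_sw_gcsr() and the
 * LOONGARCH_CSR_* indices already used in the hunk above.
 */
static void kvm_clear_reset_gcsrs(struct kvm_vcpu *vcpu)
{
	static const unsigned int gcsrs[] = {
		LOONGARCH_CSR_GINTC,	/* guest interrupt injection control */
		LOONGARCH_CSR_ESTAT,	/* latched exception/interrupt status */
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(gcsrs); i++)
		kvm_write_sw_gcsr(vcpu->arch.csr, gcsrs[i], 0);
}
```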
+1 -1
arch/loongarch/mm/hugetlbpage.c
··· 47 pmd = pmd_offset(pud, addr); 48 } 49 } 50 - return (pte_t *) pmd; 51 } 52 53 uint64_t pmd_to_entrylo(unsigned long pmd_val)
··· 47 pmd = pmd_offset(pud, addr); 48 } 49 } 50 + return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd; 51 } 52 53 uint64_t pmd_to_entrylo(unsigned long pmd_val)
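Before this change huge_pte_offset() handed back the PMD slot address even when the walk found nothing mapped there, so a pmd_none() entry could survive a caller's NULL check and be treated as a huge PTE. With the pmdp_get()/pmd_none() test, callers get the contract they already assume, roughly:

```c
#include <linux/hugetlb.h>	/* huge_pte_offset(), huge_pte_none() */
#include <linux/pgtable.h>	/* ptep_get() */

/* Illustrative caller shape; the helper name is hypothetical. */
static bool hugepage_slot_populated(struct mm_struct *mm, unsigned long addr,
				    unsigned long sz)
{
	pte_t *ptep = huge_pte_offset(mm, addr, sz);

	if (!ptep)		/* no (huge) mapping at this address */
		return false;

	return !huge_pte_none(ptep_get(ptep));	/* dereference only a real slot */
}
```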
-3
arch/loongarch/mm/init.c
··· 65 { 66 unsigned long max_zone_pfns[MAX_NR_ZONES]; 67 68 - #ifdef CONFIG_ZONE_DMA 69 - max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; 70 - #endif 71 #ifdef CONFIG_ZONE_DMA32 72 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; 73 #endif
··· 65 { 66 unsigned long max_zone_pfns[MAX_NR_ZONES]; 67 68 #ifdef CONFIG_ZONE_DMA32 69 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; 70 #endif