Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'loongarch-fixes-6.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:

- replace __ASSEMBLY__ with __ASSEMBLER__ in headers like others

- fix build warnings about export.h

- reserve the EFI memory map region for kdump

- handle __init vs inline mismatches

- fix some KVM bugs

* tag 'loongarch-fixes-6.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: KVM: Disable updating of "num_cpu" and "feature"
LoongArch: KVM: Check validity of "num_cpu" from user space
LoongArch: KVM: Check interrupt route from physical CPU
LoongArch: KVM: Fix interrupt route update with EIOINTC
LoongArch: KVM: Add address alignment check for IOCSR emulation
LoongArch: KVM: Avoid overflow with array index
LoongArch: Handle KCOV __init vs inline mismatches
LoongArch: Reserve the EFI memory map region
LoongArch: Fix build warnings about export.h
LoongArch: Replace __ASSEMBLY__ with __ASSEMBLER__ in headers

+151 -100
+4 -4
arch/loongarch/include/asm/addrspace.h
··· 18 18 /* 19 19 * This gives the physical RAM offset. 20 20 */ 21 - #ifndef __ASSEMBLY__ 21 + #ifndef __ASSEMBLER__ 22 22 #ifndef PHYS_OFFSET 23 23 #define PHYS_OFFSET _UL(0) 24 24 #endif 25 25 extern unsigned long vm_map_base; 26 - #endif /* __ASSEMBLY__ */ 26 + #endif /* __ASSEMBLER__ */ 27 27 28 28 #ifndef IO_BASE 29 29 #define IO_BASE CSR_DMW0_BASE ··· 66 66 #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) 67 67 #endif 68 68 69 - #ifdef __ASSEMBLY__ 69 + #ifdef __ASSEMBLER__ 70 70 #define _ATYPE_ 71 71 #define _ATYPE32_ 72 72 #define _ATYPE64_ ··· 85 85 /* 86 86 * 32/64-bit LoongArch address spaces 87 87 */ 88 - #ifdef __ASSEMBLY__ 88 + #ifdef __ASSEMBLER__ 89 89 #define _ACAST32_ 90 90 #define _ACAST64_ 91 91 #else
+2 -2
arch/loongarch/include/asm/alternative-asm.h
··· 2 2 #ifndef _ASM_ALTERNATIVE_ASM_H 3 3 #define _ASM_ALTERNATIVE_ASM_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 7 7 #include <asm/asm.h> 8 8 ··· 77 77 .previous 78 78 .endm 79 79 80 - #endif /* __ASSEMBLY__ */ 80 + #endif /* __ASSEMBLER__ */ 81 81 82 82 #endif /* _ASM_ALTERNATIVE_ASM_H */
+2 -2
arch/loongarch/include/asm/alternative.h
··· 2 2 #ifndef _ASM_ALTERNATIVE_H 3 3 #define _ASM_ALTERNATIVE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <linux/types.h> 8 8 #include <linux/stddef.h> ··· 106 106 #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ 107 107 (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) 108 108 109 - #endif /* __ASSEMBLY__ */ 109 + #endif /* __ASSEMBLER__ */ 110 110 111 111 #endif /* _ASM_ALTERNATIVE_H */
+3 -3
arch/loongarch/include/asm/asm-extable.h
··· 7 7 #define EX_TYPE_UACCESS_ERR_ZERO 2 8 8 #define EX_TYPE_BPF 3 9 9 10 - #ifdef __ASSEMBLY__ 10 + #ifdef __ASSEMBLER__ 11 11 12 12 #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ 13 13 .pushsection __ex_table, "a"; \ ··· 22 22 __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) 23 23 .endm 24 24 25 - #else /* __ASSEMBLY__ */ 25 + #else /* __ASSEMBLER__ */ 26 26 27 27 #include <linux/bits.h> 28 28 #include <linux/stringify.h> ··· 60 60 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ 61 61 _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) 62 62 63 - #endif /* __ASSEMBLY__ */ 63 + #endif /* __ASSEMBLER__ */ 64 64 65 65 #endif /* __ASM_ASM_EXTABLE_H */
+4 -4
arch/loongarch/include/asm/asm.h
··· 110 110 #define LONG_SRA srai.w 111 111 #define LONG_SRAV sra.w 112 112 113 - #ifdef __ASSEMBLY__ 113 + #ifdef __ASSEMBLER__ 114 114 #define LONG .word 115 115 #endif 116 116 #define LONGSIZE 4 ··· 131 131 #define LONG_SRA srai.d 132 132 #define LONG_SRAV sra.d 133 133 134 - #ifdef __ASSEMBLY__ 134 + #ifdef __ASSEMBLER__ 135 135 #define LONG .dword 136 136 #endif 137 137 #define LONGSIZE 8 ··· 158 158 159 159 #define PTR_SCALESHIFT 2 160 160 161 - #ifdef __ASSEMBLY__ 161 + #ifdef __ASSEMBLER__ 162 162 #define PTR .word 163 163 #endif 164 164 #define PTRSIZE 4 ··· 181 181 182 182 #define PTR_SCALESHIFT 3 183 183 184 - #ifdef __ASSEMBLY__ 184 + #ifdef __ASSEMBLER__ 185 185 #define PTR .dword 186 186 #endif 187 187 #define PTRSIZE 8
+2 -2
arch/loongarch/include/asm/cpu.h
··· 46 46 47 47 #define PRID_PRODUCT_MASK 0x0fff 48 48 49 - #if !defined(__ASSEMBLY__) 49 + #if !defined(__ASSEMBLER__) 50 50 51 51 enum cpu_type_enum { 52 52 CPU_UNKNOWN, ··· 55 55 CPU_LAST 56 56 }; 57 57 58 - #endif /* !__ASSEMBLY */ 58 + #endif /* !__ASSEMBLER__ */ 59 59 60 60 /* 61 61 * ISA Level encodings
+2 -2
arch/loongarch/include/asm/ftrace.h
··· 14 14 15 15 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 16 16 17 - #ifndef __ASSEMBLY__ 17 + #ifndef __ASSEMBLER__ 18 18 19 19 #ifndef CONFIG_DYNAMIC_FTRACE 20 20 ··· 84 84 85 85 #endif 86 86 87 - #endif /* __ASSEMBLY__ */ 87 + #endif /* __ASSEMBLER__ */ 88 88 89 89 #endif /* CONFIG_FUNCTION_TRACER */ 90 90
+3 -3
arch/loongarch/include/asm/gpr-num.h
··· 2 2 #ifndef __ASM_GPR_NUM_H 3 3 #define __ASM_GPR_NUM_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 7 7 .equ .L__gpr_num_zero, 0 8 8 .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 ··· 25 25 .equ .L__gpr_num_$s\num, 23 + \num 26 26 .endr 27 27 28 - #else /* __ASSEMBLY__ */ 28 + #else /* __ASSEMBLER__ */ 29 29 30 30 #define __DEFINE_ASM_GPR_NUMS \ 31 31 " .equ .L__gpr_num_zero, 0\n" \ ··· 47 47 " .equ .L__gpr_num_$s\\num, 23 + \\num\n" \ 48 48 " .endr\n" \ 49 49 50 - #endif /* __ASSEMBLY__ */ 50 + #endif /* __ASSEMBLER__ */ 51 51 52 52 #endif /* __ASM_GPR_NUM_H */
+2 -2
arch/loongarch/include/asm/irqflags.h
··· 5 5 #ifndef _ASM_IRQFLAGS_H 6 6 #define _ASM_IRQFLAGS_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #include <linux/compiler.h> 11 11 #include <linux/stringify.h> ··· 80 80 return arch_irqs_disabled_flags(arch_local_save_flags()); 81 81 } 82 82 83 - #endif /* #ifndef __ASSEMBLY__ */ 83 + #endif /* #ifndef __ASSEMBLER__ */ 84 84 85 85 #endif /* _ASM_IRQFLAGS_H */
+2 -2
arch/loongarch/include/asm/jump_label.h
··· 7 7 #ifndef __ASM_JUMP_LABEL_H 8 8 #define __ASM_JUMP_LABEL_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <linux/types.h> 13 13 ··· 50 50 return true; 51 51 } 52 52 53 - #endif /* __ASSEMBLY__ */ 53 + #endif /* __ASSEMBLER__ */ 54 54 #endif /* __ASM_JUMP_LABEL_H */
+1 -1
arch/loongarch/include/asm/kasan.h
··· 2 2 #ifndef __ASM_KASAN_H 3 3 #define __ASM_KASAN_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <linux/linkage.h> 8 8 #include <linux/mmzone.h>
+8 -8
arch/loongarch/include/asm/loongarch.h
··· 9 9 #include <linux/linkage.h> 10 10 #include <linux/types.h> 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 #include <larchintrin.h> 14 14 15 15 /* CPUCFG */ 16 16 #define read_cpucfg(reg) __cpucfg(reg) 17 17 18 - #endif /* !__ASSEMBLY__ */ 18 + #endif /* !__ASSEMBLER__ */ 19 19 20 - #ifdef __ASSEMBLY__ 20 + #ifdef __ASSEMBLER__ 21 21 22 22 /* LoongArch Registers */ 23 23 #define REG_ZERO 0x0 ··· 53 53 #define REG_S7 0x1e 54 54 #define REG_S8 0x1f 55 55 56 - #endif /* __ASSEMBLY__ */ 56 + #endif /* __ASSEMBLER__ */ 57 57 58 58 /* Bit fields for CPUCFG registers */ 59 59 #define LOONGARCH_CPUCFG0 0x0 ··· 171 171 * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h 172 172 */ 173 173 174 - #ifndef __ASSEMBLY__ 174 + #ifndef __ASSEMBLER__ 175 175 176 176 /* CSR */ 177 177 #define csr_read32(reg) __csrrd_w(reg) ··· 187 187 #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) 188 188 #define iocsr_write64(val, reg) __iocsrwr_d(val, reg) 189 189 190 - #endif /* !__ASSEMBLY__ */ 190 + #endif /* !__ASSEMBLER__ */ 191 191 192 192 /* CSR register number */ 193 193 ··· 1195 1195 #define LOONGARCH_IOCSR_EXTIOI_ROUTE_BASE 0x1c00 1196 1196 #define IOCSR_EXTIOI_VECTOR_NUM 256 1197 1197 1198 - #ifndef __ASSEMBLY__ 1198 + #ifndef __ASSEMBLER__ 1199 1199 1200 1200 static __always_inline u64 drdtime(void) 1201 1201 { ··· 1357 1357 #define clear_csr_estat(val) \ 1358 1358 csr_xchg32(~(val), val, LOONGARCH_CSR_ESTAT) 1359 1359 1360 - #endif /* __ASSEMBLY__ */ 1360 + #endif /* __ASSEMBLER__ */ 1361 1361 1362 1362 /* Generic EntryLo bit definitions */ 1363 1363 #define ENTRYLO_V (_ULCAST_(1) << 0)
+2 -2
arch/loongarch/include/asm/orc_types.h
··· 34 34 #define ORC_TYPE_REGS 3 35 35 #define ORC_TYPE_REGS_PARTIAL 4 36 36 37 - #ifndef __ASSEMBLY__ 37 + #ifndef __ASSEMBLER__ 38 38 /* 39 39 * This struct is more or less a vastly simplified version of the DWARF Call 40 40 * Frame Information standard. It contains only the necessary parts of DWARF ··· 53 53 unsigned int type:3; 54 54 unsigned int signal:1; 55 55 }; 56 - #endif /* __ASSEMBLY__ */ 56 + #endif /* __ASSEMBLER__ */ 57 57 58 58 #endif /* _ORC_TYPES_H */
+2 -2
arch/loongarch/include/asm/page.h
··· 15 15 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 16 16 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 17 17 18 - #ifndef __ASSEMBLY__ 18 + #ifndef __ASSEMBLER__ 19 19 20 20 #include <linux/kernel.h> 21 21 #include <linux/pfn.h> ··· 110 110 #include <asm-generic/memory_model.h> 111 111 #include <asm-generic/getorder.h> 112 112 113 - #endif /* !__ASSEMBLY__ */ 113 + #endif /* !__ASSEMBLER__ */ 114 114 115 115 #endif /* _ASM_PAGE_H */
+2 -2
arch/loongarch/include/asm/pgtable-bits.h
··· 92 92 #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ 93 93 _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) 94 94 95 - #ifndef __ASSEMBLY__ 95 + #ifndef __ASSEMBLER__ 96 96 97 97 #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) 98 98 ··· 127 127 return __pgprot(prot); 128 128 } 129 129 130 - #endif /* !__ASSEMBLY__ */ 130 + #endif /* !__ASSEMBLER__ */ 131 131 132 132 #endif /* _ASM_PGTABLE_BITS_H */
+2 -2
arch/loongarch/include/asm/pgtable.h
··· 55 55 56 56 #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) 57 57 58 - #ifndef __ASSEMBLY__ 58 + #ifndef __ASSEMBLER__ 59 59 60 60 #include <linux/mm_types.h> 61 61 #include <linux/mmzone.h> ··· 618 618 #define HAVE_ARCH_UNMAPPED_AREA 619 619 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 620 620 621 - #endif /* !__ASSEMBLY__ */ 621 + #endif /* !__ASSEMBLER__ */ 622 622 623 623 #endif /* _ASM_PGTABLE_H */
+1 -1
arch/loongarch/include/asm/prefetch.h
··· 8 8 #define Pref_Load 0 9 9 #define Pref_Store 8 10 10 11 - #ifdef __ASSEMBLY__ 11 + #ifdef __ASSEMBLER__ 12 12 13 13 .macro __pref hint addr 14 14 #ifdef CONFIG_CPU_HAS_PREFETCH
+1 -1
arch/loongarch/include/asm/smp.h
··· 39 39 void loongson_cpu_die(unsigned int cpu); 40 40 #endif 41 41 42 - static inline void plat_smp_setup(void) 42 + static inline void __init plat_smp_setup(void) 43 43 { 44 44 loongson_smp_setup(); 45 45 }
+2 -2
arch/loongarch/include/asm/thread_info.h
··· 10 10 11 11 #ifdef __KERNEL__ 12 12 13 - #ifndef __ASSEMBLY__ 13 + #ifndef __ASSEMBLER__ 14 14 15 15 #include <asm/processor.h> 16 16 ··· 53 53 54 54 register unsigned long current_stack_pointer __asm__("$sp"); 55 55 56 - #endif /* !__ASSEMBLY__ */ 56 + #endif /* !__ASSEMBLER__ */ 57 57 58 58 /* thread information allocation */ 59 59 #define THREAD_SIZE SZ_16K
+1 -1
arch/loongarch/include/asm/types.h
··· 8 8 #include <asm-generic/int-ll64.h> 9 9 #include <uapi/asm/types.h> 10 10 11 - #ifdef __ASSEMBLY__ 11 + #ifdef __ASSEMBLER__ 12 12 #define _ULCAST_ 13 13 #define _U64CAST_ 14 14 #else
+3 -3
arch/loongarch/include/asm/unwind_hints.h
··· 5 5 #include <linux/objtool.h> 6 6 #include <asm/orc_types.h> 7 7 8 - #ifdef __ASSEMBLY__ 8 + #ifdef __ASSEMBLER__ 9 9 10 10 .macro UNWIND_HINT_UNDEFINED 11 11 UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED ··· 23 23 UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL 24 24 .endm 25 25 26 - #else /* !__ASSEMBLY__ */ 26 + #else /* !__ASSEMBLER__ */ 27 27 28 28 #define UNWIND_HINT_SAVE \ 29 29 UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) ··· 31 31 #define UNWIND_HINT_RESTORE \ 32 32 UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) 33 33 34 - #endif /* !__ASSEMBLY__ */ 34 + #endif /* !__ASSEMBLER__ */ 35 35 36 36 #endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
+2 -2
arch/loongarch/include/asm/vdso/arch_data.h
··· 7 7 #ifndef _VDSO_ARCH_DATA_H 8 8 #define _VDSO_ARCH_DATA_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <asm/asm.h> 13 13 #include <asm/vdso.h> ··· 20 20 struct vdso_pcpu_data pdata[NR_CPUS]; 21 21 }; 22 22 23 - #endif /* __ASSEMBLY__ */ 23 + #endif /* __ASSEMBLER__ */ 24 24 25 25 #endif
+2 -2
arch/loongarch/include/asm/vdso/getrandom.h
··· 5 5 #ifndef __ASM_VDSO_GETRANDOM_H 6 6 #define __ASM_VDSO_GETRANDOM_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #include <asm/unistd.h> 11 11 #include <asm/vdso/vdso.h> ··· 28 28 return ret; 29 29 } 30 30 31 - #endif /* !__ASSEMBLY__ */ 31 + #endif /* !__ASSEMBLER__ */ 32 32 33 33 #endif /* __ASM_VDSO_GETRANDOM_H */
+2 -2
arch/loongarch/include/asm/vdso/gettimeofday.h
··· 7 7 #ifndef __ASM_VDSO_GETTIMEOFDAY_H 8 8 #define __ASM_VDSO_GETTIMEOFDAY_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <asm/unistd.h> 13 13 #include <asm/vdso/vdso.h> ··· 89 89 } 90 90 #define __arch_vdso_hres_capable loongarch_vdso_hres_capable 91 91 92 - #endif /* !__ASSEMBLY__ */ 92 + #endif /* !__ASSEMBLER__ */ 93 93 94 94 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
+2 -2
arch/loongarch/include/asm/vdso/processor.h
··· 5 5 #ifndef __ASM_VDSO_PROCESSOR_H 6 6 #define __ASM_VDSO_PROCESSOR_H 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 #define cpu_relax() barrier() 11 11 12 - #endif /* __ASSEMBLY__ */ 12 + #endif /* __ASSEMBLER__ */ 13 13 14 14 #endif /* __ASM_VDSO_PROCESSOR_H */
+2 -2
arch/loongarch/include/asm/vdso/vdso.h
··· 7 7 #ifndef _ASM_VDSO_VDSO_H 8 8 #define _ASM_VDSO_VDSO_H 9 9 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <asm/asm.h> 13 13 #include <asm/page.h> ··· 16 16 17 17 #define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT) 18 18 19 - #endif /* __ASSEMBLY__ */ 19 + #endif /* __ASSEMBLER__ */ 20 20 21 21 #endif
+2 -2
arch/loongarch/include/asm/vdso/vsyscall.h
··· 2 2 #ifndef __ASM_VDSO_VSYSCALL_H 3 3 #define __ASM_VDSO_VSYSCALL_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <vdso/datapage.h> 8 8 9 9 /* The asm-generic header needs to be included after the definitions above */ 10 10 #include <asm-generic/vdso/vsyscall.h> 11 11 12 - #endif /* !__ASSEMBLY__ */ 12 + #endif /* !__ASSEMBLER__ */ 13 13 14 14 #endif /* __ASM_VDSO_VSYSCALL_H */
+1
arch/loongarch/kernel/acpi.c
··· 10 10 #include <linux/init.h> 11 11 #include <linux/acpi.h> 12 12 #include <linux/efi-bgrt.h> 13 + #include <linux/export.h> 13 14 #include <linux/irq.h> 14 15 #include <linux/irqdomain.h> 15 16 #include <linux/memblock.h>
+1
arch/loongarch/kernel/alternative.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/export.h> 2 3 #include <linux/mm.h> 3 4 #include <linux/module.h> 4 5 #include <asm/alternative.h>
+12
arch/loongarch/kernel/efi.c
··· 144 144 if (efi_memmap_init_early(&data) < 0) 145 145 panic("Unable to map EFI memory map.\n"); 146 146 147 + /* 148 + * Reserve the physical memory region occupied by the EFI 149 + * memory map table (header + descriptors). This is crucial 150 + * for kdump, as the kdump kernel relies on this original 151 + * memmap passed by the bootloader. Without reservation, 152 + * this region could be overwritten by the primary kernel. 153 + * Also, set the EFI_PRESERVE_BS_REGIONS flag to indicate that 154 + * critical boot services code/data regions like this are preserved. 155 + */ 156 + memblock_reserve((phys_addr_t)boot_memmap, sizeof(*tbl) + data.size); 157 + set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags); 158 + 147 159 early_memunmap(tbl, sizeof(*tbl)); 148 160 } 149 161
-1
arch/loongarch/kernel/elf.c
··· 6 6 7 7 #include <linux/binfmts.h> 8 8 #include <linux/elf.h> 9 - #include <linux/export.h> 10 9 #include <linux/sched.h> 11 10 12 11 #include <asm/cpu-features.h>
+1
arch/loongarch/kernel/kfpu.c
··· 4 4 */ 5 5 6 6 #include <linux/cpu.h> 7 + #include <linux/export.h> 7 8 #include <linux/init.h> 8 9 #include <asm/fpu.h> 9 10 #include <asm/smp.h>
-1
arch/loongarch/kernel/paravirt.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/export.h> 3 2 #include <linux/types.h> 4 3 #include <linux/interrupt.h> 5 4 #include <linux/irq_work.h>
+1 -1
arch/loongarch/kernel/time.c
··· 102 102 return 0; 103 103 } 104 104 105 - static unsigned long __init get_loops_per_jiffy(void) 105 + static unsigned long get_loops_per_jiffy(void) 106 106 { 107 107 unsigned long lpj = (unsigned long)const_clock_freq; 108 108
+1
arch/loongarch/kernel/traps.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/kexec.h> 15 15 #include <linux/module.h> 16 + #include <linux/export.h> 16 17 #include <linux/extable.h> 17 18 #include <linux/mm.h> 18 19 #include <linux/sched/mm.h>
+1
arch/loongarch/kernel/unwind_guess.c
··· 3 3 * Copyright (C) 2022 Loongson Technology Corporation Limited 4 4 */ 5 5 #include <asm/unwind.h> 6 + #include <linux/export.h> 6 7 7 8 unsigned long unwind_get_return_address(struct unwind_state *state) 8 9 {
+2 -1
arch/loongarch/kernel/unwind_orc.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #include <linux/objtool.h> 2 + #include <linux/export.h> 3 3 #include <linux/module.h> 4 + #include <linux/objtool.h> 4 5 #include <linux/sort.h> 5 6 #include <asm/exception.h> 6 7 #include <asm/orc_header.h>
+1
arch/loongarch/kernel/unwind_prologue.c
··· 3 3 * Copyright (C) 2022 Loongson Technology Corporation Limited 4 4 */ 5 5 #include <linux/cpumask.h> 6 + #include <linux/export.h> 6 7 #include <linux/ftrace.h> 7 8 #include <linux/kallsyms.h> 8 9
+61 -28
arch/loongarch/kvm/intc/eiointc.c
··· 9 9 10 10 static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s) 11 11 { 12 - int ipnum, cpu, irq_index, irq_mask, irq; 12 + int ipnum, cpu, cpuid, irq_index, irq_mask, irq; 13 + struct kvm_vcpu *vcpu; 13 14 14 15 for (irq = 0; irq < EIOINTC_IRQS; irq++) { 15 16 ipnum = s->ipmap.reg_u8[irq / 32]; ··· 21 20 irq_index = irq / 32; 22 21 irq_mask = BIT(irq & 0x1f); 23 22 24 - cpu = s->coremap.reg_u8[irq]; 23 + cpuid = s->coremap.reg_u8[irq]; 24 + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); 25 + if (!vcpu) 26 + continue; 27 + 28 + cpu = vcpu->vcpu_id; 25 29 if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) 26 30 set_bit(irq, s->sw_coreisr[cpu][ipnum]); 27 31 else ··· 72 66 } 73 67 74 68 static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s, 75 - int irq, void *pvalue, u32 len, bool notify) 69 + int irq, u64 val, u32 len, bool notify) 76 70 { 77 - int i, cpu; 78 - u64 val = *(u64 *)pvalue; 71 + int i, cpu, cpuid; 72 + struct kvm_vcpu *vcpu; 79 73 80 74 for (i = 0; i < len; i++) { 81 - cpu = val & 0xff; 75 + cpuid = val & 0xff; 82 76 val = val >> 8; 83 77 84 78 if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) { 85 - cpu = ffs(cpu) - 1; 86 - cpu = (cpu >= 4) ? 0 : cpu; 79 + cpuid = ffs(cpuid) - 1; 80 + cpuid = (cpuid >= 4) ? 0 : cpuid; 87 81 } 88 82 83 + vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid); 84 + if (!vcpu) 85 + continue; 86 + 87 + cpu = vcpu->vcpu_id; 89 88 if (s->sw_coremap[irq + i] == cpu) 90 89 continue; 91 90 ··· 316 305 return -EINVAL; 317 306 } 318 307 308 + if (addr & (len - 1)) { 309 + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); 310 + return -EINVAL; 311 + } 312 + 319 313 vcpu->kvm->stat.eiointc_read_exits++; 320 314 spin_lock_irqsave(&eiointc->lock, flags); 321 315 switch (len) { ··· 414 398 irq = offset - EIOINTC_COREMAP_START; 415 399 index = irq; 416 400 s->coremap.reg_u8[index] = data; 417 - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); 401 + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); 418 402 break; 419 403 default: 420 404 ret = -EINVAL; ··· 452 436 break; 453 437 case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END: 454 438 index = (offset - EIOINTC_ENABLE_START) >> 1; 455 - old_data = s->enable.reg_u32[index]; 439 + old_data = s->enable.reg_u16[index]; 456 440 s->enable.reg_u16[index] = data; 457 441 /* 458 442 * 1: enable irq. 459 443 * update irq when isr is set. 460 444 */ 461 445 data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index]; 462 - index = index << 1; 463 446 for (i = 0; i < sizeof(data); i++) { 464 447 u8 mask = (data >> (i * 8)) & 0xff; 465 - eiointc_enable_irq(vcpu, s, index + i, mask, 1); 448 + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1); 466 449 } 467 450 /* 468 451 * 0: disable irq. ··· 470 455 data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index]; 471 456 for (i = 0; i < sizeof(data); i++) { 472 457 u8 mask = (data >> (i * 8)) & 0xff; 473 - eiointc_enable_irq(vcpu, s, index, mask, 0); 458 + eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0); 474 459 } 475 460 break; 476 461 case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: ··· 499 484 irq = offset - EIOINTC_COREMAP_START; 500 485 index = irq >> 1; 501 486 s->coremap.reg_u16[index] = data; 502 - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); 487 + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); 503 488 break; 504 489 default: 505 490 ret = -EINVAL; ··· 544 529 * update irq when isr is set. 545 530 */ 546 531 data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; 547 - index = index << 2; 548 532 for (i = 0; i < sizeof(data); i++) { 549 533 u8 mask = (data >> (i * 8)) & 0xff; 550 - eiointc_enable_irq(vcpu, s, index + i, mask, 1); 534 + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1); 551 535 } 552 536 /* 553 537 * 0: disable irq. ··· 555 541 data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; 556 542 for (i = 0; i < sizeof(data); i++) { 557 543 u8 mask = (data >> (i * 8)) & 0xff; 558 - eiointc_enable_irq(vcpu, s, index, mask, 0); 544 + eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0); 559 545 } 560 546 break; 561 547 case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: ··· 584 570 irq = offset - EIOINTC_COREMAP_START; 585 571 index = irq >> 2; 586 572 s->coremap.reg_u32[index] = data; 587 - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); 573 + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); 588 574 break; 589 575 default: 590 576 ret = -EINVAL; ··· 629 615 * update irq when isr is set. 630 616 */ 631 617 data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; 632 - index = index << 3; 633 618 for (i = 0; i < sizeof(data); i++) { 634 619 u8 mask = (data >> (i * 8)) & 0xff; 635 - eiointc_enable_irq(vcpu, s, index + i, mask, 1); 620 + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1); 636 621 } 637 622 /* 638 623 * 0: disable irq.
··· 640 627 data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; 641 628 for (i = 0; i < sizeof(data); i++) { 642 629 u8 mask = (data >> (i * 8)) & 0xff; 643 - eiointc_enable_irq(vcpu, s, index, mask, 0); 630 + eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0); 644 631 } 645 632 break; 646 633 case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END: ··· 669 656 irq = offset - EIOINTC_COREMAP_START; 670 657 index = irq >> 3; 671 658 s->coremap.reg_u64[index] = data; 672 - eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true); 659 + eiointc_update_sw_coremap(s, irq, data, sizeof(data), true); 673 660 break; 674 661 default: 675 662 ret = -EINVAL; ··· 689 676 690 677 if (!eiointc) { 691 678 kvm_err("%s: eiointc irqchip not valid!\n", __func__); 679 + return -EINVAL; 680 + } 681 + 682 + if (addr & (len - 1)) { 683 + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); 692 684 return -EINVAL; 693 685 } 694 686 ··· 805 787 int ret = 0; 806 788 unsigned long flags; 807 789 unsigned long type = (unsigned long)attr->attr; 808 - u32 i, start_irq; 790 + u32 i, start_irq, val; 809 791 void __user *data; 810 792 struct loongarch_eiointc *s = dev->kvm->arch.eiointc; ··· 813 795 spin_lock_irqsave(&s->lock, flags); 814 796 switch (type) { 815 797 case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU: 816 - if (copy_from_user(&s->num_cpu, data, 4)) 798 + if (copy_from_user(&val, data, 4)) 817 799 ret = -EFAULT; 800 + else { 801 + if (val >= EIOINTC_ROUTE_MAX_VCPUS) 802 + ret = -EINVAL; 803 + else 804 + s->num_cpu = val; 805 + } 818 806 break; 819 807 case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE: 820 808 if (copy_from_user(&s->features, data, 4)) ··· 833 809 for (i = 0; i < (EIOINTC_IRQS / 4); i++) { 834 810 start_irq = i * 4; 835 811 eiointc_update_sw_coremap(s, start_irq, 836 - (void *)&s->coremap.reg_u32[i], sizeof(u32), false); 812 + s->coremap.reg_u32[i], sizeof(u32), false); 837 813 } 838 814 break; 839 815 default: ··· 848 824 struct kvm_device_attr *attr, 849 825 bool is_write) 850 826 { 851 - int addr, cpuid, offset, ret = 0; 827 + int addr, cpu, offset, ret = 0; 852 828 unsigned long flags; 853 829 void *p = NULL; 854 830 void __user *data; ··· 856 832 857 833 s = dev->kvm->arch.eiointc; 858 834 addr = attr->attr; 859 - cpuid = addr >> 16; 835 + cpu = addr >> 16; 860 836 addr &= 0xffff; 861 837 data = (void __user *)attr->addr; 862 838 switch (addr) { ··· 881 857 p = &s->isr.reg_u32[offset]; 882 858 break; 883 859 case EIOINTC_COREISR_START ... EIOINTC_COREISR_END: 860 + if (cpu >= s->num_cpu) 861 + return -EINVAL; 862 + 884 863 offset = (addr - EIOINTC_COREISR_START) / 4; 885 - p = &s->coreisr.reg_u32[cpuid][offset]; 864 + p = &s->coreisr.reg_u32[cpu][offset]; 886 865 break; 887 866 case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END: 888 867 offset = (addr - EIOINTC_COREMAP_START) / 4; ··· 926 899 data = (void __user *)attr->addr; 927 900 switch (addr) { 928 901 case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU: 902 + if (is_write) 903 + return ret; 904 + 929 905 p = &s->num_cpu; 930 906 break; 931 907 case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE: 908 + if (is_write) 909 + return ret; 910 + 932 911 p = &s->features; 933 912 break; 934 913 case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
+1
arch/loongarch/lib/crc32-loongarch.c
··· 11 11 12 12 #include <asm/cpu-features.h> 13 13 #include <linux/crc32.h> 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/unaligned.h> 16 17
+1
arch/loongarch/lib/csum.c
··· 2 2 // Copyright (C) 2019-2020 Arm Ltd. 3 3 4 4 #include <linux/compiler.h> 5 + #include <linux/export.h> 5 6 #include <linux/kasan-checks.h> 6 7 #include <linux/kernel.h> 7 8
+2 -2
arch/loongarch/mm/ioremap.c
··· 16 16 17 17 } 18 18 19 - void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) 19 + void * __init early_memremap_ro(resource_size_t phys_addr, unsigned long size) 20 20 { 21 21 return early_memremap(phys_addr, size); 22 22 } 23 23 24 - void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, 24 + void * __init early_memremap_prot(resource_size_t phys_addr, unsigned long size, 25 25 unsigned long prot_val) 26 26 { 27 27 return early_memremap(phys_addr, size);
-1
arch/loongarch/pci/pci.c
··· 3 3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited 4 4 */ 5 5 #include <linux/kernel.h> 6 - #include <linux/export.h> 7 6 #include <linux/init.h> 8 7 #include <linux/acpi.h> 9 8 #include <linux/types.h>
+2 -2
tools/arch/loongarch/include/asm/orc_types.h
··· 34 34 #define ORC_TYPE_REGS 3 35 35 #define ORC_TYPE_REGS_PARTIAL 4 36 36 37 - #ifndef __ASSEMBLY__ 37 + #ifndef __ASSEMBLER__ 38 38 /* 39 39 * This struct is more or less a vastly simplified version of the DWARF Call 40 40 * Frame Information standard. It contains only the necessary parts of DWARF ··· 53 53 unsigned int type:3; 54 54 unsigned int signal:1; 55 55 }; 56 - #endif /* __ASSEMBLY__ */ 56 + #endif /* __ASSEMBLER__ */ 57 57 58 58 #endif /* _ORC_TYPES_H */