Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

LoongArch: Adjust misc routines for 32BIT/64BIT

Adjust misc routines for both 32BIT and 64BIT, including: bitops, bswap,
checksum, string, jump label, unaligned access emulator, suspend/wakeup
routines, etc.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>

+151 -82
+11
arch/loongarch/include/asm/bitops.h
··· 13 13 14 14 #include <asm/barrier.h> 15 15 16 + #ifdef CONFIG_32BIT_REDUCED 17 + 18 + #include <asm-generic/bitops/ffs.h> 19 + #include <asm-generic/bitops/fls.h> 20 + #include <asm-generic/bitops/__ffs.h> 21 + #include <asm-generic/bitops/__fls.h> 22 + 23 + #else /* CONFIG_32BIT_STANDARD || CONFIG_64BIT */ 24 + 16 25 #include <asm-generic/bitops/builtin-ffs.h> 17 26 #include <asm-generic/bitops/builtin-fls.h> 18 27 #include <asm-generic/bitops/builtin-__ffs.h> 19 28 #include <asm-generic/bitops/builtin-__fls.h> 29 + 30 + #endif 20 31 21 32 #include <asm-generic/bitops/ffz.h> 22 33 #include <asm-generic/bitops/fls64.h>
+4
arch/loongarch/include/asm/checksum.h
··· 9 9 #include <linux/bitops.h> 10 10 #include <linux/in6.h> 11 11 12 + #ifdef CONFIG_64BIT 13 + 12 14 #define _HAVE_ARCH_IPV6_CSUM 13 15 __sum16 csum_ipv6_magic(const struct in6_addr *saddr, 14 16 const struct in6_addr *daddr, ··· 62 60 63 61 extern unsigned int do_csum(const unsigned char *buff, int len); 64 62 #define do_csum do_csum 63 + 64 + #endif 65 65 66 66 #include <asm-generic/checksum.h> 67 67
+10 -2
arch/loongarch/include/asm/jump_label.h
··· 10 10 #ifndef __ASSEMBLER__ 11 11 12 12 #include <linux/types.h> 13 + #include <linux/stringify.h> 14 + #include <asm/asm.h> 13 15 14 16 #define JUMP_LABEL_NOP_SIZE 4 17 + 18 + #ifdef CONFIG_32BIT 19 + #define JUMP_LABEL_TYPE ".long " 20 + #else 21 + #define JUMP_LABEL_TYPE ".quad " 22 + #endif 15 23 16 24 /* This macro is also expanded on the Rust side. */ 17 25 #define JUMP_TABLE_ENTRY(key, label) \ 18 26 ".pushsection __jump_table, \"aw\" \n\t" \ 19 - ".align 3 \n\t" \ 27 + ".align " __stringify(PTRLOG) " \n\t" \ 20 28 ".long 1b - ., " label " - . \n\t" \ 21 - ".quad " key " - . \n\t" \ 29 + JUMP_LABEL_TYPE key " - . \n\t" \ 22 30 ".popsection \n\t" 23 31 24 32 #define ARCH_STATIC_BRANCH_ASM(key, label) \
+2
arch/loongarch/include/asm/string.h
··· 5 5 #ifndef _ASM_STRING_H 6 6 #define _ASM_STRING_H 7 7 8 + #ifdef CONFIG_64BIT 8 9 #define __HAVE_ARCH_MEMSET 9 10 extern void *memset(void *__s, int __c, size_t __count); 10 11 extern void *__memset(void *__s, int __c, size_t __count); ··· 17 16 #define __HAVE_ARCH_MEMMOVE 18 17 extern void *memmove(void *__dest, __const__ void *__src, size_t __n); 19 18 extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); 19 + #endif 20 20 21 21 #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) 22 22
+24 -6
arch/loongarch/kernel/unaligned.c
··· 27 27 static u32 unaligned_instructions_kernel; 28 28 #endif 29 29 30 - static inline unsigned long read_fpr(unsigned int idx) 30 + static inline u64 read_fpr(unsigned int idx) 31 31 { 32 + #ifdef CONFIG_64BIT 32 33 #define READ_FPR(idx, __value) \ 33 34 __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value)); 34 - 35 - unsigned long __value; 35 + #else 36 + #define READ_FPR(idx, __value) \ 37 + { \ 38 + u32 __value_lo, __value_hi; \ 39 + __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \ 40 + __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \ 41 + __value = (__value_lo | ((u64)__value_hi << 32)); \ 42 + } 43 + #endif 44 + u64 __value; 36 45 37 46 switch (idx) { 38 47 case 0: ··· 147 138 return __value; 148 139 } 149 140 150 - static inline void write_fpr(unsigned int idx, unsigned long value) 141 + static inline void write_fpr(unsigned int idx, u64 value) 151 142 { 143 + #ifdef CONFIG_64BIT 152 144 #define WRITE_FPR(idx, value) \ 153 145 __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value)); 154 - 146 + #else 147 + #define WRITE_FPR(idx, value) \ 148 + { \ 149 + u32 value_lo = value; \ 150 + u32 value_hi = value >> 32; \ 151 + __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \ 152 + __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \ 153 + } 154 + #endif 155 155 switch (idx) { 156 156 case 0: 157 157 WRITE_FPR(0, value); ··· 270 252 bool sign, write; 271 253 bool user = user_mode(regs); 272 254 unsigned int res, size = 0; 273 - unsigned long value = 0; 255 + u64 value = 0; 274 256 union loongarch_instruction insn; 275 257 276 258 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+13
arch/loongarch/lib/bswapdi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/export.h> 3 + #include <linux/compiler.h> 4 + #include <uapi/linux/swab.h> 5 + 6 + /* To silence -Wmissing-prototypes. */ 7 + unsigned long long __bswapdi2(unsigned long long u); 8 + 9 + unsigned long long notrace __bswapdi2(unsigned long long u) 10 + { 11 + return ___constant_swab64(u); 12 + } 13 + EXPORT_SYMBOL(__bswapdi2);
+13
arch/loongarch/lib/bswapsi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/export.h> 3 + #include <linux/compiler.h> 4 + #include <uapi/linux/swab.h> 5 + 6 + /* To silence -Wmissing-prototypes. */ 7 + unsigned int __bswapsi2(unsigned int u); 8 + 9 + unsigned int notrace __bswapsi2(unsigned int u) 10 + { 11 + return ___constant_swab32(u); 12 + } 13 + EXPORT_SYMBOL(__bswapsi2);
+36 -36
arch/loongarch/lib/unaligned.S
··· 24 24 * a3: sign 25 25 */ 26 26 SYM_FUNC_START(unaligned_read) 27 - beqz a2, 5f 27 + beqz a2, 5f 28 28 29 - li.w t2, 0 30 - addi.d t0, a2, -1 31 - slli.d t1, t0, 3 32 - add.d a0, a0, t0 29 + li.w t2, 0 30 + LONG_ADDI t0, a2, -1 31 + PTR_SLLI t1, t0, LONGLOG 32 + PTR_ADD a0, a0, t0 33 33 34 - beqz a3, 2f 35 - 1: ld.b t3, a0, 0 36 - b 3f 34 + beqz a3, 2f 35 + 1: ld.b t3, a0, 0 36 + b 3f 37 37 38 - 2: ld.bu t3, a0, 0 39 - 3: sll.d t3, t3, t1 40 - or t2, t2, t3 41 - addi.d t1, t1, -8 42 - addi.d a0, a0, -1 43 - addi.d a2, a2, -1 44 - bgtz a2, 2b 45 - 4: st.d t2, a1, 0 38 + 2: ld.bu t3, a0, 0 39 + 3: LONG_SLLV t3, t3, t1 40 + or t2, t2, t3 41 + LONG_ADDI t1, t1, -8 42 + PTR_ADDI a0, a0, -1 43 + PTR_ADDI a2, a2, -1 44 + bgtz a2, 2b 45 + 4: LONG_S t2, a1, 0 46 46 47 - move a0, a2 48 - jr ra 47 + move a0, a2 48 + jr ra 49 49 50 - 5: li.w a0, -EFAULT 51 - jr ra 50 + 5: li.w a0, -EFAULT 51 + jr ra 52 52 53 - _asm_extable 1b, .L_fixup_handle_unaligned 54 - _asm_extable 2b, .L_fixup_handle_unaligned 55 - _asm_extable 4b, .L_fixup_handle_unaligned 53 + _asm_extable 1b, .L_fixup_handle_unaligned 54 + _asm_extable 2b, .L_fixup_handle_unaligned 55 + _asm_extable 4b, .L_fixup_handle_unaligned 56 56 SYM_FUNC_END(unaligned_read) 57 57 58 58 /* ··· 63 63 * a2: n 64 64 */ 65 65 SYM_FUNC_START(unaligned_write) 66 - beqz a2, 3f 66 + beqz a2, 3f 67 67 68 - li.w t0, 0 69 - 1: srl.d t1, a1, t0 70 - 2: st.b t1, a0, 0 71 - addi.d t0, t0, 8 72 - addi.d a2, a2, -1 73 - addi.d a0, a0, 1 74 - bgtz a2, 1b 68 + li.w t0, 0 69 + 1: LONG_SRLV t1, a1, t0 70 + 2: st.b t1, a0, 0 71 + LONG_ADDI t0, t0, 8 72 + PTR_ADDI a2, a2, -1 73 + PTR_ADDI a0, a0, 1 74 + bgtz a2, 1b 75 75 76 - move a0, a2 77 - jr ra 76 + move a0, a2 77 + jr ra 78 78 79 - 3: li.w a0, -EFAULT 80 - jr ra 79 + 3: li.w a0, -EFAULT 80 + jr ra 81 81 82 - _asm_extable 2b, .L_fixup_handle_unaligned 82 + _asm_extable 2b, .L_fixup_handle_unaligned 83 83 SYM_FUNC_END(unaligned_write)
+2 -2
arch/loongarch/power/platform.c
··· 72 72 status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr); 73 73 if (ACPI_FAILURE(status) || !suspend_addr) { 74 74 pr_info("ACPI S3 supported with hardware register default\n"); 75 - loongson_sysconf.suspend_addr = (u64)default_suspend_addr; 75 + loongson_sysconf.suspend_addr = (unsigned long)default_suspend_addr; 76 76 } else { 77 77 pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n"); 78 - loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr)); 78 + loongson_sysconf.suspend_addr = (unsigned long)phys_to_virt(PHYSADDR(suspend_addr)); 79 79 } 80 80 #endif 81 81 return 0;
+36 -36
arch/loongarch/power/suspend_asm.S
··· 14 14 15 15 /* preparatory stuff */ 16 16 .macro SETUP_SLEEP 17 - addi.d sp, sp, -PT_SIZE 17 + PTR_ADDI sp, sp, -PT_SIZE 18 - st.d $r1, sp, PT_R1 18 + REG_S $r1, sp, PT_R1 19 - st.d $r2, sp, PT_R2 19 + REG_S $r2, sp, PT_R2 20 - st.d $r3, sp, PT_R3 20 + REG_S $r3, sp, PT_R3 21 - st.d $r4, sp, PT_R4 21 + REG_S $r4, sp, PT_R4 22 - st.d $r21, sp, PT_R21 22 + REG_S $r21, sp, PT_R21 23 - st.d $r22, sp, PT_R22 23 + REG_S $r22, sp, PT_R22 24 - st.d $r23, sp, PT_R23 24 + REG_S $r23, sp, PT_R23 25 - st.d $r24, sp, PT_R24 25 + REG_S $r24, sp, PT_R24 26 - st.d $r25, sp, PT_R25 26 + REG_S $r25, sp, PT_R25 27 - st.d $r26, sp, PT_R26 27 + REG_S $r26, sp, PT_R26 28 - st.d $r27, sp, PT_R27 28 + REG_S $r27, sp, PT_R27 29 - st.d $r28, sp, PT_R28 29 + REG_S $r28, sp, PT_R28 30 - st.d $r29, sp, PT_R29 30 + REG_S $r29, sp, PT_R29 31 - st.d $r30, sp, PT_R30 31 + REG_S $r30, sp, PT_R30 32 - st.d $r31, sp, PT_R31 32 + REG_S $r31, sp, PT_R31 33 33 .endm 34 34 35 35 .macro SETUP_WAKEUP 36 - ld.d $r1, sp, PT_R1 36 + REG_L $r1, sp, PT_R1 37 - ld.d $r2, sp, PT_R2 37 + REG_L $r2, sp, PT_R2 38 - ld.d $r3, sp, PT_R3 38 + REG_L $r3, sp, PT_R3 39 - ld.d $r4, sp, PT_R4 39 + REG_L $r4, sp, PT_R4 40 - ld.d $r21, sp, PT_R21 40 + REG_L $r21, sp, PT_R21 41 - ld.d $r22, sp, PT_R22 41 + REG_L $r22, sp, PT_R22 42 - ld.d $r23, sp, PT_R23 42 + REG_L $r23, sp, PT_R23 43 - ld.d $r24, sp, PT_R24 43 + REG_L $r24, sp, PT_R24 44 - ld.d $r25, sp, PT_R25 44 + REG_L $r25, sp, PT_R25 45 - ld.d $r26, sp, PT_R26 45 + REG_L $r26, sp, PT_R26 46 - ld.d $r27, sp, PT_R27 46 + REG_L $r27, sp, PT_R27 47 - ld.d $r28, sp, PT_R28 47 + REG_L $r28, sp, PT_R28 48 - ld.d $r29, sp, PT_R29 48 + REG_L $r29, sp, PT_R29 49 - ld.d $r30, sp, PT_R30 49 + REG_L $r30, sp, PT_R30 50 - ld.d $r31, sp, PT_R31 50 + REG_L $r31, sp, PT_R31 51 - addi.d sp, sp, PT_SIZE 51 + PTR_ADDI sp, sp, PT_SIZE 52 52 .endm 53 53 54 54 .text ··· 59 59 SETUP_SLEEP 60 60 61 61 la.pcrel t0, acpi_saved_sp 62 - st.d sp, t0, 0 62 + REG_S sp, t0, 0 63 63 64 64 bl __flush_cache_all 65 65 66 66 /* Pass RA and SP to BIOS */ 67 - addi.d a1, sp, 0 67 + PTR_ADDI a1, sp, 0 68 68 la.pcrel a0, loongarch_wakeup_start 69 69 la.pcrel t0, loongarch_suspend_addr 70 - ld.d t0, t0, 0 70 + REG_L t0, t0, 0 71 71 jirl ra, t0, 0 /* Call BIOS's STR sleep routine */ 72 72 73 73 /* ··· 83 83 csrwr t0, LOONGARCH_CSR_CRMD 84 84 85 85 la.pcrel t0, acpi_saved_sp 86 - ld.d sp, t0, 0 86 + REG_L sp, t0, 0 87 87 88 88 SETUP_WAKEUP 89 89 jr ra