Merge tag 'loongarch-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

- Initialize acpi_gbl_use_global_lock to false

- Allow specifying SIMD width via kernel parameters

- Add kexec_file (both EFI & ELF format) support

- Add PER_VMA_LOCK for page fault handling support

- Improve BPF trampoline support

- Update the default config file

- Some bug fixes and other small changes

* tag 'loongarch-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (23 commits)
LoongArch: Update Loongson-3 default config file
LoongArch: BPF: Sign-extend struct ops return values properly
LoongArch: BPF: Make error handling robust in arch_prepare_bpf_trampoline()
LoongArch: BPF: Make trampoline size stable
LoongArch: BPF: Don't align trampoline size
LoongArch: BPF: No support of struct argument in trampoline programs
LoongArch: BPF: No text_poke() for kernel text
LoongArch: BPF: Remove duplicated bpf_flush_icache()
LoongArch: BPF: Remove duplicated flags check
LoongArch: BPF: Fix uninitialized symbol 'retval_off'
LoongArch: BPF: Optimize sign-extention mov instructions
LoongArch: Handle new atomic instructions for probes
LoongArch: Try VMA lock-based page fault handling first
LoongArch: Automatically disable kaslr if boot from kexec_file
LoongArch: Add crash dump support for kexec_file
LoongArch: Add ELF binary support for kexec_file
LoongArch: Add EFI binary support for kexec_file
LoongArch: Add preparatory infrastructure for kexec_file
LoongArch: Add struct loongarch_image_header for kernel
LoongArch: Allow specify SIMD width via kernel parameters
...

+809 -50
+11
arch/loongarch/Kconfig
··· 70 70 select ARCH_SUPPORTS_LTO_CLANG_THIN 71 71 select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS 72 72 select ARCH_SUPPORTS_NUMA_BALANCING 73 + select ARCH_SUPPORTS_PER_VMA_LOCK 73 74 select ARCH_SUPPORTS_RT 74 75 select ARCH_SUPPORTS_SCHED_SMT if SMP 75 76 select ARCH_SUPPORTS_SCHED_MC if SMP ··· 618 617 619 618 config ARCH_SUPPORTS_KEXEC 620 619 def_bool y 620 + 621 + config ARCH_SUPPORTS_KEXEC_FILE 622 + def_bool 64BIT 623 + 624 + config ARCH_SELECTS_KEXEC_FILE 625 + def_bool 64BIT 626 + depends on KEXEC_FILE 627 + select KEXEC_ELF 628 + select RELOCATABLE 629 + select HAVE_IMA_KEXEC if IMA 621 630 622 631 config ARCH_SUPPORTS_CRASH_DUMP 623 632 def_bool y
+2 -2
arch/loongarch/Makefile
··· 115 115 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled. 116 116 # Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to 117 117 # be passed via '-mllvm' to ld.lld. 118 - KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump 118 + KBUILD_LDFLAGS += $(call ld-option,-mllvm --loongarch-annotate-tablejump) 119 119 endif 120 120 endif 121 121 ··· 129 129 LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs) 130 130 endif 131 131 132 - cflags-y += $(call cc-option, -mno-check-zero-division) 132 + cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference) 133 133 134 134 ifndef CONFIG_KASAN 135 135 cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
+68 -5
arch/loongarch/configs/loongson3_defconfig
··· 45 45 CONFIG_KALLSYMS_ALL=y 46 46 CONFIG_PERF_EVENTS=y 47 47 CONFIG_KEXEC=y 48 + CONFIG_KEXEC_FILE=y 48 49 CONFIG_CRASH_DUMP=y 49 50 CONFIG_LOONGARCH=y 50 51 CONFIG_64BIT=y ··· 56 55 CONFIG_EFI=y 57 56 CONFIG_SMP=y 58 57 CONFIG_HOTPLUG_CPU=y 59 - CONFIG_NR_CPUS=256 58 + CONFIG_NR_CPUS=2048 60 59 CONFIG_NUMA=y 61 60 CONFIG_CPU_HAS_FPU=y 62 61 CONFIG_CPU_HAS_LSX=y ··· 155 154 CONFIG_INET_IPCOMP=m 156 155 CONFIG_INET_UDP_DIAG=y 157 156 CONFIG_TCP_CONG_ADVANCED=y 158 - CONFIG_TCP_CONG_BBR=m 157 + CONFIG_TCP_CONG_BIC=y 158 + CONFIG_TCP_CONG_HSTCP=m 159 + CONFIG_TCP_CONG_HYBLA=m 160 + CONFIG_TCP_CONG_VEGAS=m 161 + CONFIG_TCP_CONG_NV=m 162 + CONFIG_TCP_CONG_SCALABLE=m 163 + CONFIG_TCP_CONG_VENO=m 164 + CONFIG_TCP_CONG_DCTCP=m 165 + CONFIG_TCP_CONG_CDG=m 166 + CONFIG_TCP_CONG_BBR=y 159 167 CONFIG_IPV6_ROUTER_PREF=y 160 168 CONFIG_IPV6_ROUTE_INFO=y 161 169 CONFIG_INET6_AH=m ··· 341 331 CONFIG_NET_SCHED=y 342 332 CONFIG_NET_SCH_HTB=m 343 333 CONFIG_NET_SCH_PRIO=m 334 + CONFIG_NET_SCH_MULTIQ=m 335 + CONFIG_NET_SCH_RED=m 336 + CONFIG_NET_SCH_SFB=m 344 337 CONFIG_NET_SCH_SFQ=m 345 338 CONFIG_NET_SCH_TBF=m 339 + CONFIG_NET_SCH_CBS=m 340 + CONFIG_NET_SCH_GRED=m 346 341 CONFIG_NET_SCH_NETEM=m 342 + CONFIG_NET_SCH_MQPRIO=m 343 + CONFIG_NET_SCH_SKBPRIO=m 344 + CONFIG_NET_SCH_QFQ=m 345 + CONFIG_NET_SCH_CODEL=m 346 + CONFIG_NET_SCH_FQ_CODEL=m 347 + CONFIG_NET_SCH_CAKE=m 348 + CONFIG_NET_SCH_FQ=m 349 + CONFIG_NET_SCH_PIE=m 350 + CONFIG_NET_SCH_FQ_PIE=m 347 351 CONFIG_NET_SCH_INGRESS=m 352 + CONFIG_NET_SCH_DEFAULT=y 348 353 CONFIG_NET_CLS_BASIC=m 349 354 CONFIG_NET_CLS_FW=m 350 355 CONFIG_NET_CLS_U32=m 356 + CONFIG_NET_CLS_FLOW=m 351 357 CONFIG_NET_CLS_CGROUP=m 352 358 CONFIG_NET_CLS_BPF=m 359 + CONFIG_NET_CLS_FLOWER=m 360 + CONFIG_NET_CLS_MATCHALL=m 353 361 CONFIG_NET_CLS_ACT=y 354 362 CONFIG_NET_ACT_POLICE=m 355 363 CONFIG_NET_ACT_GACT=m ··· 435 407 CONFIG_DEVTMPFS_MOUNT=y 436 408 CONFIG_FW_LOADER_COMPRESS=y 437 409 CONFIG_FW_LOADER_COMPRESS_ZSTD=y 410 + CONFIG_SYSFB_SIMPLEFB=y 
438 411 CONFIG_EFI_ZBOOT=y 439 412 CONFIG_EFI_BOOTLOADER_CONTROL=m 440 413 CONFIG_EFI_CAPSULE_LOADER=m ··· 449 420 CONFIG_MTD_CFI_STAA=m 450 421 CONFIG_MTD_RAM=m 451 422 CONFIG_MTD_ROM=m 423 + CONFIG_MTD_RAW_NAND=m 424 + CONFIG_MTD_NAND_PLATFORM=m 425 + CONFIG_MTD_NAND_LOONGSON=m 426 + CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y 427 + CONFIG_MTD_NAND_ECC_SW_BCH=y 452 428 CONFIG_MTD_UBI=m 453 429 CONFIG_MTD_UBI_BLOCK=y 454 430 CONFIG_PARPORT=y ··· 609 575 CONFIG_E1000E=y 610 576 CONFIG_IGB=y 611 577 CONFIG_IXGBE=y 578 + CONFIG_I40E=y 579 + CONFIG_ICE=y 580 + CONFIG_FM10K=y 581 + CONFIG_IGC=y 582 + CONFIG_IDPF=y 612 583 # CONFIG_NET_VENDOR_MARVELL is not set 613 584 # CONFIG_NET_VENDOR_MELLANOX is not set 614 585 # CONFIG_NET_VENDOR_MICREL is not set ··· 718 679 CONFIG_INPUT_MOUSEDEV=y 719 680 CONFIG_INPUT_MOUSEDEV_PSAUX=y 720 681 CONFIG_INPUT_EVDEV=y 682 + CONFIG_KEYBOARD_GPIO=m 683 + CONFIG_KEYBOARD_GPIO_POLLED=m 684 + CONFIG_KEYBOARD_MATRIX=m 721 685 CONFIG_KEYBOARD_XTKBD=m 722 686 CONFIG_MOUSE_PS2_ELANTECH=y 723 687 CONFIG_MOUSE_PS2_SENTELIC=y ··· 745 703 CONFIG_IPMI_HANDLER=m 746 704 CONFIG_IPMI_DEVICE_INTERFACE=m 747 705 CONFIG_IPMI_SI=m 706 + CONFIG_IPMI_LS2K=y 748 707 CONFIG_HW_RANDOM=y 749 708 CONFIG_HW_RANDOM_VIRTIO=m 709 + CONFIG_TCG_TPM=m 710 + CONFIG_TCG_LOONGSON=m 750 711 CONFIG_I2C_CHARDEV=y 751 712 CONFIG_I2C_PIIX4=y 752 713 CONFIG_I2C_DESIGNWARE_CORE=y ··· 765 720 CONFIG_GPIO_SYSFS=y 766 721 CONFIG_GPIO_LOONGSON=y 767 722 CONFIG_GPIO_LOONGSON_64BIT=y 723 + CONFIG_GPIO_PCA953X=m 724 + CONFIG_GPIO_PCA953X_IRQ=y 725 + CONFIG_GPIO_PCA9570=m 726 + CONFIG_GPIO_PCF857X=m 768 727 CONFIG_POWER_RESET=y 769 728 CONFIG_POWER_RESET_RESTART=y 770 729 CONFIG_POWER_RESET_SYSCON=y ··· 779 730 CONFIG_SENSORS_W83795=m 780 731 CONFIG_SENSORS_W83627HF=m 781 732 CONFIG_LOONGSON2_THERMAL=m 733 + CONFIG_MFD_LOONGSON_SE=m 782 734 CONFIG_RC_CORE=m 783 735 CONFIG_LIRC=y 784 736 CONFIG_RC_DECODERS=y ··· 811 761 CONFIG_DRM_QXL=m 812 762 CONFIG_DRM_VIRTIO_GPU=m 813 763 
CONFIG_DRM_LOONGSON=y 764 + CONFIG_DRM_SIMPLEDRM=y 814 765 CONFIG_FB=y 815 766 CONFIG_FB_EFI=y 816 767 CONFIG_FB_RADEON=y ··· 852 801 CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y 853 802 CONFIG_SND_HDA_CODEC_CONEXANT=y 854 803 CONFIG_SND_USB_AUDIO=m 804 + CONFIG_SND_USB_AUDIO_MIDI_V2=y 855 805 CONFIG_SND_SOC=m 856 806 CONFIG_SND_SOC_LOONGSON_CARD=m 857 807 CONFIG_SND_SOC_ES7134=m ··· 913 861 CONFIG_TYPEC_TCPCI=m 914 862 CONFIG_TYPEC_UCSI=m 915 863 CONFIG_UCSI_ACPI=m 864 + CONFIG_MMC=y 865 + CONFIG_MMC_LOONGSON2=m 916 866 CONFIG_INFINIBAND=m 917 867 CONFIG_EDAC=y 918 868 # CONFIG_EDAC_LEGACY_SYSFS is not set ··· 976 922 CONFIG_NTB_PERF=m 977 923 CONFIG_NTB_TRANSPORT=m 978 924 CONFIG_PWM=y 925 + CONFIG_PWM_LOONGSON=y 979 926 CONFIG_GENERIC_PHY=y 980 927 CONFIG_USB4=y 981 928 CONFIG_EXT2_FS=y 982 929 CONFIG_EXT2_FS_XATTR=y 983 930 CONFIG_EXT2_FS_POSIX_ACL=y 984 931 CONFIG_EXT2_FS_SECURITY=y 985 - CONFIG_EXT3_FS=y 986 - CONFIG_EXT3_FS_POSIX_ACL=y 987 - CONFIG_EXT3_FS_SECURITY=y 932 + CONFIG_EXT4_FS=y 933 + CONFIG_EXT4_FS_POSIX_ACL=y 934 + CONFIG_EXT4_FS_SECURITY=y 988 935 CONFIG_JFS_FS=m 989 936 CONFIG_JFS_POSIX_ACL=y 990 937 CONFIG_JFS_SECURITY=y 991 938 CONFIG_XFS_FS=y 939 + CONFIG_XFS_SUPPORT_V4=y 940 + CONFIG_XFS_SUPPORT_ASCII_CI=y 992 941 CONFIG_XFS_QUOTA=y 993 942 CONFIG_XFS_POSIX_ACL=y 994 943 CONFIG_GFS2_FS=m ··· 1083 1026 CONFIG_CIFS=m 1084 1027 # CONFIG_CIFS_DEBUG is not set 1085 1028 CONFIG_9P_FS=y 1029 + CONFIG_NLS_DEFAULT="utf8" 1086 1030 CONFIG_NLS_CODEPAGE_437=y 1087 1031 CONFIG_NLS_CODEPAGE_936=y 1032 + CONFIG_NLS_CODEPAGE_950=y 1088 1033 CONFIG_NLS_ASCII=y 1034 + CONFIG_NLS_ISO8859_1=y 1089 1035 CONFIG_NLS_UTF8=y 1090 1036 CONFIG_DLM=m 1091 1037 CONFIG_KEY_DH_OPERATIONS=y ··· 1109 1049 CONFIG_CRYPTO_KHAZAD=m 1110 1050 CONFIG_CRYPTO_SEED=m 1111 1051 CONFIG_CRYPTO_SERPENT=m 1052 + CONFIG_CRYPTO_SM4_GENERIC=m 1112 1053 CONFIG_CRYPTO_TEA=m 1113 1054 CONFIG_CRYPTO_TWOFISH=m 1114 1055 CONFIG_CRYPTO_CHACHA20POLY1305=m 1056 + CONFIG_CRYPTO_SM3_GENERIC=m 1115 1057 
CONFIG_CRYPTO_WP512=m 1116 1058 CONFIG_CRYPTO_DEFLATE=m 1117 1059 CONFIG_CRYPTO_LZO=m ··· 1125 1063 CONFIG_CRYPTO_USER_API_RNG=m 1126 1064 CONFIG_CRYPTO_USER_API_AEAD=m 1127 1065 CONFIG_CRYPTO_DEV_VIRTIO=m 1066 + CONFIG_CRYPTO_DEV_LOONGSON_RNG=m 1128 1067 CONFIG_DMA_CMA=y 1129 1068 CONFIG_DMA_NUMA_CMA=y 1130 1069 CONFIG_CMA_SIZE_MBYTES=0
+52
arch/loongarch/include/asm/image.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * LoongArch binary image header for EFI(PE/COFF) format. 4 + * 5 + * Author: Youling Tang <tangyouling@kylinos.cn> 6 + * Copyright (C) 2025 KylinSoft Corporation. 7 + */ 8 + 9 + #ifndef __ASM_IMAGE_H 10 + #define __ASM_IMAGE_H 11 + 12 + #ifndef __ASSEMBLER__ 13 + 14 + /** 15 + * struct loongarch_image_header 16 + * 17 + * @dos_sig: Optional PE format 'MZ' signature. 18 + * @padding_1: Reserved. 19 + * @kernel_entry: Kernel image entry pointer. 20 + * @kernel_asize: An estimated size of the memory image size in LSB byte order. 21 + * @text_offset: The image load offset in LSB byte order. 22 + * @padding_2: Reserved. 23 + * @pe_header: Optional offset to a PE format header. 24 + **/ 25 + 26 + struct loongarch_image_header { 27 + uint8_t dos_sig[2]; 28 + uint16_t padding_1[3]; 29 + uint64_t kernel_entry; 30 + uint64_t kernel_asize; 31 + uint64_t text_offset; 32 + uint32_t padding_2[7]; 33 + uint32_t pe_header; 34 + }; 35 + 36 + /* 37 + * loongarch_header_check_dos_sig - Helper to check the header 38 + * 39 + * Returns true (non-zero) if 'MZ' signature is found. 40 + */ 41 + 42 + static inline int loongarch_header_check_dos_sig(const struct loongarch_image_header *h) 43 + { 44 + if (!h) 45 + return 0; 46 + 47 + return (h->dos_sig[0] == 'M' && h->dos_sig[1] == 'Z'); 48 + } 49 + 50 + #endif /* __ASSEMBLER__ */ 51 + 52 + #endif /* __ASM_IMAGE_H */
+5
arch/loongarch/include/asm/inst.h
··· 77 77 iocsrwrh_op = 0x19205, 78 78 iocsrwrw_op = 0x19206, 79 79 iocsrwrd_op = 0x19207, 80 + llacqw_op = 0xe15e0, 81 + screlw_op = 0xe15e1, 82 + llacqd_op = 0xe15e2, 83 + screld_op = 0xe15e3, 80 84 }; 81 85 82 86 enum reg2i5_op { ··· 193 189 fldxd_op = 0x7068, 194 190 fstxs_op = 0x7070, 195 191 fstxd_op = 0x7078, 192 + scq_op = 0x70ae, 196 193 amswapw_op = 0x70c0, 197 194 amswapd_op = 0x70c1, 198 195 amaddw_op = 0x70c2,
+12
arch/loongarch/include/asm/kexec.h
··· 41 41 unsigned long systable_ptr; 42 42 }; 43 43 44 + #ifdef CONFIG_KEXEC_FILE 45 + extern const struct kexec_file_ops kexec_efi_ops; 46 + extern const struct kexec_file_ops kexec_elf_ops; 47 + 48 + int arch_kimage_file_post_load_cleanup(struct kimage *image); 49 + #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup 50 + 51 + extern int load_other_segments(struct kimage *image, 52 + unsigned long kernel_load_addr, unsigned long kernel_size, 53 + char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len); 54 + #endif 55 + 44 56 typedef void (*do_kexec_t)(unsigned long efi_boot, 45 57 unsigned long cmdline_ptr, 46 58 unsigned long systable_ptr,
+1
arch/loongarch/kernel/Makefile
··· 62 62 obj-$(CONFIG_RELOCATABLE) += relocate.o 63 63 64 64 obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o 65 + obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_efi.o kexec_elf.o 65 66 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 66 67 67 68 obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+44 -2
arch/loongarch/kernel/cpu-probe.c
··· 52 52 c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask; 53 53 } 54 54 55 + /* simd = -1/0/128/256 */ 56 + static unsigned int simd = -1U; 57 + 58 + static int __init cpu_setup_simd(char *str) 59 + { 60 + get_option(&str, &simd); 61 + pr_info("Set SIMD width = %u\n", simd); 62 + 63 + return 0; 64 + } 65 + 66 + early_param("simd", cpu_setup_simd); 67 + 68 + static int __init cpu_final_simd(void) 69 + { 70 + struct cpuinfo_loongarch *c = &cpu_data[0]; 71 + 72 + if (simd < 128) { 73 + c->options &= ~LOONGARCH_CPU_LSX; 74 + elf_hwcap &= ~HWCAP_LOONGARCH_LSX; 75 + } 76 + 77 + if (simd < 256) { 78 + c->options &= ~LOONGARCH_CPU_LASX; 79 + elf_hwcap &= ~HWCAP_LOONGARCH_LASX; 80 + } 81 + 82 + simd = 0; 83 + 84 + if (c->options & LOONGARCH_CPU_LSX) 85 + simd = 128; 86 + 87 + if (c->options & LOONGARCH_CPU_LASX) 88 + simd = 256; 89 + 90 + pr_info("Final SIMD width = %u\n", simd); 91 + 92 + return 0; 93 + } 94 + 95 + arch_initcall(cpu_final_simd); 96 + 55 97 static inline void set_elf_platform(int cpu, const char *plat) 56 98 { 57 99 if (cpu == 0) ··· 176 134 elf_hwcap |= HWCAP_LOONGARCH_FPU; 177 135 } 178 136 #ifdef CONFIG_CPU_HAS_LSX 179 - if (config & CPUCFG2_LSX) { 137 + if ((config & CPUCFG2_LSX) && (simd >= 128)) { 180 138 c->options |= LOONGARCH_CPU_LSX; 181 139 elf_hwcap |= HWCAP_LOONGARCH_LSX; 182 140 } 183 141 #endif 184 142 #ifdef CONFIG_CPU_HAS_LASX 185 - if (config & CPUCFG2_LASX) { 143 + if ((config & CPUCFG2_LASX) && (simd >= 256)) { 186 144 c->options |= LOONGARCH_CPU_LASX; 187 145 elf_hwcap |= HWCAP_LOONGARCH_LASX; 188 146 }
+12
arch/loongarch/kernel/inst.c
··· 141 141 case amswapw_op ... ammindbdu_op: 142 142 pr_notice("atomic memory access instructions are not supported\n"); 143 143 return true; 144 + case scq_op: 145 + pr_notice("sc.q instruction is not supported\n"); 146 + return true; 144 147 } 145 148 146 149 switch (insn.reg2i14_format.opcode) { ··· 152 149 case scw_op: 153 150 case scd_op: 154 151 pr_notice("ll and sc instructions are not supported\n"); 152 + return true; 153 + } 154 + 155 + switch (insn.reg2_format.opcode) { 156 + case llacqw_op: 157 + case llacqd_op: 158 + case screlw_op: 159 + case screld_op: 160 + pr_notice("llacq and screl instructions are not supported\n"); 155 161 return true; 156 162 } 157 163
+113
arch/loongarch/kernel/kexec_efi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Load EFI vmlinux file for the kexec_file_load syscall. 4 + * 5 + * Author: Youling Tang <tangyouling@kylinos.cn> 6 + * Copyright (C) 2025 KylinSoft Corporation. 7 + */ 8 + 9 + #define pr_fmt(fmt) "kexec_file(EFI): " fmt 10 + 11 + #include <linux/err.h> 12 + #include <linux/errno.h> 13 + #include <linux/kernel.h> 14 + #include <linux/kexec.h> 15 + #include <linux/pe.h> 16 + #include <linux/string.h> 17 + #include <asm/byteorder.h> 18 + #include <asm/cpufeature.h> 19 + #include <asm/image.h> 20 + 21 + static int efi_kexec_probe(const char *kernel_buf, unsigned long kernel_len) 22 + { 23 + const struct loongarch_image_header *h = (const struct loongarch_image_header *)kernel_buf; 24 + 25 + if (!h || (kernel_len < sizeof(*h))) { 26 + kexec_dprintk("No LoongArch image header.\n"); 27 + return -EINVAL; 28 + } 29 + 30 + if (!loongarch_header_check_dos_sig(h)) { 31 + kexec_dprintk("No LoongArch PE image header.\n"); 32 + return -EINVAL; 33 + } 34 + 35 + return 0; 36 + } 37 + 38 + static void *efi_kexec_load(struct kimage *image, 39 + char *kernel, unsigned long kernel_len, 40 + char *initrd, unsigned long initrd_len, 41 + char *cmdline, unsigned long cmdline_len) 42 + { 43 + int ret; 44 + unsigned long text_offset, kernel_segment_number; 45 + struct kexec_buf kbuf; 46 + struct kexec_segment *kernel_segment; 47 + struct loongarch_image_header *h; 48 + 49 + h = (struct loongarch_image_header *)kernel; 50 + if (!h->kernel_asize) 51 + return ERR_PTR(-EINVAL); 52 + 53 + /* 54 + * Load the kernel 55 + * FIXME: Non-relocatable kernel rejected for kexec_file (require CONFIG_RELOCATABLE) 56 + */ 57 + kbuf.image = image; 58 + kbuf.buf_max = ULONG_MAX; 59 + kbuf.top_down = false; 60 + 61 + kbuf.buffer = kernel; 62 + kbuf.bufsz = kernel_len; 63 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 64 + kbuf.memsz = le64_to_cpu(h->kernel_asize); 65 + text_offset = le64_to_cpu(h->text_offset); 66 + kbuf.buf_min = text_offset; 67 + kbuf.buf_align = 
SZ_2M; 68 + 69 + kernel_segment_number = image->nr_segments; 70 + 71 + /* 72 + * The location of the kernel segment may make it impossible to 73 + * satisfy the other segment requirements, so we try repeatedly 74 + * to find a location that will work. 75 + */ 76 + while ((ret = kexec_add_buffer(&kbuf)) == 0) { 77 + /* Try to load additional data */ 78 + kernel_segment = &image->segment[kernel_segment_number]; 79 + ret = load_other_segments(image, kernel_segment->mem, 80 + kernel_segment->memsz, initrd, 81 + initrd_len, cmdline, cmdline_len); 82 + if (!ret) 83 + break; 84 + 85 + /* 86 + * We couldn't find space for the other segments; erase the 87 + * kernel segment and try the next available hole. 88 + */ 89 + image->nr_segments -= 1; 90 + kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz; 91 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 92 + } 93 + 94 + if (ret < 0) { 95 + pr_err("Could not find any suitable kernel location!"); 96 + return ERR_PTR(ret); 97 + } 98 + 99 + kernel_segment = &image->segment[kernel_segment_number]; 100 + 101 + /* Make sure the second kernel jumps to the correct "kernel_entry" */ 102 + image->start = kernel_segment->mem + h->kernel_entry - text_offset; 103 + 104 + kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", 105 + kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz); 106 + 107 + return NULL; 108 + } 109 + 110 + const struct kexec_file_ops kexec_efi_ops = { 111 + .probe = efi_kexec_probe, 112 + .load = efi_kexec_load, 113 + };
+105
arch/loongarch/kernel/kexec_elf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Load ELF vmlinux file for the kexec_file_load syscall. 4 + * 5 + * Author: Youling Tang <tangyouling@kylinos.cn> 6 + * Copyright (C) 2025 KylinSoft Corporation. 7 + */ 8 + 9 + #define pr_fmt(fmt) "kexec_file(ELF): " fmt 10 + 11 + #include <linux/elf.h> 12 + #include <linux/kexec.h> 13 + #include <linux/slab.h> 14 + #include <linux/types.h> 15 + #include <linux/memblock.h> 16 + #include <asm/setup.h> 17 + 18 + #define elf_kexec_probe kexec_elf_probe 19 + 20 + static int _elf_kexec_load(struct kimage *image, 21 + struct elfhdr *ehdr, struct kexec_elf_info *elf_info, 22 + struct kexec_buf *kbuf, unsigned long *text_offset) 23 + { 24 + int i, ret = -1; 25 + 26 + /* Read in the PT_LOAD segments. */ 27 + for (i = 0; i < ehdr->e_phnum; i++) { 28 + size_t size; 29 + const struct elf_phdr *phdr; 30 + 31 + phdr = &elf_info->proghdrs[i]; 32 + if (phdr->p_type != PT_LOAD) 33 + continue; 34 + 35 + size = phdr->p_filesz; 36 + if (size > phdr->p_memsz) 37 + size = phdr->p_memsz; 38 + 39 + kbuf->buffer = (void *)elf_info->buffer + phdr->p_offset; 40 + kbuf->bufsz = size; 41 + kbuf->buf_align = phdr->p_align; 42 + *text_offset = __pa(phdr->p_paddr); 43 + kbuf->buf_min = *text_offset; 44 + kbuf->memsz = ALIGN(phdr->p_memsz, SZ_64K); 45 + kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; 46 + ret = kexec_add_buffer(kbuf); 47 + if (ret < 0) 48 + break; 49 + } 50 + 51 + return ret; 52 + } 53 + 54 + static void *elf_kexec_load(struct kimage *image, 55 + char *kernel, unsigned long kernel_len, 56 + char *initrd, unsigned long initrd_len, 57 + char *cmdline, unsigned long cmdline_len) 58 + { 59 + int ret; 60 + unsigned long text_offset, kernel_segment_number; 61 + struct elfhdr ehdr; 62 + struct kexec_buf kbuf; 63 + struct kexec_elf_info elf_info; 64 + struct kexec_segment *kernel_segment; 65 + 66 + ret = kexec_build_elf_info(kernel, kernel_len, &ehdr, &elf_info); 67 + if (ret < 0) 68 + return ERR_PTR(ret); 69 + 70 + /* 71 + * Load the kernel 
72 + * FIXME: Non-relocatable kernel rejected for kexec_file (require CONFIG_RELOCATABLE) 73 + */ 74 + kbuf.image = image; 75 + kbuf.buf_max = ULONG_MAX; 76 + kbuf.top_down = false; 77 + 78 + kernel_segment_number = image->nr_segments; 79 + 80 + ret = _elf_kexec_load(image, &ehdr, &elf_info, &kbuf, &text_offset); 81 + if (ret < 0) 82 + goto out; 83 + 84 + /* Load additional data */ 85 + kernel_segment = &image->segment[kernel_segment_number]; 86 + ret = load_other_segments(image, kernel_segment->mem, kernel_segment->memsz, 87 + initrd, initrd_len, cmdline, cmdline_len); 88 + if (ret < 0) 89 + goto out; 90 + 91 + /* Make sure the second kernel jumps to the correct "kernel_entry". */ 92 + image->start = kernel_segment->mem + __pa(ehdr.e_entry) - text_offset; 93 + 94 + kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", 95 + kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz); 96 + 97 + out: 98 + kexec_free_elf_info(&elf_info); 99 + return ret ? ERR_PTR(ret) : NULL; 100 + } 101 + 102 + const struct kexec_file_ops kexec_elf_ops = { 103 + .probe = elf_kexec_probe, 104 + .load = elf_kexec_load, 105 + };
+24 -13
arch/loongarch/kernel/machine_kexec.c
··· 70 70 kimage->arch.efi_boot = fw_arg0; 71 71 kimage->arch.systable_ptr = fw_arg2; 72 72 73 - /* Find the command line */ 74 - for (i = 0; i < kimage->nr_segments; i++) { 75 - if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { 76 - if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) 77 - kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; 78 - break; 73 + if (kimage->file_mode == 1) { 74 + /* 75 + * kimage->cmdline_buf will be released in kexec_file_load, so copy 76 + * to the KEXEC_CMDLINE_ADDR safe area. 77 + */ 78 + memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr, 79 + strlen((char *)kimage->arch.cmdline_ptr) + 1); 80 + kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR; 81 + } else { 82 + /* Find the command line */ 83 + for (i = 0; i < kimage->nr_segments; i++) { 84 + if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { 85 + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) 86 + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; 87 + break; 88 + } 79 89 } 80 - } 81 90 82 - if (!kimage->arch.cmdline_ptr) { 83 - pr_err("Command line not included in the provided image\n"); 84 - return -EINVAL; 91 + if (!kimage->arch.cmdline_ptr) { 92 + pr_err("Command line not included in the provided image\n"); 93 + return -EINVAL; 94 + } 85 95 } 86 96 87 97 /* kexec/kdump need a safe page to save reboot_code_buffer */ ··· 297 287 /* We do not want to be bothered. 
*/ 298 288 local_irq_disable(); 299 289 300 - pr_notice("EFI boot flag 0x%lx\n", efi_boot); 301 - pr_notice("Command line at 0x%lx\n", cmdline_ptr); 302 - pr_notice("System table at 0x%lx\n", systable_ptr); 290 + pr_notice("EFI boot flag: 0x%lx\n", efi_boot); 291 + pr_notice("Command line addr: 0x%lx\n", cmdline_ptr); 292 + pr_notice("Command line string: %s\n", (char *)cmdline_ptr); 293 + pr_notice("System table addr: 0x%lx\n", systable_ptr); 303 294 pr_notice("We will call new kernel at 0x%lx\n", start_addr); 304 295 pr_notice("Bye ...\n"); 305 296
+239
arch/loongarch/kernel/machine_kexec_file.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * kexec_file for LoongArch 4 + * 5 + * Author: Youling Tang <tangyouling@kylinos.cn> 6 + * Copyright (C) 2025 KylinSoft Corporation. 7 + * 8 + * Most code is derived from LoongArch port of kexec-tools 9 + */ 10 + 11 + #define pr_fmt(fmt) "kexec_file: " fmt 12 + 13 + #include <linux/ioport.h> 14 + #include <linux/kernel.h> 15 + #include <linux/kexec.h> 16 + #include <linux/memblock.h> 17 + #include <linux/slab.h> 18 + #include <linux/string.h> 19 + #include <linux/types.h> 20 + #include <linux/vmalloc.h> 21 + #include <asm/bootinfo.h> 22 + 23 + const struct kexec_file_ops * const kexec_file_loaders[] = { 24 + &kexec_efi_ops, 25 + &kexec_elf_ops, 26 + NULL 27 + }; 28 + 29 + int arch_kimage_file_post_load_cleanup(struct kimage *image) 30 + { 31 + vfree(image->elf_headers); 32 + image->elf_headers = NULL; 33 + image->elf_headers_sz = 0; 34 + 35 + return kexec_image_post_load_cleanup_default(image); 36 + } 37 + 38 + /* Add the "kexec_file" command line parameter to command line. */ 39 + static void cmdline_add_loader(unsigned long *cmdline_tmplen, char *modified_cmdline) 40 + { 41 + int loader_strlen; 42 + 43 + loader_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "kexec_file "); 44 + *cmdline_tmplen += loader_strlen; 45 + } 46 + 47 + /* Add the "initrd=start,size" command line parameter to command line. 
*/ 48 + static void cmdline_add_initrd(struct kimage *image, unsigned long *cmdline_tmplen, 49 + char *modified_cmdline, unsigned long initrd) 50 + { 51 + int initrd_strlen; 52 + 53 + initrd_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "initrd=0x%lx,0x%lx ", 54 + initrd, image->initrd_buf_len); 55 + *cmdline_tmplen += initrd_strlen; 56 + } 57 + 58 + #ifdef CONFIG_CRASH_DUMP 59 + 60 + static int prepare_elf_headers(void **addr, unsigned long *sz) 61 + { 62 + int ret, nr_ranges; 63 + uint64_t i; 64 + phys_addr_t start, end; 65 + struct crash_mem *cmem; 66 + 67 + nr_ranges = 2; /* for exclusion of crashkernel region */ 68 + for_each_mem_range(i, &start, &end) 69 + nr_ranges++; 70 + 71 + cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); 72 + if (!cmem) 73 + return -ENOMEM; 74 + 75 + cmem->max_nr_ranges = nr_ranges; 76 + cmem->nr_ranges = 0; 77 + for_each_mem_range(i, &start, &end) { 78 + cmem->ranges[cmem->nr_ranges].start = start; 79 + cmem->ranges[cmem->nr_ranges].end = end - 1; 80 + cmem->nr_ranges++; 81 + } 82 + 83 + /* Exclude crashkernel region */ 84 + ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); 85 + if (ret < 0) 86 + goto out; 87 + 88 + if (crashk_low_res.end) { 89 + ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); 90 + if (ret < 0) 91 + goto out; 92 + } 93 + 94 + ret = crash_prepare_elf64_headers(cmem, true, addr, sz); 95 + 96 + out: 97 + kfree(cmem); 98 + return ret; 99 + } 100 + 101 + /* 102 + * Add the "mem=size@start" command line parameter to command line, indicating the 103 + * memory region the new kernel can use to boot into. 
104 + */ 105 + static void cmdline_add_mem(unsigned long *cmdline_tmplen, char *modified_cmdline) 106 + { 107 + int mem_strlen = 0; 108 + 109 + mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ", 110 + crashk_res.end - crashk_res.start + 1, crashk_res.start); 111 + *cmdline_tmplen += mem_strlen; 112 + 113 + if (crashk_low_res.end) { 114 + mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ", 115 + crashk_low_res.end - crashk_low_res.start + 1, crashk_low_res.start); 116 + *cmdline_tmplen += mem_strlen; 117 + } 118 + } 119 + 120 + /* Add the "elfcorehdr=size@start" command line parameter to command line. */ 121 + static void cmdline_add_elfcorehdr(struct kimage *image, unsigned long *cmdline_tmplen, 122 + char *modified_cmdline, unsigned long elfcorehdr_sz) 123 + { 124 + int elfcorehdr_strlen = 0; 125 + 126 + elfcorehdr_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "elfcorehdr=0x%lx@0x%lx ", 127 + elfcorehdr_sz, image->elf_load_addr); 128 + *cmdline_tmplen += elfcorehdr_strlen; 129 + } 130 + 131 + #endif 132 + 133 + /* 134 + * Try to add the initrd to the image. If it is not possible to find valid 135 + * locations, this function will undo changes to the image and return non zero. 
136 + */ 137 + int load_other_segments(struct kimage *image, 138 + unsigned long kernel_load_addr, unsigned long kernel_size, 139 + char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len) 140 + { 141 + int ret = 0; 142 + unsigned long cmdline_tmplen = 0; 143 + unsigned long initrd_load_addr = 0; 144 + unsigned long orig_segments = image->nr_segments; 145 + char *modified_cmdline = NULL; 146 + struct kexec_buf kbuf; 147 + 148 + kbuf.image = image; 149 + /* Don't allocate anything below the kernel */ 150 + kbuf.buf_min = kernel_load_addr + kernel_size; 151 + 152 + modified_cmdline = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); 153 + if (!modified_cmdline) 154 + return -EINVAL; 155 + 156 + cmdline_add_loader(&cmdline_tmplen, modified_cmdline); 157 + /* Ensure it's null terminated */ 158 + modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0'; 159 + 160 + #ifdef CONFIG_CRASH_DUMP 161 + /* Load elf core header */ 162 + if (image->type == KEXEC_TYPE_CRASH) { 163 + void *headers; 164 + unsigned long headers_sz; 165 + 166 + ret = prepare_elf_headers(&headers, &headers_sz); 167 + if (ret < 0) { 168 + pr_err("Preparing elf core header failed\n"); 169 + goto out_err; 170 + } 171 + 172 + kbuf.buffer = headers; 173 + kbuf.bufsz = headers_sz; 174 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 175 + kbuf.memsz = headers_sz; 176 + kbuf.buf_align = SZ_64K; /* largest supported page size */ 177 + kbuf.buf_max = ULONG_MAX; 178 + kbuf.top_down = true; 179 + 180 + ret = kexec_add_buffer(&kbuf); 181 + if (ret < 0) { 182 + vfree(headers); 183 + goto out_err; 184 + } 185 + image->elf_headers = headers; 186 + image->elf_load_addr = kbuf.mem; 187 + image->elf_headers_sz = headers_sz; 188 + 189 + kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", 190 + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); 191 + 192 + /* Add the mem=size@start parameter to the command line */ 193 + cmdline_add_mem(&cmdline_tmplen, modified_cmdline); 194 + 195 + /* Add the 
elfcorehdr=size@start parameter to the command line */ 196 + cmdline_add_elfcorehdr(image, &cmdline_tmplen, modified_cmdline, headers_sz); 197 + } 198 + #endif 199 + 200 + /* Load initrd */ 201 + if (initrd) { 202 + kbuf.buffer = initrd; 203 + kbuf.bufsz = initrd_len; 204 + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 205 + kbuf.memsz = initrd_len; 206 + kbuf.buf_align = 0; 207 + /* within 1GB-aligned window of up to 32GB in size */ 208 + kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + (unsigned long)SZ_1G * 32; 209 + kbuf.top_down = false; 210 + 211 + ret = kexec_add_buffer(&kbuf); 212 + if (ret < 0) 213 + goto out_err; 214 + initrd_load_addr = kbuf.mem; 215 + 216 + kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", 217 + initrd_load_addr, kbuf.bufsz, kbuf.memsz); 218 + 219 + /* Add the initrd=start,size parameter to the command line */ 220 + cmdline_add_initrd(image, &cmdline_tmplen, modified_cmdline, initrd_load_addr); 221 + } 222 + 223 + if (cmdline_len + cmdline_tmplen > COMMAND_LINE_SIZE) { 224 + pr_err("Appending command line exceeds COMMAND_LINE_SIZE\n"); 225 + ret = -EINVAL; 226 + goto out_err; 227 + } 228 + 229 + memcpy(modified_cmdline + cmdline_tmplen, cmdline, cmdline_len); 230 + cmdline = modified_cmdline; 231 + image->arch.cmdline_ptr = (unsigned long)cmdline; 232 + 233 + return 0; 234 + 235 + out_err: 236 + image->nr_segments = orig_segments; 237 + kfree(modified_cmdline); 238 + return ret; 239 + }
+4
arch/loongarch/kernel/relocate.c
··· 166 166 return true; 167 167 #endif 168 168 169 + str = strstr(boot_command_line, "kexec_file"); 170 + if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' ')) 171 + return true; 172 + 169 173 return false; 170 174 } 171 175
+1
arch/loongarch/kernel/setup.c
··· 355 355 356 356 #ifdef CONFIG_ACPI 357 357 acpi_table_upgrade(); 358 + acpi_gbl_use_global_lock = false; 358 359 acpi_gbl_use_default_register_widths = false; 359 360 acpi_boot_table_init(); 360 361 #endif
+55 -3
arch/loongarch/mm/fault.c
··· 215 215 flags |= FAULT_FLAG_USER; 216 216 217 217 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 218 + 219 + if (!(flags & FAULT_FLAG_USER)) 220 + goto lock_mmap; 221 + 222 + vma = lock_vma_under_rcu(mm, address); 223 + if (!vma) 224 + goto lock_mmap; 225 + 226 + if (write) { 227 + flags |= FAULT_FLAG_WRITE; 228 + if (!(vma->vm_flags & VM_WRITE)) { 229 + vma_end_read(vma); 230 + si_code = SEGV_ACCERR; 231 + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 232 + goto bad_area_nosemaphore; 233 + } 234 + } else { 235 + if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) { 236 + vma_end_read(vma); 237 + si_code = SEGV_ACCERR; 238 + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 239 + goto bad_area_nosemaphore; 240 + } 241 + if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) { 242 + vma_end_read(vma); 243 + si_code = SEGV_ACCERR; 244 + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 245 + goto bad_area_nosemaphore; 246 + } 247 + } 248 + 249 + fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); 250 + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) 251 + vma_end_read(vma); 252 + 253 + if (!(fault & VM_FAULT_RETRY)) { 254 + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); 255 + goto done; 256 + } 257 + 258 + count_vm_vma_lock_event(VMA_LOCK_RETRY); 259 + if (fault & VM_FAULT_MAJOR) 260 + flags |= FAULT_FLAG_TRIED; 261 + 262 + /* Quick path to respond to signals */ 263 + if (fault_signal_pending(fault, regs)) { 264 + if (!user_mode(regs)) 265 + no_context(regs, write, address); 266 + return; 267 + } 268 + lock_mmap: 269 + 218 270 retry: 219 271 vma = lock_mm_and_find_vma(mm, address, regs); 220 272 if (unlikely(!vma)) ··· 328 276 */ 329 277 goto retry; 330 278 } 279 + mmap_read_unlock(mm); 280 + 281 + done: 331 282 if (unlikely(fault & VM_FAULT_ERROR)) { 332 - mmap_read_unlock(mm); 333 283 if (fault & VM_FAULT_OOM) { 334 284 do_out_of_memory(regs, write, address); 335 285 return; ··· 344 290 } 345 291 BUG(); 
346 292 } 347 - 348 - mmap_read_unlock(mm); 349 293 } 350 294 351 295 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+61 -25
arch/loongarch/net/bpf_jit.c
··· 527 527 emit_zext_32(ctx, dst, is32); 528 528 break; 529 529 case 8: 530 - move_reg(ctx, t1, src); 531 - emit_insn(ctx, extwb, dst, t1); 530 + emit_insn(ctx, extwb, dst, src); 532 531 emit_zext_32(ctx, dst, is32); 533 532 break; 534 533 case 16: 535 - move_reg(ctx, t1, src); 536 - emit_insn(ctx, extwh, dst, t1); 534 + emit_insn(ctx, extwh, dst, src); 537 535 emit_zext_32(ctx, dst, is32); 538 536 break; 539 537 case 32: ··· 1292 1294 u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; 1293 1295 u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP}; 1294 1296 1295 - if (!is_kernel_text((unsigned long)ip) && 1296 - !is_bpf_text_address((unsigned long)ip)) 1297 + /* Only poking bpf text is supported. Since kernel function entry 1298 + * is set up by ftrace, we rely on ftrace to poke kernel functions. 1299 + */ 1300 + if (!is_bpf_text_address((unsigned long)ip)) 1297 1301 return -ENOTSUPP; 1298 1302 1299 1303 ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call); ··· 1448 1448 bpf_prog_pack_free(image, size); 1449 1449 } 1450 1450 1451 + /* 1452 + * Sign-extend the register if necessary 1453 + */ 1454 + static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign) 1455 + { 1456 + /* ABI requires unsigned char/short to be zero-extended */ 1457 + if (!sign && (size == 1 || size == 2)) { 1458 + if (rd != rj) 1459 + move_reg(ctx, rd, rj); 1460 + return; 1461 + } 1462 + 1463 + switch (size) { 1464 + case 1: 1465 + emit_insn(ctx, extwb, rd, rj); 1466 + break; 1467 + case 2: 1468 + emit_insn(ctx, extwh, rd, rj); 1469 + break; 1470 + case 4: 1471 + emit_insn(ctx, addiw, rd, rj, 0); 1472 + break; 1473 + case 8: 1474 + if (rd != rj) 1475 + move_reg(ctx, rd, rj); 1476 + break; 1477 + default: 1478 + pr_warn("bpf_jit: invalid size %d for sign_extend\n", size); 1479 + } 1480 + } 1481 + 1451 1482 static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, 1452 1483 const struct btf_func_model *m, 
struct bpf_tramp_links *tlinks, 1453 1484 void *func_addr, u32 flags) 1454 1485 { 1455 1486 int i, ret, save_ret; 1456 - int stack_size = 0, nargs = 0; 1487 + int stack_size, nargs; 1457 1488 int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off; 1458 1489 bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; 1459 1490 void *orig_call = func_addr; ··· 1492 1461 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 1493 1462 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 1494 1463 u32 **branches = NULL; 1495 - 1496 - if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) 1497 - return -ENOTSUPP; 1498 1464 1499 1465 /* 1500 1466 * FP + 8 [ RA to parent func ] return address to parent ··· 1523 1495 if (m->nr_args > LOONGARCH_MAX_REG_ARGS) 1524 1496 return -ENOTSUPP; 1525 1497 1498 + /* FIXME: No support of struct argument */ 1499 + for (i = 0; i < m->nr_args; i++) { 1500 + if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) 1501 + return -ENOTSUPP; 1502 + } 1503 + 1526 1504 if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) 1527 1505 return -ENOTSUPP; 1528 1506 1529 - stack_size = 0; 1530 - 1531 1507 /* Room of trampoline frame to store return address and frame pointer */ 1532 - stack_size += 16; 1508 + stack_size = 16; 1533 1509 1534 1510 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 1535 - if (save_ret) { 1536 - /* Save BPF R0 and A0 */ 1537 - stack_size += 16; 1538 - retval_off = stack_size; 1539 - } 1511 + if (save_ret) 1512 + stack_size += 16; /* Save BPF R0 and A0 */ 1513 + 1514 + retval_off = stack_size; 1540 1515 1541 1516 /* Room of trampoline frame to store args */ 1542 1517 nargs = m->nr_args; ··· 1626 1595 orig_call += LOONGARCH_BPF_FENTRY_NBYTES; 1627 1596 1628 1597 if (flags & BPF_TRAMP_F_CALL_ORIG) { 1629 - move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); 1598 + move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im); 1630 1599 ret = emit_call(ctx, (const 
u64)__bpf_tramp_enter); 1631 1600 if (ret) 1632 1601 return ret; ··· 1676 1645 1677 1646 if (flags & BPF_TRAMP_F_CALL_ORIG) { 1678 1647 im->ip_epilogue = ctx->ro_image + ctx->idx; 1679 - move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false); 1648 + move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im); 1680 1649 ret = emit_call(ctx, (const u64)__bpf_tramp_exit); 1681 1650 if (ret) 1682 1651 goto out; ··· 1686 1655 restore_args(ctx, m->nr_args, args_off); 1687 1656 1688 1657 if (save_ret) { 1689 - emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); 1690 1658 emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8)); 1659 + if (is_struct_ops) 1660 + sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 1661 + m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG); 1662 + else 1663 + emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off); 1691 1664 } 1692 1665 1693 1666 emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off); ··· 1750 1715 1751 1716 jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); 1752 1717 ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags); 1753 - if (ret > 0 && validate_code(&ctx) < 0) { 1718 + if (ret < 0) 1719 + goto out; 1720 + 1721 + if (validate_code(&ctx) < 0) { 1754 1722 ret = -EINVAL; 1755 1723 goto out; 1756 1724 } ··· 1764 1726 goto out; 1765 1727 } 1766 1728 1767 - bpf_flush_icache(ro_image, ro_image_end); 1768 1729 out: 1769 1730 kvfree(image); 1770 1731 return ret < 0 ? ret : size; ··· 1781 1744 1782 1745 ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags); 1783 1746 1784 - /* Page align */ 1785 - return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE); 1747 + return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE; 1786 1748 } 1787 1749 1788 1750 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)