Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'loongarch-fixes-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
"Use UAPI types in ptrace UAPI header to fix nolibc ptrace.

Fix CPU name display, NUMA node parsing, kexec/kdump, PCI init and BPF
trampoline"

* tag 'loongarch-fixes-6.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: BPF: Disable trampoline for kernel module function trace
LoongArch: Don't panic if no valid cache info for PCI
LoongArch: Mask all interrupts during kexec/kdump
LoongArch: Fix NUMA node parsing with numa_memblks
LoongArch: Consolidate CPU names in /proc/cpuinfo
LoongArch: Use UAPI types in ptrace UAPI header

+79 -91
+21
arch/loongarch/include/asm/cpu.h
··· 55 55 CPU_LAST 56 56 }; 57 57 58 + static inline char *id_to_core_name(unsigned int id) 59 + { 60 + if ((id & PRID_COMP_MASK) != PRID_COMP_LOONGSON) 61 + return "Unknown"; 62 + 63 + switch (id & PRID_SERIES_MASK) { 64 + case PRID_SERIES_LA132: 65 + return "LA132"; 66 + case PRID_SERIES_LA264: 67 + return "LA264"; 68 + case PRID_SERIES_LA364: 69 + return "LA364"; 70 + case PRID_SERIES_LA464: 71 + return "LA464"; 72 + case PRID_SERIES_LA664: 73 + return "LA664"; 74 + default: 75 + return "Unknown"; 76 + } 77 + } 78 + 58 79 #endif /* !__ASSEMBLER__ */ 59 80 60 81 /*
+18 -22
arch/loongarch/include/uapi/asm/ptrace.h
··· 10 10 11 11 #include <linux/types.h> 12 12 13 - #ifndef __KERNEL__ 14 - #include <stdint.h> 15 - #endif 16 - 17 13 /* 18 14 * For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs, 19 15 * 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR. ··· 37 41 } __attribute__((aligned(8))); 38 42 39 43 struct user_fp_state { 40 - uint64_t fpr[32]; 41 - uint64_t fcc; 42 - uint32_t fcsr; 44 + __u64 fpr[32]; 45 + __u64 fcc; 46 + __u32 fcsr; 43 47 }; 44 48 45 49 struct user_lsx_state { 46 50 /* 32 registers, 128 bits width per register. */ 47 - uint64_t vregs[32*2]; 51 + __u64 vregs[32*2]; 48 52 }; 49 53 50 54 struct user_lasx_state { 51 55 /* 32 registers, 256 bits width per register. */ 52 - uint64_t vregs[32*4]; 56 + __u64 vregs[32*4]; 53 57 }; 54 58 55 59 struct user_lbt_state { 56 - uint64_t scr[4]; 57 - uint32_t eflags; 58 - uint32_t ftop; 60 + __u64 scr[4]; 61 + __u32 eflags; 62 + __u32 ftop; 59 63 }; 60 64 61 65 struct user_watch_state { 62 - uint64_t dbg_info; 66 + __u64 dbg_info; 63 67 struct { 64 - uint64_t addr; 65 - uint64_t mask; 66 - uint32_t ctrl; 67 - uint32_t pad; 68 + __u64 addr; 69 + __u64 mask; 70 + __u32 ctrl; 71 + __u32 pad; 68 72 } dbg_regs[8]; 69 73 }; 70 74 71 75 struct user_watch_state_v2 { 72 - uint64_t dbg_info; 76 + __u64 dbg_info; 73 77 struct { 74 - uint64_t addr; 75 - uint64_t mask; 76 - uint32_t ctrl; 77 - uint32_t pad; 78 + __u64 addr; 79 + __u64 mask; 80 + __u32 ctrl; 81 + __u32 pad; 78 82 } dbg_regs[14]; 79 83 }; 80 84
+11 -23
arch/loongarch/kernel/cpu-probe.c
··· 277 277 uint32_t config; 278 278 uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]); 279 279 uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]); 280 - const char *core_name = "Unknown"; 280 + const char *core_name = id_to_core_name(c->processor_id); 281 281 282 282 switch (BIT(fls(c->isa_level) - 1)) { 283 283 case LOONGARCH_CPU_ISA_LA32R: ··· 291 291 break; 292 292 } 293 293 294 - switch (c->processor_id & PRID_SERIES_MASK) { 295 - case PRID_SERIES_LA132: 296 - core_name = "LA132"; 297 - break; 298 - case PRID_SERIES_LA264: 299 - core_name = "LA264"; 300 - break; 301 - case PRID_SERIES_LA364: 302 - core_name = "LA364"; 303 - break; 304 - case PRID_SERIES_LA464: 305 - core_name = "LA464"; 306 - break; 307 - case PRID_SERIES_LA664: 308 - core_name = "LA664"; 309 - break; 310 - } 311 - 312 294 pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name); 313 295 314 - if (!cpu_has_iocsr) 296 + if (!cpu_has_iocsr) { 297 + __cpu_full_name[cpu] = "Unknown"; 315 298 return; 316 - 317 - if (!__cpu_full_name[cpu]) 318 - __cpu_full_name[cpu] = cpu_full_name; 299 + } 319 300 320 301 *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); 321 302 *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); 303 + 304 + if (!__cpu_full_name[cpu]) { 305 + if (((char *)vendor)[0] == 0) 306 + __cpu_full_name[cpu] = "Unknown"; 307 + else 308 + __cpu_full_name[cpu] = cpu_full_name; 309 + } 322 310 323 311 config = iocsr_read32(LOONGARCH_IOCSR_FEATURES); 324 312 if (config & IOCSRF_CSRIPI)
+2
arch/loongarch/kernel/machine_kexec.c
··· 237 237 #ifdef CONFIG_SMP 238 238 crash_smp_send_stop(); 239 239 #endif 240 + machine_kexec_mask_interrupts(); 240 241 cpumask_set_cpu(crashing_cpu, &cpus_in_crash); 241 242 242 243 pr_info("Starting crashdump kernel...\n"); ··· 275 274 276 275 /* We do not want to be bothered. */ 277 276 local_irq_disable(); 277 + machine_kexec_mask_interrupts(); 278 278 279 279 pr_notice("EFI boot flag: 0x%lx\n", efi_boot); 280 280 pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
+18 -42
arch/loongarch/kernel/numa.c
··· 158 158 159 159 #ifdef CONFIG_ACPI_NUMA 160 160 161 - /* 162 - * add_numamem_region 163 - * 164 - * Add a uasable memory region described by BIOS. The 165 - * routine gets each intersection between BIOS's region 166 - * and node's region, and adds them into node's memblock 167 - * pool. 168 - * 169 - */ 170 - static void __init add_numamem_region(u64 start, u64 end, u32 type) 171 - { 172 - u32 node = pa_to_nid(start); 173 - u64 size = end - start; 174 - static unsigned long num_physpages; 161 + static unsigned long num_physpages; 175 162 176 - if (start >= end) { 177 - pr_debug("Invalid region: %016llx-%016llx\n", start, end); 178 - return; 179 - } 180 - 181 - num_physpages += (size >> PAGE_SHIFT); 182 - pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", 183 - node, type, start, size); 184 - pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", 185 - start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages); 186 - memblock_set_node(start, size, &memblock.memory, node); 187 - } 188 - 189 - static void __init init_node_memblock(void) 163 + static void __init info_node_memblock(void) 190 164 { 191 165 u32 mem_type; 192 166 u64 mem_end, mem_start, mem_size; ··· 180 206 case EFI_BOOT_SERVICES_DATA: 181 207 case EFI_PERSISTENT_MEMORY: 182 208 case EFI_CONVENTIONAL_MEMORY: 183 - add_numamem_region(mem_start, mem_end, mem_type); 209 + num_physpages += (mem_size >> PAGE_SHIFT); 210 + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", 211 + (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size); 212 + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", 213 + mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages); 184 214 break; 185 215 case EFI_PAL_CODE: 186 216 case EFI_UNUSABLE_MEMORY: 187 217 case EFI_ACPI_RECLAIM_MEMORY: 188 - add_numamem_region(mem_start, mem_end, mem_type); 218 + num_physpages += (mem_size >> PAGE_SHIFT); 219 + pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n", 220 + (u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size); 221 + pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", 222 + mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages); 189 223 fallthrough; 190 224 case EFI_RESERVED_TYPE: 191 225 case EFI_RUNTIME_SERVICES_CODE: ··· 231 249 for (i = 0; i < NR_CPUS; i++) 232 250 set_cpuid_to_node(i, NUMA_NO_NODE); 233 251 234 - numa_reset_distance(); 235 - nodes_clear(numa_nodes_parsed); 236 - nodes_clear(node_possible_map); 237 - nodes_clear(node_online_map); 238 - WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX)); 239 - 240 252 /* Parse SRAT and SLIT if provided by firmware. */ 241 - ret = acpi_disabled ? fake_numa_init() : acpi_numa_init(); 253 + if (!acpi_disabled) 254 + ret = numa_memblks_init(acpi_numa_init, false); 255 + else 256 + ret = numa_memblks_init(fake_numa_init, false); 257 + 242 258 if (ret < 0) 243 259 return ret; 244 260 245 - node_possible_map = numa_nodes_parsed; 246 - if (WARN_ON(nodes_empty(node_possible_map))) 247 - return -EINVAL; 248 - 249 - init_node_memblock(); 261 + info_node_memblock(); 250 262 if (!memblock_validate_numa_coverage(SZ_1M)) 251 263 return -EINVAL; 252 264
+2
arch/loongarch/kernel/proc.c
··· 17 17 { 18 18 unsigned long n = (unsigned long) v - 1; 19 19 unsigned int isa = cpu_data[n].isa_level; 20 + unsigned int prid = cpu_data[n].processor_id; 20 21 unsigned int version = cpu_data[n].processor_id & 0xff; 21 22 unsigned int fp_version = cpu_data[n].fpu_vers; 22 23 ··· 38 37 seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id); 39 38 seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]); 40 39 seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]); 40 + seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid); 41 41 seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); 42 42 seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version); 43 43 seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
+3
arch/loongarch/net/bpf_jit.c
··· 1624 1624 /* Direct jump skips 5 NOP instructions */ 1625 1625 else if (is_bpf_text_address((unsigned long)orig_call)) 1626 1626 orig_call += LOONGARCH_BPF_FENTRY_NBYTES; 1627 + /* Module tracing not supported - cause kernel lockups */ 1628 + else if (is_module_text_address((unsigned long)orig_call)) 1629 + return -ENOTSUPP; 1627 1630 1628 1631 if (flags & BPF_TRAMP_F_CALL_ORIG) { 1629 1632 move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
+4 -4
arch/loongarch/pci/pci.c
··· 50 50 */ 51 51 lsize = cpu_last_level_cache_line_size(); 52 52 53 - BUG_ON(!lsize); 53 + if (lsize) { 54 + pci_dfl_cache_line_size = lsize >> 2; 54 55 55 - pci_dfl_cache_line_size = lsize >> 2; 56 - 57 - pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); 56 + pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize); 57 + } 58 58 59 59 return 0; 60 60 }