Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- remove a misuse of kernel-doc comment

- use "Call trace:" for backtraces like other architectures

- implement copy_from_kernel_nofault_allowed() to fix a LKDTM test

- add a "cut here" line for prefetch aborts

- remove unnecessary Kconfig entry for FRAME_POINTER

- remove iwmmxt support for PJ4/PJ4B cores

- use bitfield helpers in ptrace to improve readability

- check if folio is reserved before flushing

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 9359/1: flush: check if the folio is reserved for no-mapping addresses
ARM: 9354/1: ptrace: Use bitfield helpers
ARM: 9352/1: iwmmxt: Remove support for PJ4/PJ4B cores
ARM: 9353/1: remove unneeded entry for CONFIG_FRAME_POINTER
ARM: 9351/1: fault: Add "cut here" line for prefetch aborts
ARM: 9350/1: fault: Implement copy_from_kernel_nofault_allowed()
ARM: 9349/1: unwind: Add missing "Call trace:" line
ARM: 9334/1: mm: init: remove misuse of kernel-doc comment

+33 -184
+2 -2
arch/arm/Kconfig
··· 505 505 506 506 config IWMMXT 507 507 bool "Enable iWMMXt support" 508 - depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B 509 - default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B 508 + depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK 509 + default y if PXA27x || PXA3xx || ARCH_MMP 510 510 help 511 511 Enable support for iWMMXt context switching at run time if 512 512 running on a CPU that supports it.
-3
arch/arm/Kconfig.debug
··· 90 90 In most cases, say N here, unless you are intending to debug the 91 91 kernel and have access to the kernel binary image. 92 92 93 - config FRAME_POINTER 94 - bool 95 - 96 93 config DEBUG_USER 97 94 bool "Verbose user fault messages" 98 95 help
+3 -2
arch/arm/include/asm/ptrace.h
··· 10 10 #include <uapi/asm/ptrace.h> 11 11 12 12 #ifndef __ASSEMBLY__ 13 + #include <linux/bitfield.h> 13 14 #include <linux/types.h> 14 15 15 16 struct pt_regs { ··· 36 35 37 36 #ifndef CONFIG_CPU_V7M 38 37 #define isa_mode(regs) \ 39 - ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \ 40 - (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT)))) 38 + (FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \ 39 + FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr)) 41 40 #else 42 41 #define isa_mode(regs) 1 /* Thumb */ 43 42 #endif
-2
arch/arm/kernel/Makefile
··· 76 76 obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o 77 77 obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o 78 78 obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o 79 - obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o 80 - obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o 81 79 obj-$(CONFIG_IWMMXT) += iwmmxt.o 82 80 obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 83 81 obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
+13 -38
arch/arm/kernel/iwmmxt.S
··· 18 18 #include <asm/assembler.h> 19 19 #include "iwmmxt.h" 20 20 21 - #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) 22 - #define PJ4(code...) code 23 - #define XSC(code...) 24 - #elif defined(CONFIG_CPU_MOHAWK) || \ 25 - defined(CONFIG_CPU_XSC3) || \ 26 - defined(CONFIG_CPU_XSCALE) 27 - #define PJ4(code...) 28 - #define XSC(code...) code 29 - #else 30 - #error "Unsupported iWMMXt architecture" 31 - #endif 32 - 33 21 #define MMX_WR0 (0x00) 34 22 #define MMX_WR1 (0x08) 35 23 #define MMX_WR2 (0x10) ··· 69 81 ENTRY(iwmmxt_task_enable) 70 82 inc_preempt_count r10, r3 71 83 72 - XSC(mrc p15, 0, r2, c15, c1, 0) 73 - PJ4(mrc p15, 0, r2, c1, c0, 2) 84 + mrc p15, 0, r2, c15, c1, 0 74 85 @ CP0 and CP1 accessible? 75 - XSC(tst r2, #0x3) 76 - PJ4(tst r2, #0xf) 86 + tst r2, #0x3 77 87 bne 4f @ if so no business here 78 88 @ enable access to CP0 and CP1 79 - XSC(orr r2, r2, #0x3) 80 - XSC(mcr p15, 0, r2, c15, c1, 0) 81 - PJ4(orr r2, r2, #0xf) 82 - PJ4(mcr p15, 0, r2, c1, c0, 2) 89 + orr r2, r2, #0x3 90 + mcr p15, 0, r2, c15, c1, 0 83 91 84 92 ldr r3, =concan_owner 85 93 ldr r2, [r0, #S_PC] @ current task pc value ··· 202 218 bne 1f @ no: quit 203 219 204 220 @ enable access to CP0 and CP1 205 - XSC(mrc p15, 0, r4, c15, c1, 0) 206 - XSC(orr r4, r4, #0x3) 207 - XSC(mcr p15, 0, r4, c15, c1, 0) 208 - PJ4(mrc p15, 0, r4, c1, c0, 2) 209 - PJ4(orr r4, r4, #0xf) 210 - PJ4(mcr p15, 0, r4, c1, c0, 2) 221 + mrc p15, 0, r4, c15, c1, 0 222 + orr r4, r4, #0x3 223 + mcr p15, 0, r4, c15, c1, 0 211 224 212 225 mov r0, #0 @ nothing to load 213 226 str r0, [r3] @ no more current owner ··· 213 232 bl concan_save 214 233 215 234 @ disable access to CP0 and CP1 216 - XSC(bic r4, r4, #0x3) 217 - XSC(mcr p15, 0, r4, c15, c1, 0) 218 - PJ4(bic r4, r4, #0xf) 219 - PJ4(mcr p15, 0, r4, c1, c0, 2) 235 + bic r4, r4, #0x3 236 + mcr p15, 0, r4, c15, c1, 0 220 237 221 238 mrc p15, 0, r2, c2, c0, 0 222 239 mov r2, r2 @ cpwait ··· 309 330 */ 310 331 ENTRY(iwmmxt_task_switch) 311 332 312 - XSC(mrc p15, 
0, r1, c15, c1, 0) 313 - PJ4(mrc p15, 0, r1, c1, c0, 2) 333 + mrc p15, 0, r1, c15, c1, 0 314 334 @ CP0 and CP1 accessible? 315 - XSC(tst r1, #0x3) 316 - PJ4(tst r1, #0xf) 335 + tst r1, #0x3 317 336 bne 1f @ yes: block them for next task 318 337 319 338 ldr r2, =concan_owner ··· 321 344 retne lr @ no: leave Concan disabled 322 345 323 346 1: @ flip Concan access 324 - XSC(eor r1, r1, #0x3) 325 - XSC(mcr p15, 0, r1, c15, c1, 0) 326 - PJ4(eor r1, r1, #0xf) 327 - PJ4(mcr p15, 0, r1, c1, c0, 2) 347 + eor r1, r1, #0x3 348 + mcr p15, 0, r1, c15, c1, 0 328 349 329 350 mrc p15, 0, r1, c2, c0, 0 330 351 sub pc, lr, r1, lsr #32 @ cpwait and return
-135
arch/arm/kernel/pj4-cp0.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * linux/arch/arm/kernel/pj4-cp0.c 4 - * 5 - * PJ4 iWMMXt coprocessor context switching and handling 6 - * 7 - * Copyright (c) 2010 Marvell International Inc. 8 - */ 9 - 10 - #include <linux/types.h> 11 - #include <linux/kernel.h> 12 - #include <linux/signal.h> 13 - #include <linux/sched.h> 14 - #include <linux/init.h> 15 - #include <linux/io.h> 16 - #include <asm/thread_notify.h> 17 - #include <asm/cputype.h> 18 - 19 - static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) 20 - { 21 - struct thread_info *thread = t; 22 - 23 - switch (cmd) { 24 - case THREAD_NOTIFY_FLUSH: 25 - /* 26 - * flush_thread() zeroes thread->fpstate, so no need 27 - * to do anything here. 28 - * 29 - * FALLTHROUGH: Ensure we don't try to overwrite our newly 30 - * initialised state information on the first fault. 31 - */ 32 - 33 - case THREAD_NOTIFY_EXIT: 34 - iwmmxt_task_release(thread); 35 - break; 36 - 37 - case THREAD_NOTIFY_SWITCH: 38 - iwmmxt_task_switch(thread); 39 - break; 40 - } 41 - 42 - return NOTIFY_DONE; 43 - } 44 - 45 - static struct notifier_block __maybe_unused iwmmxt_notifier_block = { 46 - .notifier_call = iwmmxt_do, 47 - }; 48 - 49 - 50 - static u32 __init pj4_cp_access_read(void) 51 - { 52 - u32 value; 53 - 54 - __asm__ __volatile__ ( 55 - "mrc p15, 0, %0, c1, c0, 2\n\t" 56 - : "=r" (value)); 57 - return value; 58 - } 59 - 60 - static void __init pj4_cp_access_write(u32 value) 61 - { 62 - u32 temp; 63 - 64 - __asm__ __volatile__ ( 65 - "mcr p15, 0, %1, c1, c0, 2\n\t" 66 - #ifdef CONFIG_THUMB2_KERNEL 67 - "isb\n\t" 68 - #else 69 - "mrc p15, 0, %0, c1, c0, 2\n\t" 70 - "mov %0, %0\n\t" 71 - "sub pc, pc, #4\n\t" 72 - #endif 73 - : "=r" (temp) : "r" (value)); 74 - } 75 - 76 - static int __init pj4_get_iwmmxt_version(void) 77 - { 78 - u32 cp_access, wcid; 79 - 80 - cp_access = pj4_cp_access_read(); 81 - pj4_cp_access_write(cp_access | 0xf); 82 - 83 - /* check if coprocessor 0 and 1 are available 
*/ 84 - if ((pj4_cp_access_read() & 0xf) != 0xf) { 85 - pj4_cp_access_write(cp_access); 86 - return -ENODEV; 87 - } 88 - 89 - /* read iWMMXt coprocessor id register p1, c0 */ 90 - __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid)); 91 - 92 - pj4_cp_access_write(cp_access); 93 - 94 - /* iWMMXt v1 */ 95 - if ((wcid & 0xffffff00) == 0x56051000) 96 - return 1; 97 - /* iWMMXt v2 */ 98 - if ((wcid & 0xffffff00) == 0x56052000) 99 - return 2; 100 - 101 - return -EINVAL; 102 - } 103 - 104 - /* 105 - * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy 106 - * switch code handle iWMMXt context switching. 107 - */ 108 - static int __init pj4_cp0_init(void) 109 - { 110 - u32 __maybe_unused cp_access; 111 - int vers; 112 - 113 - if (!cpu_is_pj4()) 114 - return 0; 115 - 116 - vers = pj4_get_iwmmxt_version(); 117 - if (vers < 0) 118 - return 0; 119 - 120 - #ifndef CONFIG_IWMMXT 121 - pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n"); 122 - #else 123 - cp_access = pj4_cp_access_read() & ~0xf; 124 - pj4_cp_access_write(cp_access); 125 - 126 - pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers); 127 - elf_hwcap |= HWCAP_IWMMXT; 128 - thread_register_notifier(&iwmmxt_notifier_block); 129 - register_iwmmxt_undef_handler(); 130 - #endif 131 - 132 - return 0; 133 - } 134 - 135 - late_initcall(pj4_cp0_init);
+1 -1
arch/arm/kernel/traps.c
··· 220 220 unsigned int fp, mode; 221 221 int ok = 1; 222 222 223 - printk("%sBacktrace: ", loglvl); 223 + printk("%sCall trace: ", loglvl); 224 224 225 225 if (!tsk) 226 226 tsk = current;
+2
arch/arm/kernel/unwind.c
··· 524 524 { 525 525 struct stackframe frame; 526 526 527 + printk("%sCall trace: ", loglvl); 528 + 527 529 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 528 530 529 531 if (!tsk)
+8
arch/arm/mm/fault.c
··· 25 25 26 26 #include "fault.h" 27 27 28 + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) 29 + { 30 + unsigned long addr = (unsigned long)unsafe_src; 31 + 32 + return addr >= TASK_SIZE && ULONG_MAX - addr >= size; 33 + } 34 + 28 35 #ifdef CONFIG_MMU 29 36 30 37 /* ··· 595 588 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) 596 589 return; 597 590 591 + pr_alert("8<--- cut here ---\n"); 598 592 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", 599 593 inf->name, ifsr, addr); 600 594
+3
arch/arm/mm/flush.c
··· 296 296 return; 297 297 298 298 folio = page_folio(pfn_to_page(pfn)); 299 + if (folio_test_reserved(folio)) 300 + return; 301 + 299 302 if (cache_is_vipt_aliasing()) 300 303 mapping = folio_flush_mapping(folio); 301 304 else
+1 -1
arch/arm/mm/init.c
··· 418 418 419 419 } 420 420 421 - /** 421 + /* 422 422 * update_sections_early intended to be called only through stop_machine 423 423 * framework and executed by only one CPU while all other CPUs will spin and 424 424 * wait, so no locking is required in this function.