Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: ep93xx: remove MaverickCrunch support

The MaverickCrunch support for ep93xx never made it into glibc and
was removed from gcc in its 4.8 release in 2012. It is now one of
the last parts of arch/arm/ that fails to build with the clang
integrated assembler, which is unlikely to ever want to support it.

The two alternatives are to force the use of binutils/gas when
building the crunch support, or to remove it entirely.

According to Hartley Sweeten:

"Martin Guy did a lot of work trying to get the maverick crunch working
but I was never able to successfully use it for anything. It "kind"
of works but depending on the EP93xx silicon revision there are still
a number of hardware bugs that either give imprecise or garbage results.

I have no problem with removing the kernel support for the maverick
crunch."

Unless someone else comes up with a good reason to keep it around,
remove it now. This touches mostly the ep93xx platform, but removes
a bit of code from ARM common ptrace and signal frame handling as well.

If there are remaining users of MaverickCrunch, they can use LTS
kernels for at least another five years before kernel support ends.

Link: https://lore.kernel.org/linux-arm-kernel/20210802141245.1146772-1-arnd@kernel.org/
Link: https://lore.kernel.org/linux-arm-kernel/20210226164345.3889993-1-arnd@kernel.org/
Link: https://github.com/ClangBuiltLinux/linux/issues/1272
Link: https://gcc.gnu.org/legacy-ml/gcc/2008-03/msg01063.html
Cc: "Martin Guy" <martinwguy@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+4 -557
-1
arch/arm/configs/ep93xx_defconfig
··· 12 12 # CONFIG_BLK_DEV_BSG is not set 13 13 CONFIG_PARTITION_ADVANCED=y 14 14 CONFIG_ARCH_EP93XX=y 15 - CONFIG_CRUNCH=y 16 15 CONFIG_MACH_ADSSPHERE=y 17 16 CONFIG_MACH_EDB9301=y 18 17 CONFIG_MACH_EDB9302=y
-8
arch/arm/include/asm/fpstate.h
··· 77 77 78 78 #define FP_SIZE (sizeof(union fp_state) / sizeof(int)) 79 79 80 - struct crunch_state { 81 - unsigned int mvdx[16][2]; 82 - unsigned int mvax[4][3]; 83 - unsigned int dspsc[2]; 84 - }; 85 - 86 - #define CRUNCH_SIZE sizeof(struct crunch_state) 87 - 88 80 #endif 89 81 90 82 #endif
-8
arch/arm/include/asm/thread_info.h
··· 65 65 __u32 syscall; /* syscall number */ 66 66 __u8 used_cp[16]; /* thread used copro */ 67 67 unsigned long tp_value[2]; /* TLS registers */ 68 - #ifdef CONFIG_CRUNCH 69 - struct crunch_state crunchstate; 70 - #endif 71 68 union fp_state fpstate __attribute__((aligned(8))); 72 69 union vfp_state vfpstate; 73 70 #ifdef CONFIG_ARM_THUMBEE ··· 103 106 #define thread_saved_fp(tsk) \ 104 107 ((unsigned long)(task_thread_info(tsk)->cpu_context.r7)) 105 108 #endif 106 - 107 - extern void crunch_task_disable(struct thread_info *); 108 - extern void crunch_task_copy(struct thread_info *, void *); 109 - extern void crunch_task_restore(struct thread_info *, void *); 110 - extern void crunch_task_release(struct thread_info *); 111 109 112 110 extern void iwmmxt_task_disable(struct thread_info *); 113 111 extern void iwmmxt_task_copy(struct thread_info *, void *);
-14
arch/arm/include/asm/ucontext.h
··· 43 43 */ 44 44 #define DUMMY_MAGIC 0xb0d9ed01 45 45 46 - #ifdef CONFIG_CRUNCH 47 - #define CRUNCH_MAGIC 0x5065cf03 48 - #define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8) 49 - 50 - struct crunch_sigframe { 51 - unsigned long magic; 52 - unsigned long size; 53 - struct crunch_state storage; 54 - } __attribute__((__aligned__(8))); 55 - #endif 56 - 57 46 #ifdef CONFIG_IWMMXT 58 47 /* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */ 59 48 #define IWMMXT_MAGIC 0x12ef842a ··· 81 92 * one of these. 82 93 */ 83 94 struct aux_sigframe { 84 - #ifdef CONFIG_CRUNCH 85 - struct crunch_sigframe crunch; 86 - #endif 87 95 #ifdef CONFIG_IWMMXT 88 96 struct iwmmxt_sigframe iwmmxt; 89 97 #endif
+1 -1
arch/arm/include/uapi/asm/hwcap.h
··· 15 15 #define HWCAP_EDSP (1 << 7) 16 16 #define HWCAP_JAVA (1 << 8) 17 17 #define HWCAP_IWMMXT (1 << 9) 18 - #define HWCAP_CRUNCH (1 << 10) 18 + #define HWCAP_CRUNCH (1 << 10) /* Obsolete */ 19 19 #define HWCAP_THUMBEE (1 << 11) 20 20 #define HWCAP_NEON (1 << 12) 21 21 #define HWCAP_VFPv3 (1 << 13)
+2 -2
arch/arm/include/uapi/asm/ptrace.h
··· 26 26 #define PTRACE_GET_THREAD_AREA 22 27 27 #define PTRACE_SET_SYSCALL 23 28 28 /* PTRACE_SYSCALL is 24 */ 29 - #define PTRACE_GETCRUNCHREGS 25 30 - #define PTRACE_SETCRUNCHREGS 26 29 + #define PTRACE_GETCRUNCHREGS 25 /* obsolete */ 30 + #define PTRACE_SETCRUNCHREGS 26 /* obsolete */ 31 31 #define PTRACE_GETVFPREGS 27 32 32 #define PTRACE_SETVFPREGS 28 33 33 #define PTRACE_GETHBPREGS 29
-3
arch/arm/kernel/asm-offsets.c
··· 63 63 #ifdef CONFIG_IWMMXT 64 64 DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); 65 65 #endif 66 - #ifdef CONFIG_CRUNCH 67 - DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate)); 68 - #endif 69 66 #ifdef CONFIG_STACKPROTECTOR_PER_TASK 70 67 DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary)); 71 68 #endif
-6
arch/arm/kernel/entry-armv.S
··· 618 618 W(b) do_fpe @ CP#1 (FPE) 619 619 W(b) do_fpe @ CP#2 (FPE) 620 620 ret.w lr @ CP#3 621 - #ifdef CONFIG_CRUNCH 622 - b crunch_task_enable @ CP#4 (MaverickCrunch) 623 - b crunch_task_enable @ CP#5 (MaverickCrunch) 624 - b crunch_task_enable @ CP#6 (MaverickCrunch) 625 - #else 626 621 ret.w lr @ CP#4 627 622 ret.w lr @ CP#5 628 623 ret.w lr @ CP#6 629 - #endif 630 624 ret.w lr @ CP#7 631 625 ret.w lr @ CP#8 632 626 ret.w lr @ CP#9
-36
arch/arm/kernel/ptrace.c
··· 318 318 319 319 #endif 320 320 321 - #ifdef CONFIG_CRUNCH 322 - /* 323 - * Get the child Crunch state. 324 - */ 325 - static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) 326 - { 327 - struct thread_info *thread = task_thread_info(tsk); 328 - 329 - crunch_task_disable(thread); /* force it to ram */ 330 - return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) 331 - ? -EFAULT : 0; 332 - } 333 - 334 - /* 335 - * Set the child Crunch state. 336 - */ 337 - static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) 338 - { 339 - struct thread_info *thread = task_thread_info(tsk); 340 - 341 - crunch_task_release(thread); /* force a reload */ 342 - return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) 343 - ? -EFAULT : 0; 344 - } 345 - #endif 346 - 347 321 #ifdef CONFIG_HAVE_HW_BREAKPOINT 348 322 /* 349 323 * Convert a virtual register number into an index for a thread_info ··· 788 814 task_thread_info(child)->syscall = data; 789 815 ret = 0; 790 816 break; 791 - 792 - #ifdef CONFIG_CRUNCH 793 - case PTRACE_GETCRUNCHREGS: 794 - ret = ptrace_getcrunchregs(child, datap); 795 - break; 796 - 797 - case PTRACE_SETCRUNCHREGS: 798 - ret = ptrace_setcrunchregs(child, datap); 799 - break; 800 - #endif 801 817 802 818 #ifdef CONFIG_VFP 803 819 case PTRACE_GETVFPREGS:
-42
arch/arm/kernel/signal.c
··· 25 25 26 26 static unsigned long signal_return_offset; 27 27 28 - #ifdef CONFIG_CRUNCH 29 - static int preserve_crunch_context(struct crunch_sigframe __user *frame) 30 - { 31 - char kbuf[sizeof(*frame) + 8]; 32 - struct crunch_sigframe *kframe; 33 - 34 - /* the crunch context must be 64 bit aligned */ 35 - kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); 36 - kframe->magic = CRUNCH_MAGIC; 37 - kframe->size = CRUNCH_STORAGE_SIZE; 38 - crunch_task_copy(current_thread_info(), &kframe->storage); 39 - return __copy_to_user(frame, kframe, sizeof(*frame)); 40 - } 41 - 42 - static int restore_crunch_context(char __user **auxp) 43 - { 44 - struct crunch_sigframe __user *frame = 45 - (struct crunch_sigframe __user *)*auxp; 46 - char kbuf[sizeof(*frame) + 8]; 47 - struct crunch_sigframe *kframe; 48 - 49 - /* the crunch context must be 64 bit aligned */ 50 - kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); 51 - if (__copy_from_user(kframe, frame, sizeof(*frame))) 52 - return -1; 53 - if (kframe->magic != CRUNCH_MAGIC || 54 - kframe->size != CRUNCH_STORAGE_SIZE) 55 - return -1; 56 - *auxp += CRUNCH_STORAGE_SIZE; 57 - crunch_task_restore(current_thread_info(), &kframe->storage); 58 - return 0; 59 - } 60 - #endif 61 - 62 28 #ifdef CONFIG_IWMMXT 63 29 64 30 static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) ··· 171 205 err |= !valid_user_regs(regs); 172 206 173 207 aux = (char __user *) sf->uc.uc_regspace; 174 - #ifdef CONFIG_CRUNCH 175 - if (err == 0) 176 - err |= restore_crunch_context(&aux); 177 - #endif 178 208 #ifdef CONFIG_IWMMXT 179 209 if (err == 0) 180 210 err |= restore_iwmmxt_context(&aux); ··· 283 321 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); 284 322 285 323 aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; 286 - #ifdef CONFIG_CRUNCH 287 - if (err == 0) 288 - err |= preserve_crunch_context(&aux->crunch); 289 - #endif 290 324 #ifdef CONFIG_IWMMXT 291 325 if (err == 0) 292 326 
err |= preserve_iwmmxt_context(&aux->iwmmxt);
-5
arch/arm/mach-ep93xx/Kconfig
··· 9 9 select SOC_BUS 10 10 select LEDS_GPIO_REGISTER 11 11 12 - config CRUNCH 13 - bool "Support for MaverickCrunch" 14 - help 15 - Enable kernel support for MaverickCrunch. 16 - 17 12 comment "EP93xx Platforms" 18 13 19 14 config MACH_ADSSPHERE
-3
arch/arm/mach-ep93xx/Makefile
··· 6 6 7 7 obj-$(CONFIG_EP93XX_DMA) += dma.o 8 8 9 - obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o 10 - AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 11 - 12 9 obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o 13 10 obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o 14 11 obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
-1
arch/arm/mach-ep93xx/adssphere.c
··· 36 36 .init_irq = ep93xx_init_irq, 37 37 .init_time = ep93xx_timer_init, 38 38 .init_machine = adssphere_init_machine, 39 - .init_late = ep93xx_init_late, 40 39 .restart = ep93xx_restart, 41 40 MACHINE_END
-5
arch/arm/mach-ep93xx/core.c
··· 1004 1004 while (1) 1005 1005 ; 1006 1006 } 1007 - 1008 - void __init ep93xx_init_late(void) 1009 - { 1010 - crunch_init(); 1011 - }
-310
arch/arm/mach-ep93xx/crunch-bits.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * arch/arm/kernel/crunch-bits.S 4 - * Cirrus MaverickCrunch context switching and handling 5 - * 6 - * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> 7 - * 8 - * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is 9 - * Copyright (c) 2003-2004, MontaVista Software, Inc. 10 - */ 11 - 12 - #include <linux/linkage.h> 13 - #include <asm/ptrace.h> 14 - #include <asm/thread_info.h> 15 - #include <asm/asm-offsets.h> 16 - #include <asm/assembler.h> 17 - #include <mach/ep93xx-regs.h> 18 - 19 - /* 20 - * We can't use hex constants here due to a bug in gas. 21 - */ 22 - #define CRUNCH_MVDX0 0 23 - #define CRUNCH_MVDX1 8 24 - #define CRUNCH_MVDX2 16 25 - #define CRUNCH_MVDX3 24 26 - #define CRUNCH_MVDX4 32 27 - #define CRUNCH_MVDX5 40 28 - #define CRUNCH_MVDX6 48 29 - #define CRUNCH_MVDX7 56 30 - #define CRUNCH_MVDX8 64 31 - #define CRUNCH_MVDX9 72 32 - #define CRUNCH_MVDX10 80 33 - #define CRUNCH_MVDX11 88 34 - #define CRUNCH_MVDX12 96 35 - #define CRUNCH_MVDX13 104 36 - #define CRUNCH_MVDX14 112 37 - #define CRUNCH_MVDX15 120 38 - #define CRUNCH_MVAX0L 128 39 - #define CRUNCH_MVAX0M 132 40 - #define CRUNCH_MVAX0H 136 41 - #define CRUNCH_MVAX1L 140 42 - #define CRUNCH_MVAX1M 144 43 - #define CRUNCH_MVAX1H 148 44 - #define CRUNCH_MVAX2L 152 45 - #define CRUNCH_MVAX2M 156 46 - #define CRUNCH_MVAX2H 160 47 - #define CRUNCH_MVAX3L 164 48 - #define CRUNCH_MVAX3M 168 49 - #define CRUNCH_MVAX3H 172 50 - #define CRUNCH_DSPSC 176 51 - 52 - #define CRUNCH_SIZE 184 53 - 54 - .text 55 - 56 - /* 57 - * Lazy switching of crunch coprocessor context 58 - * 59 - * r10 = struct thread_info pointer 60 - * r9 = ret_from_exception 61 - * lr = undefined instr exit 62 - * 63 - * called from prefetch exception handler with interrupts enabled 64 - */ 65 - ENTRY(crunch_task_enable) 66 - inc_preempt_count r10, r3 67 - 68 - ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr 69 - 70 - ldr r1, [r8, #0x80] 
71 - tst r1, #0x00800000 @ access to crunch enabled? 72 - bne 2f @ if so no business here 73 - mov r3, #0xaa @ unlock syscon swlock 74 - str r3, [r8, #0xc0] 75 - orr r1, r1, #0x00800000 @ enable access to crunch 76 - str r1, [r8, #0x80] 77 - 78 - ldr r3, =crunch_owner 79 - add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area 80 - ldr r2, [sp, #60] @ current task pc value 81 - ldr r1, [r3] @ get current crunch owner 82 - str r0, [r3] @ this task now owns crunch 83 - sub r2, r2, #4 @ adjust pc back 84 - str r2, [sp, #60] 85 - 86 - ldr r2, [r8, #0x80] 87 - mov r2, r2 @ flush out enable (@@@) 88 - 89 - teq r1, #0 @ test for last ownership 90 - mov lr, r9 @ normal exit from exception 91 - beq crunch_load @ no owner, skip save 92 - 93 - crunch_save: 94 - cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers 95 - cfstr64 mvdx1, [r1, #CRUNCH_MVDX1] 96 - cfstr64 mvdx2, [r1, #CRUNCH_MVDX2] 97 - cfstr64 mvdx3, [r1, #CRUNCH_MVDX3] 98 - cfstr64 mvdx4, [r1, #CRUNCH_MVDX4] 99 - cfstr64 mvdx5, [r1, #CRUNCH_MVDX5] 100 - cfstr64 mvdx6, [r1, #CRUNCH_MVDX6] 101 - cfstr64 mvdx7, [r1, #CRUNCH_MVDX7] 102 - cfstr64 mvdx8, [r1, #CRUNCH_MVDX8] 103 - cfstr64 mvdx9, [r1, #CRUNCH_MVDX9] 104 - cfstr64 mvdx10, [r1, #CRUNCH_MVDX10] 105 - cfstr64 mvdx11, [r1, #CRUNCH_MVDX11] 106 - cfstr64 mvdx12, [r1, #CRUNCH_MVDX12] 107 - cfstr64 mvdx13, [r1, #CRUNCH_MVDX13] 108 - cfstr64 mvdx14, [r1, #CRUNCH_MVDX14] 109 - cfstr64 mvdx15, [r1, #CRUNCH_MVDX15] 110 - 111 - #ifdef __ARMEB__ 112 - #error fix me for ARMEB 113 - #endif 114 - 115 - cfmv32al mvfx0, mvax0 @ save 72b accumulators 116 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L] 117 - cfmv32am mvfx0, mvax0 118 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M] 119 - cfmv32ah mvfx0, mvax0 120 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H] 121 - cfmv32al mvfx0, mvax1 122 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L] 123 - cfmv32am mvfx0, mvax1 124 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M] 125 - cfmv32ah mvfx0, mvax1 126 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H] 127 - cfmv32al mvfx0, mvax2 128 - 
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L] 129 - cfmv32am mvfx0, mvax2 130 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M] 131 - cfmv32ah mvfx0, mvax2 132 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H] 133 - cfmv32al mvfx0, mvax3 134 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L] 135 - cfmv32am mvfx0, mvax3 136 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M] 137 - cfmv32ah mvfx0, mvax3 138 - cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H] 139 - 140 - cfmv32sc mvdx0, dspsc @ save status word 141 - cfstr64 mvdx0, [r1, #CRUNCH_DSPSC] 142 - 143 - teq r0, #0 @ anything to load? 144 - cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered 145 - beq 1f 146 - 147 - crunch_load: 148 - cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word 149 - cfmvsc32 dspsc, mvdx0 150 - 151 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators 152 - cfmval32 mvax0, mvfx0 153 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M] 154 - cfmvam32 mvax0, mvfx0 155 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H] 156 - cfmvah32 mvax0, mvfx0 157 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L] 158 - cfmval32 mvax1, mvfx0 159 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M] 160 - cfmvam32 mvax1, mvfx0 161 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H] 162 - cfmvah32 mvax1, mvfx0 163 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L] 164 - cfmval32 mvax2, mvfx0 165 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M] 166 - cfmvam32 mvax2, mvfx0 167 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H] 168 - cfmvah32 mvax2, mvfx0 169 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L] 170 - cfmval32 mvax3, mvfx0 171 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M] 172 - cfmvam32 mvax3, mvfx0 173 - cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H] 174 - cfmvah32 mvax3, mvfx0 175 - 176 - cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers 177 - cfldr64 mvdx1, [r0, #CRUNCH_MVDX1] 178 - cfldr64 mvdx2, [r0, #CRUNCH_MVDX2] 179 - cfldr64 mvdx3, [r0, #CRUNCH_MVDX3] 180 - cfldr64 mvdx4, [r0, #CRUNCH_MVDX4] 181 - cfldr64 mvdx5, [r0, #CRUNCH_MVDX5] 182 - cfldr64 mvdx6, [r0, #CRUNCH_MVDX6] 183 - cfldr64 mvdx7, [r0, #CRUNCH_MVDX7] 184 - cfldr64 mvdx8, [r0, #CRUNCH_MVDX8] 185 - cfldr64 
mvdx9, [r0, #CRUNCH_MVDX9] 186 - cfldr64 mvdx10, [r0, #CRUNCH_MVDX10] 187 - cfldr64 mvdx11, [r0, #CRUNCH_MVDX11] 188 - cfldr64 mvdx12, [r0, #CRUNCH_MVDX12] 189 - cfldr64 mvdx13, [r0, #CRUNCH_MVDX13] 190 - cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] 191 - cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] 192 - 193 - 1: 194 - #ifdef CONFIG_PREEMPT_COUNT 195 - get_thread_info r10 196 - #endif 197 - 2: dec_preempt_count r10, r3 198 - ret lr 199 - 200 - /* 201 - * Back up crunch regs to save area and disable access to them 202 - * (mainly for gdb or sleep mode usage) 203 - * 204 - * r0 = struct thread_info pointer of target task or NULL for any 205 - */ 206 - ENTRY(crunch_task_disable) 207 - stmfd sp!, {r4, r5, lr} 208 - 209 - mrs ip, cpsr 210 - orr r2, ip, #PSR_I_BIT @ disable interrupts 211 - msr cpsr_c, r2 212 - 213 - ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr 214 - 215 - ldr r3, =crunch_owner 216 - add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area 217 - ldr r1, [r3] @ get current crunch owner 218 - teq r1, #0 @ any current owner? 219 - beq 1f @ no: quit 220 - teq r0, #0 @ any owner? 221 - teqne r1, r2 @ or specified one? 
222 - bne 1f @ no: quit 223 - 224 - ldr r5, [r4, #0x80] @ enable access to crunch 225 - mov r2, #0xaa 226 - str r2, [r4, #0xc0] 227 - orr r5, r5, #0x00800000 228 - str r5, [r4, #0x80] 229 - 230 - mov r0, #0 @ nothing to load 231 - str r0, [r3] @ no more current owner 232 - ldr r2, [r4, #0x80] @ flush out enable (@@@) 233 - mov r2, r2 234 - bl crunch_save 235 - 236 - mov r2, #0xaa @ disable access to crunch 237 - str r2, [r4, #0xc0] 238 - bic r5, r5, #0x00800000 239 - str r5, [r4, #0x80] 240 - ldr r5, [r4, #0x80] @ flush out enable (@@@) 241 - mov r5, r5 242 - 243 - 1: msr cpsr_c, ip @ restore interrupt mode 244 - ldmfd sp!, {r4, r5, pc} 245 - 246 - /* 247 - * Copy crunch state to given memory address 248 - * 249 - * r0 = struct thread_info pointer of target task 250 - * r1 = memory address where to store crunch state 251 - * 252 - * this is called mainly in the creation of signal stack frames 253 - */ 254 - ENTRY(crunch_task_copy) 255 - mrs ip, cpsr 256 - orr r2, ip, #PSR_I_BIT @ disable interrupts 257 - msr cpsr_c, r2 258 - 259 - ldr r3, =crunch_owner 260 - add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area 261 - ldr r3, [r3] @ get current crunch owner 262 - teq r2, r3 @ does this task own it... 
263 - beq 1f 264 - 265 - @ current crunch values are in the task save area 266 - msr cpsr_c, ip @ restore interrupt mode 267 - mov r0, r1 268 - mov r1, r2 269 - mov r2, #CRUNCH_SIZE 270 - b memcpy 271 - 272 - 1: @ this task owns crunch regs -- grab a copy from there 273 - mov r0, #0 @ nothing to load 274 - mov r3, lr @ preserve return address 275 - bl crunch_save 276 - msr cpsr_c, ip @ restore interrupt mode 277 - ret r3 278 - 279 - /* 280 - * Restore crunch state from given memory address 281 - * 282 - * r0 = struct thread_info pointer of target task 283 - * r1 = memory address where to get crunch state from 284 - * 285 - * this is used to restore crunch state when unwinding a signal stack frame 286 - */ 287 - ENTRY(crunch_task_restore) 288 - mrs ip, cpsr 289 - orr r2, ip, #PSR_I_BIT @ disable interrupts 290 - msr cpsr_c, r2 291 - 292 - ldr r3, =crunch_owner 293 - add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area 294 - ldr r3, [r3] @ get current crunch owner 295 - teq r2, r3 @ does this task own it... 296 - beq 1f 297 - 298 - @ this task doesn't own crunch regs -- use its save area 299 - msr cpsr_c, ip @ restore interrupt mode 300 - mov r0, r2 301 - mov r2, #CRUNCH_SIZE 302 - b memcpy 303 - 304 - 1: @ this task owns crunch regs -- load them directly 305 - mov r0, r1 306 - mov r1, #0 @ nothing to save 307 - mov r3, lr @ preserve return address 308 - bl crunch_load 309 - msr cpsr_c, ip @ restore interrupt mode 310 - ret r3
-86
arch/arm/mach-ep93xx/crunch.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * arch/arm/kernel/crunch.c 4 - * Cirrus MaverickCrunch context switching and handling 5 - * 6 - * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> 7 - */ 8 - 9 - #include <linux/module.h> 10 - #include <linux/types.h> 11 - #include <linux/kernel.h> 12 - #include <linux/signal.h> 13 - #include <linux/sched.h> 14 - #include <linux/init.h> 15 - #include <linux/io.h> 16 - 17 - #include <asm/thread_notify.h> 18 - 19 - #include "soc.h" 20 - 21 - struct crunch_state *crunch_owner; 22 - 23 - void crunch_task_release(struct thread_info *thread) 24 - { 25 - local_irq_disable(); 26 - if (crunch_owner == &thread->crunchstate) 27 - crunch_owner = NULL; 28 - local_irq_enable(); 29 - } 30 - 31 - static int crunch_enabled(u32 devcfg) 32 - { 33 - return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA); 34 - } 35 - 36 - static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) 37 - { 38 - struct thread_info *thread = (struct thread_info *)t; 39 - struct crunch_state *crunch_state; 40 - u32 devcfg; 41 - 42 - crunch_state = &thread->crunchstate; 43 - 44 - switch (cmd) { 45 - case THREAD_NOTIFY_FLUSH: 46 - memset(crunch_state, 0, sizeof(*crunch_state)); 47 - 48 - /* 49 - * FALLTHROUGH: Ensure we don't try to overwrite our newly 50 - * initialised state information on the first fault. 51 - */ 52 - fallthrough; 53 - 54 - case THREAD_NOTIFY_EXIT: 55 - crunch_task_release(thread); 56 - break; 57 - 58 - case THREAD_NOTIFY_SWITCH: 59 - devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG); 60 - if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { 61 - /* 62 - * We don't use ep93xx_syscon_swlocked_write() here 63 - * because we are on the context switch path and 64 - * preemption is already disabled. 
65 - */ 66 - devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA; 67 - __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); 68 - __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG); 69 - } 70 - break; 71 - } 72 - 73 - return NOTIFY_DONE; 74 - } 75 - 76 - static struct notifier_block crunch_notifier_block = { 77 - .notifier_call = crunch_do, 78 - }; 79 - 80 - int __init crunch_init(void) 81 - { 82 - thread_register_notifier(&crunch_notifier_block); 83 - elf_hwcap |= HWCAP_CRUNCH; 84 - 85 - return 0; 86 - }
-8
arch/arm/mach-ep93xx/edb93xx.c
··· 247 247 .init_irq = ep93xx_init_irq, 248 248 .init_time = ep93xx_timer_init, 249 249 .init_machine = edb93xx_init_machine, 250 - .init_late = ep93xx_init_late, 251 250 .restart = ep93xx_restart, 252 251 MACHINE_END 253 252 #endif ··· 259 260 .init_irq = ep93xx_init_irq, 260 261 .init_time = ep93xx_timer_init, 261 262 .init_machine = edb93xx_init_machine, 262 - .init_late = ep93xx_init_late, 263 263 .restart = ep93xx_restart, 264 264 MACHINE_END 265 265 #endif ··· 271 273 .init_irq = ep93xx_init_irq, 272 274 .init_time = ep93xx_timer_init, 273 275 .init_machine = edb93xx_init_machine, 274 - .init_late = ep93xx_init_late, 275 276 .restart = ep93xx_restart, 276 277 MACHINE_END 277 278 #endif ··· 283 286 .init_irq = ep93xx_init_irq, 284 287 .init_time = ep93xx_timer_init, 285 288 .init_machine = edb93xx_init_machine, 286 - .init_late = ep93xx_init_late, 287 289 .restart = ep93xx_restart, 288 290 MACHINE_END 289 291 #endif ··· 295 299 .init_irq = ep93xx_init_irq, 296 300 .init_time = ep93xx_timer_init, 297 301 .init_machine = edb93xx_init_machine, 298 - .init_late = ep93xx_init_late, 299 302 .restart = ep93xx_restart, 300 303 MACHINE_END 301 304 #endif ··· 307 312 .init_irq = ep93xx_init_irq, 308 313 .init_time = ep93xx_timer_init, 309 314 .init_machine = edb93xx_init_machine, 310 - .init_late = ep93xx_init_late, 311 315 .restart = ep93xx_restart, 312 316 MACHINE_END 313 317 #endif ··· 319 325 .init_irq = ep93xx_init_irq, 320 326 .init_time = ep93xx_timer_init, 321 327 .init_machine = edb93xx_init_machine, 322 - .init_late = ep93xx_init_late, 323 328 .restart = ep93xx_restart, 324 329 MACHINE_END 325 330 #endif ··· 331 338 .init_irq = ep93xx_init_irq, 332 339 .init_time = ep93xx_timer_init, 333 340 .init_machine = edb93xx_init_machine, 334 - .init_late = ep93xx_init_late, 335 341 .restart = ep93xx_restart, 336 342 MACHINE_END 337 343 #endif
-1
arch/arm/mach-ep93xx/gesbc9312.c
··· 36 36 .init_irq = ep93xx_init_irq, 37 37 .init_time = ep93xx_timer_init, 38 38 .init_machine = gesbc9312_init_machine, 39 - .init_late = ep93xx_init_late, 40 39 .restart = ep93xx_restart, 41 40 MACHINE_END
-4
arch/arm/mach-ep93xx/micro9.c
··· 80 80 .init_irq = ep93xx_init_irq, 81 81 .init_time = ep93xx_timer_init, 82 82 .init_machine = micro9_init_machine, 83 - .init_late = ep93xx_init_late, 84 83 .restart = ep93xx_restart, 85 84 MACHINE_END 86 85 #endif ··· 92 93 .init_irq = ep93xx_init_irq, 93 94 .init_time = ep93xx_timer_init, 94 95 .init_machine = micro9_init_machine, 95 - .init_late = ep93xx_init_late, 96 96 .restart = ep93xx_restart, 97 97 MACHINE_END 98 98 #endif ··· 104 106 .init_irq = ep93xx_init_irq, 105 107 .init_time = ep93xx_timer_init, 106 108 .init_machine = micro9_init_machine, 107 - .init_late = ep93xx_init_late, 108 109 .restart = ep93xx_restart, 109 110 MACHINE_END 110 111 #endif ··· 116 119 .init_irq = ep93xx_init_irq, 117 120 .init_time = ep93xx_timer_init, 118 121 .init_machine = micro9_init_machine, 119 - .init_late = ep93xx_init_late, 120 122 .restart = ep93xx_restart, 121 123 MACHINE_END 122 124 #endif
-7
arch/arm/mach-ep93xx/platform.h
··· 38 38 extern void ep93xx_timer_init(void); 39 39 40 40 void ep93xx_restart(enum reboot_mode, const char *); 41 - void ep93xx_init_late(void); 42 - 43 - #ifdef CONFIG_CRUNCH 44 - int crunch_init(void); 45 - #else 46 - static inline int crunch_init(void) { return 0; } 47 - #endif 48 41 49 42 #endif
-1
arch/arm/mach-ep93xx/simone.c
··· 123 123 .init_irq = ep93xx_init_irq, 124 124 .init_time = ep93xx_timer_init, 125 125 .init_machine = simone_init_machine, 126 - .init_late = ep93xx_init_late, 127 126 .restart = ep93xx_restart, 128 127 MACHINE_END
-1
arch/arm/mach-ep93xx/snappercl15.c
··· 157 157 .init_irq = ep93xx_init_irq, 158 158 .init_time = ep93xx_timer_init, 159 159 .init_machine = snappercl15_init_machine, 160 - .init_late = ep93xx_init_late, 161 160 .restart = ep93xx_restart, 162 161 MACHINE_END
-2
arch/arm/mach-ep93xx/ts72xx.c
··· 354 354 .init_irq = ep93xx_init_irq, 355 355 .init_time = ep93xx_timer_init, 356 356 .init_machine = ts72xx_init_machine, 357 - .init_late = ep93xx_init_late, 358 357 .restart = ep93xx_restart, 359 358 MACHINE_END 360 359 ··· 417 418 .init_irq = ep93xx_init_irq, 418 419 .init_time = ep93xx_timer_init, 419 420 .init_machine = bk3_init_machine, 420 - .init_late = ep93xx_init_late, 421 421 .restart = ep93xx_restart, 422 422 MACHINE_END
-1
arch/arm/mach-ep93xx/vision_ep9307.c
··· 306 306 .init_irq = ep93xx_init_irq, 307 307 .init_time = ep93xx_timer_init, 308 308 .init_machine = vision_init_machine, 309 - .init_late = ep93xx_init_late, 310 309 .restart = ep93xx_restart, 311 310 MACHINE_END
+1 -1
arch/arm64/include/asm/hwcap.h
··· 18 18 #define COMPAT_HWCAP_EDSP (1 << 7) 19 19 #define COMPAT_HWCAP_JAVA (1 << 8) 20 20 #define COMPAT_HWCAP_IWMMXT (1 << 9) 21 - #define COMPAT_HWCAP_CRUNCH (1 << 10) 21 + #define COMPAT_HWCAP_CRUNCH (1 << 10) /* Obsolete */ 22 22 #define COMPAT_HWCAP_THUMBEE (1 << 11) 23 23 #define COMPAT_HWCAP_NEON (1 << 12) 24 24 #define COMPAT_HWCAP_VFPv3 (1 << 13)