Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm:
[ARM] 3672/1: PXA: don't probe output GPIOs for interrupt
[ARM] 3671/1: ep93xx: add cirrus logic edb9315 support
[ARM] 3370/2: ep93xx: add crunch support
[ARM] 3665/1: crunch: add ptrace support
[ARM] 3664/1: crunch: add signal frame save/restore
[ARM] 3663/1: fix resource->end off-by-one thinko during physmap conversion
[ARM] 3662/1: ixp23xx: don't include asm/hardware.h in uncompress.h
[ARM] 3660/1: Remove legacy defines
[ARM] 3661/1: S3C2412: Fix compilation if CPU_S3C2410 only
[ARM] 3658/1: S3C244X: Change usb-gadget name to s3c2440-usbgadget
[ARM] Remove the __arch_* layer from uaccess.h

+641 -86
+3
arch/arm/kernel/Makefile
···
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 
+obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o
+AFLAGS_crunch-bits.o		:= -Wa,-mcpu=ep9312
+
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
+5 -5
arch/arm/kernel/armksyms.c
···
 EXPORT_SYMBOL(__memzero);
 
 /* user mem (segment) */
-EXPORT_SYMBOL(__arch_copy_from_user);
-EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__arch_clear_user);
-EXPORT_SYMBOL(__arch_strnlen_user);
-EXPORT_SYMBOL(__arch_strncpy_from_user);
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
+3
arch/arm/kernel/asm-offsets.c
···
 #ifdef CONFIG_IWMMXT
   DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
 #endif
+#ifdef CONFIG_CRUNCH
+  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
+#endif
   BLANK();
   DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
   DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
+305
arch/arm/kernel/crunch-bits.S
···
+/*
+ * arch/arm/kernel/crunch-bits.S
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
+ * Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/arch/ep93xx-regs.h>
+
+/*
+ * We can't use hex constants here due to a bug in gas.
+ */
+#define CRUNCH_MVDX0		0
+#define CRUNCH_MVDX1		8
+#define CRUNCH_MVDX2		16
+#define CRUNCH_MVDX3		24
+#define CRUNCH_MVDX4		32
+#define CRUNCH_MVDX5		40
+#define CRUNCH_MVDX6		48
+#define CRUNCH_MVDX7		56
+#define CRUNCH_MVDX8		64
+#define CRUNCH_MVDX9		72
+#define CRUNCH_MVDX10		80
+#define CRUNCH_MVDX11		88
+#define CRUNCH_MVDX12		96
+#define CRUNCH_MVDX13		104
+#define CRUNCH_MVDX14		112
+#define CRUNCH_MVDX15		120
+#define CRUNCH_MVAX0L		128
+#define CRUNCH_MVAX0M		132
+#define CRUNCH_MVAX0H		136
+#define CRUNCH_MVAX1L		140
+#define CRUNCH_MVAX1M		144
+#define CRUNCH_MVAX1H		148
+#define CRUNCH_MVAX2L		152
+#define CRUNCH_MVAX2M		156
+#define CRUNCH_MVAX2H		160
+#define CRUNCH_MVAX3L		164
+#define CRUNCH_MVAX3M		168
+#define CRUNCH_MVAX3H		172
+#define CRUNCH_DSPSC		176
+
+#define CRUNCH_SIZE		184
+
+	.text
+
+/*
+ * Lazy switching of crunch coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9  = ret_from_exception
+ * lr  = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+ENTRY(crunch_task_enable)
+	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
+
+	ldr	r1, [r8, #0x80]
+	tst	r1, #0x00800000			@ access to crunch enabled?
+	movne	pc, lr				@ if so no business here
+	mov	r3, #0xaa			@ unlock syscon swlock
+	str	r3, [r8, #0xc0]
+	orr	r1, r1, #0x00800000		@ enable access to crunch
+	str	r1, [r8, #0x80]
+
+	ldr	r3, =crunch_owner
+	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
+	ldr	r2, [sp, #60]			@ current task pc value
+	ldr	r1, [r3]			@ get current crunch owner
+	str	r0, [r3]			@ this task now owns crunch
+	sub	r2, r2, #4			@ adjust pc back
+	str	r2, [sp, #60]
+
+	ldr	r2, [r8, #0x80]
+	mov	r2, r2				@ flush out enable (@@@)
+
+	teq	r1, #0				@ test for last ownership
+	mov	lr, r9				@ normal exit from exception
+	beq	crunch_load			@ no owner, skip save
+
+crunch_save:
+	cfstr64	mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
+	cfstr64	mvdx1, [r1, #CRUNCH_MVDX1]
+	cfstr64	mvdx2, [r1, #CRUNCH_MVDX2]
+	cfstr64	mvdx3, [r1, #CRUNCH_MVDX3]
+	cfstr64	mvdx4, [r1, #CRUNCH_MVDX4]
+	cfstr64	mvdx5, [r1, #CRUNCH_MVDX5]
+	cfstr64	mvdx6, [r1, #CRUNCH_MVDX6]
+	cfstr64	mvdx7, [r1, #CRUNCH_MVDX7]
+	cfstr64	mvdx8, [r1, #CRUNCH_MVDX8]
+	cfstr64	mvdx9, [r1, #CRUNCH_MVDX9]
+	cfstr64	mvdx10, [r1, #CRUNCH_MVDX10]
+	cfstr64	mvdx11, [r1, #CRUNCH_MVDX11]
+	cfstr64	mvdx12, [r1, #CRUNCH_MVDX12]
+	cfstr64	mvdx13, [r1, #CRUNCH_MVDX13]
+	cfstr64	mvdx14, [r1, #CRUNCH_MVDX14]
+	cfstr64	mvdx15, [r1, #CRUNCH_MVDX15]
+
+#ifdef __ARMEB__
+#error fix me for ARMEB
+#endif
+
+	cfmv32al mvfx0, mvax0			@ save 72b accumulators
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0L]
+	cfmv32am mvfx0, mvax0
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0M]
+	cfmv32ah mvfx0, mvax0
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0H]
+	cfmv32al mvfx0, mvax1
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1L]
+	cfmv32am mvfx0, mvax1
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1M]
+	cfmv32ah mvfx0, mvax1
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1H]
+	cfmv32al mvfx0, mvax2
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2L]
+	cfmv32am mvfx0, mvax2
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2M]
+	cfmv32ah mvfx0, mvax2
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2H]
+	cfmv32al mvfx0, mvax3
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3L]
+	cfmv32am mvfx0, mvax3
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3M]
+	cfmv32ah mvfx0, mvax3
+	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3H]
+
+	cfmv32sc mvdx0, dspsc			@ save status word
+	cfstr64	mvdx0, [r1, #CRUNCH_DSPSC]
+
+	teq	r0, #0				@ anything to load?
+	cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
+	moveq	pc, lr
+
+crunch_load:
+	cfldr64	mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
+	cfmvsc32 dspsc, mvdx0
+
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
+	cfmval32 mvax0, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0M]
+	cfmvam32 mvax0, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0H]
+	cfmvah32 mvax0, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1L]
+	cfmval32 mvax1, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1M]
+	cfmvam32 mvax1, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1H]
+	cfmvah32 mvax1, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2L]
+	cfmval32 mvax2, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2M]
+	cfmvam32 mvax2, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2H]
+	cfmvah32 mvax2, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3L]
+	cfmval32 mvax3, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3M]
+	cfmvam32 mvax3, mvfx0
+	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3H]
+	cfmvah32 mvax3, mvfx0
+
+	cfldr64	mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
+	cfldr64	mvdx1, [r0, #CRUNCH_MVDX1]
+	cfldr64	mvdx2, [r0, #CRUNCH_MVDX2]
+	cfldr64	mvdx3, [r0, #CRUNCH_MVDX3]
+	cfldr64	mvdx4, [r0, #CRUNCH_MVDX4]
+	cfldr64	mvdx5, [r0, #CRUNCH_MVDX5]
+	cfldr64	mvdx6, [r0, #CRUNCH_MVDX6]
+	cfldr64	mvdx7, [r0, #CRUNCH_MVDX7]
+	cfldr64	mvdx8, [r0, #CRUNCH_MVDX8]
+	cfldr64	mvdx9, [r0, #CRUNCH_MVDX9]
+	cfldr64	mvdx10, [r0, #CRUNCH_MVDX10]
+	cfldr64	mvdx11, [r0, #CRUNCH_MVDX11]
+	cfldr64	mvdx12, [r0, #CRUNCH_MVDX12]
+	cfldr64	mvdx13, [r0, #CRUNCH_MVDX13]
+	cfldr64	mvdx14, [r0, #CRUNCH_MVDX14]
+	cfldr64	mvdx15, [r0, #CRUNCH_MVDX15]
+
+	mov	pc, lr
+
+/*
+ * Back up crunch regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+ENTRY(crunch_task_disable)
+	stmfd	sp!, {r4, r5, lr}
+
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
+
+	ldr	r3, =crunch_owner
+	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
+	ldr	r1, [r3]			@ get current crunch owner
+	teq	r1, #0				@ any current owner?
+	beq	1f				@ no: quit
+	teq	r0, #0				@ any owner?
+	teqne	r1, r2				@ or specified one?
+	bne	1f				@ no: quit
+
+	ldr	r5, [r4, #0x80]			@ enable access to crunch
+	mov	r2, #0xaa
+	str	r2, [r4, #0xc0]
+	orr	r5, r5, #0x00800000
+	str	r5, [r4, #0x80]
+
+	mov	r0, #0				@ nothing to load
+	str	r0, [r3]			@ no more current owner
+	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
+	mov	r2, r2
+	bl	crunch_save
+
+	mov	r2, #0xaa			@ disable access to crunch
+	str	r2, [r4, #0xc0]
+	bic	r5, r5, #0x00800000
+	str	r5, [r4, #0x80]
+	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
+	mov	r5, r5
+
+1:	msr	cpsr_c, ip			@ restore interrupt mode
+	ldmfd	sp!, {r4, r5, pc}
+
+/*
+ * Copy crunch state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store crunch state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+ENTRY(crunch_task_copy)
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r3, =crunch_owner
+	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
+	ldr	r3, [r3]			@ get current crunch owner
+	teq	r2, r3				@ does this task own it...
+	beq	1f
+
+	@ current crunch values are in the task save area
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	r0, r1
+	mov	r1, r2
+	mov	r2, #CRUNCH_SIZE
+	b	memcpy
+
+1:	@ this task owns crunch regs -- grab a copy from there
+	mov	r0, #0				@ nothing to load
+	mov	r3, lr				@ preserve return address
+	bl	crunch_save
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	pc, r3
+
+/*
+ * Restore crunch state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get crunch state from
+ *
+ * this is used to restore crunch state when unwinding a signal stack frame
+ */
+ENTRY(crunch_task_restore)
+	mrs	ip, cpsr
+	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
+	msr	cpsr_c, r2
+
+	ldr	r3, =crunch_owner
+	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
+	ldr	r3, [r3]			@ get current crunch owner
+	teq	r2, r3				@ does this task own it...
+	beq	1f
+
+	@ this task doesn't own crunch regs -- use its save area
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	r0, r2
+	mov	r2, #CRUNCH_SIZE
+	b	memcpy
+
+1:	@ this task owns crunch regs -- load them directly
+	mov	r0, r1
+	mov	r1, #0				@ nothing to save
+	mov	r3, lr				@ preserve return address
+	bl	crunch_load
+	msr	cpsr_c, ip			@ restore interrupt mode
+	mov	pc, r3
+83
arch/arm/kernel/crunch.c
···
+/*
+ * arch/arm/kernel/crunch.c
+ * Cirrus MaverickCrunch context switching and handling
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/arch/ep93xx-regs.h>
+#include <asm/thread_notify.h>
+#include <asm/io.h>
+
+struct crunch_state *crunch_owner;
+
+void crunch_task_release(struct thread_info *thread)
+{
+	local_irq_disable();
+	if (crunch_owner == &thread->crunchstate)
+		crunch_owner = NULL;
+	local_irq_enable();
+}
+
+static int crunch_enabled(u32 devcfg)
+{
+	return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
+}
+
+static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
+{
+	struct thread_info *thread = (struct thread_info *)t;
+	struct crunch_state *crunch_state;
+	u32 devcfg;
+
+	crunch_state = &thread->crunchstate;
+
+	switch (cmd) {
+	case THREAD_NOTIFY_FLUSH:
+		memset(crunch_state, 0, sizeof(*crunch_state));
+
+		/*
+		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
+		 * initialised state information on the first fault.
+		 */
+
+	case THREAD_NOTIFY_RELEASE:
+		crunch_task_release(thread);
+		break;
+
+	case THREAD_NOTIFY_SWITCH:
+		devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
+		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
+			devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
+			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
+			__raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block crunch_notifier_block = {
+	.notifier_call	= crunch_do,
+};
+
+static int __init crunch_init(void)
+{
+	thread_register_notifier(&crunch_notifier_block);
+
+	return 0;
+}
+
+late_initcall(crunch_init);
+6
arch/arm/kernel/entry-armv.S
···
 	b	do_fpe				@ CP#1 (FPE)
 	b	do_fpe				@ CP#2 (FPE)
 	mov	pc, lr				@ CP#3
+#ifdef CONFIG_CRUNCH
+	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
+	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
+#else
 	mov	pc, lr				@ CP#4
 	mov	pc, lr				@ CP#5
 	mov	pc, lr				@ CP#6
+#endif
 	mov	pc, lr				@ CP#7
 	mov	pc, lr				@ CP#8
 	mov	pc, lr				@ CP#9
+36
arch/arm/kernel/ptrace.c
···
 
 #endif
 
+#ifdef CONFIG_CRUNCH
+/*
+ * Get the child Crunch state.
+ */
+static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_disable(thread);  /* force it to ram */
+	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+
+/*
+ * Set the child Crunch state.
+ */
+static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
+{
+	struct thread_info *thread = task_thread_info(tsk);
+
+	crunch_task_release(thread);  /* force a reload */
+	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
+		? -EFAULT : 0;
+}
+#endif
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	unsigned long tmp;
···
 			ret = 0;
 			child->ptrace_message = data;
 			break;
+
+#ifdef CONFIG_CRUNCH
+		case PTRACE_GETCRUNCHREGS:
+			ret = ptrace_getcrunchregs(child, (void __user *)data);
+			break;
+
+		case PTRACE_SETCRUNCHREGS:
+			ret = ptrace_setcrunchregs(child, (void __user *)data);
+			break;
+#endif
 
 		default:
 			ret = ptrace_request(child, request, addr, data);
+39
arch/arm/kernel/signal.c
···
 	return ret;
 }
 
+#ifdef CONFIG_CRUNCH
+static int preserve_crunch_context(struct crunch_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
+
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	kframe->magic = CRUNCH_MAGIC;
+	kframe->size = CRUNCH_STORAGE_SIZE;
+	crunch_task_copy(current_thread_info(), &kframe->storage);
+	return __copy_to_user(frame, kframe, sizeof(*frame));
+}
+
+static int restore_crunch_context(struct crunch_sigframe *frame)
+{
+	char kbuf[sizeof(*frame) + 8];
+	struct crunch_sigframe *kframe;
+
+	/* the crunch context must be 64 bit aligned */
+	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
+	if (__copy_from_user(kframe, frame, sizeof(*frame)))
+		return -1;
+	if (kframe->magic != CRUNCH_MAGIC ||
+	    kframe->size != CRUNCH_STORAGE_SIZE)
+		return -1;
+	crunch_task_restore(current_thread_info(), &kframe->storage);
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_IWMMXT
 
 static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
···
 	err |= !valid_user_regs(regs);
 
 	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= restore_crunch_context(&aux->crunch);
+#endif
 #ifdef CONFIG_IWMMXT
 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
 		err |= restore_iwmmxt_context(&aux->iwmmxt);
···
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 
 	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+#ifdef CONFIG_CRUNCH
+	if (err == 0)
+		err |= preserve_crunch_context(&aux->crunch);
+#endif
 #ifdef CONFIG_IWMMXT
 	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
 		err |= preserve_iwmmxt_context(&aux->iwmmxt);
+2 -2
arch/arm/lib/clear_user.S
···
 
 		.text
 
-/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+/* Prototype: int __clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
-ENTRY(__arch_clear_user)
+ENTRY(__clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
+2 -2
arch/arm/lib/copy_from_user.S
···
 /*
  * Prototype:
  *
- *	size_t __arch_copy_from_user(void *to, const void *from, size_t n)
+ *	size_t __copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
···
 
 	.text
 
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
 
 #include "copy_template.S"
 
+2 -2
arch/arm/lib/copy_to_user.S
···
 /*
  * Prototype:
  *
- *	size_t __arch_copy_to_user(void *to, const void *from, size_t n)
+ *	size_t __copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
···
 
 	.text
 
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
 
 #include "copy_template.S"
 
+1 -1
arch/arm/lib/strncpy_from_user.S
···
  * returns the number of characters copied (strlen of copied string),
  *  -EFAULT on exception, or "len" if we fill the whole buffer
  */
-ENTRY(__arch_strncpy_from_user)
+ENTRY(__strncpy_from_user)
 	mov	ip, r1
 1:	subs	r2, r2, #1
 USER(	ldrplbt	r3, [r1], #1)
+2 -2
arch/arm/lib/strnlen_user.S
···
 	.text
 	.align	5
 
-/* Prototype: unsigned long __arch_strnlen_user(const char *str, long n)
+/* Prototype: unsigned long __strnlen_user(const char *str, long n)
  * Purpose  : get length of a string in user memory
  * Params   : str - address of string in user memory
  * Returns  : length of string *including terminator*
  *            or zero on exception, or n + 1 if too long
  */
-ENTRY(__arch_strnlen_user)
+ENTRY(__strnlen_user)
 	mov	r2, r0
 1:
 USER(	ldrbt	r3, [r0], #1)
+4 -4
arch/arm/lib/uaccess.S
···
 
 #define PAGE_SHIFT	12
 
-/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
  * Purpose  : copy a block to user memory from kernel memory
  * Params   : to   - user memory
  *          : from - kernel memory
···
 		sub	r2, r2, ip
 		b	.Lc2u_dest_aligned
 
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
 		stmfd	sp!, {r2, r4 - r7, lr}
 		cmp	r2, #4
 		blt	.Lc2u_not_enough
···
 9001:		ldmfd	sp!, {r0, r4 - r7, pc}
 		.previous
 
-/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
  * Purpose  : copy a block from user memory to kernel memory
  * Params   : to   - kernel memory
  *          : from - user memory
···
 		sub	r2, r2, ip
 		b	.Lcfu_dest_aligned
 
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
 		stmfd	sp!, {r0, r2, r4 - r7, lr}
 		cmp	r2, #4
 		blt	.Lcfu_not_enough
+11
arch/arm/mach-ep93xx/Kconfig
···
 
 menu "Cirrus EP93xx Implementation Options"
 
+config CRUNCH
+	bool "Support for MaverickCrunch"
+	help
+	  Enable kernel support for MaverickCrunch.
+
 comment "EP93xx Platforms"
+
+config MACH_EDB9315
+	bool "Support Cirrus Logic EDB9315"
+	help
+	  Say 'Y' here if you want your kernel to support the Cirrus
+	  Logic EDB9315 Evaluation Board.
 
 config MACH_GESBC9312
 	bool "Support Glomation GESBC-9312-sx"
+1
arch/arm/mach-ep93xx/Makefile
···
 obj-n				:=
 obj-				:=
 
+obj-$(CONFIG_MACH_EDB9315)	+= edb9315.o
 obj-$(CONFIG_MACH_GESBC9312)	+= gesbc9312.o
 obj-$(CONFIG_MACH_TS72XX)	+= ts72xx.o
+62
arch/arm/mach-ep93xx/edb9315.c
···
+/*
+ * arch/arm/mach-ep93xx/edb9315.c
+ * Cirrus Logic EDB9315 support.
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static struct physmap_flash_data edb9315_flash_data = {
+	.width		= 4,
+};
+
+static struct resource edb9315_flash_resource = {
+	.start		= 0x60000000,
+	.end		= 0x61ffffff,
+	.flags		= IORESOURCE_MEM,
+};
+
+static struct platform_device edb9315_flash = {
+	.name		= "physmap-flash",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &edb9315_flash_data,
+	},
+	.num_resources	= 1,
+	.resource	= &edb9315_flash_resource,
+};
+
+static void __init edb9315_init_machine(void)
+{
+	ep93xx_init_devices();
+	platform_device_register(&edb9315_flash);
+}
+
+MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
+	/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
+	.phys_io	= EP93XX_APB_PHYS_BASE,
+	.io_pg_offst	= ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
+	.boot_params	= 0x00000100,
+	.map_io		= ep93xx_map_io,
+	.init_irq	= ep93xx_init_irq,
+	.timer		= &ep93xx_timer,
+	.init_machine	= edb9315_init_machine,
+MACHINE_END
+1 -1
arch/arm/mach-ep93xx/gesbc9312.c
···
 
 static struct resource gesbc9312_flash_resource = {
 	.start		= 0x60000000,
-	.end		= 0x60800000,
+	.end		= 0x607fffff,
 	.flags		= IORESOURCE_MEM,
 };
 
+1 -1
arch/arm/mach-ep93xx/ts72xx.c
···
 
 static struct resource ts72xx_flash_resource = {
 	.start		= TS72XX_NOR_PHYS_BASE,
-	.end		= TS72XX_NOR_PHYS_BASE + 0x01000000,
+	.end		= TS72XX_NOR_PHYS_BASE + 0x00ffffff,
 	.flags		= IORESOURCE_MEM,
 };
 
+1 -1
arch/arm/mach-ixp23xx/espresso.c
···
 
 static struct resource espresso_flash_resource = {
 	.start		= 0x90000000,
-	.end		= 0x92000000,
+	.end		= 0x91ffffff,
 	.flags		= IORESOURCE_MEM,
 };
 
+1 -1
arch/arm/mach-ixp23xx/ixdp2351.c
···
 
 static struct resource ixdp2351_flash_resource = {
 	.start		= 0x90000000,
-	.end		= 0x94000000,
+	.end		= 0x93ffffff,
 	.flags		= IORESOURCE_MEM,
 };
 
+1 -1
arch/arm/mach-ixp23xx/roadrunner.c
···
 
 static struct resource roadrunner_flash_resource = {
 	.start		= 0x90000000,
-	.end		= 0x94000000,
+	.end		= 0x93ffffff,
 	.flags		= IORESOURCE_MEM,
 };
 
+2 -2
arch/arm/mach-pxa/irq.c
···
 
 	if (type == IRQT_PROBE) {
 		/* Don't mess with enabled GPIOs using preconfigured edges or
-		   GPIOs set to alternate function during probe */
-		if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx]) &
+		   GPIOs set to alternate function or to output during probe */
+		if ((GPIO_IRQ_rising_edge[idx] | GPIO_IRQ_falling_edge[idx] | GPDR(gpio)) &
 		    GPIO_bit(gpio))
 			return 0;
 		if (GAFR(gpio) & (0x3 << (((gpio) & 0xf)*2)))
+1
arch/arm/mach-s3c2410/s3c244x.c
···
 
 	s3c_device_i2c.name  = "s3c2440-i2c";
 	s3c_device_nand.name = "s3c2440-nand";
+	s3c_device_usbgadget.name = "s3c2440-usbgadget";
 }
 
 void __init s3c244x_init_clocks(int xtal)
-2
include/asm-arm/arch-at91rm9200/memory.h
···
  * bus_to_virt: Used to convert an address for DMA operations
  * to an address that the kernel can use.
  */
-#define __virt_to_bus__is_a_macro
 #define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt__is_a_macro
 #define __bus_to_virt(x)	__phys_to_virt(x)
 
 #endif
-2
include/asm-arm/arch-h720x/memory.h
···
  * There is something to do here later !, Mar 2000, Jungjun Kim
  */
 
-#define __virt_to_bus__is_a_macro
 #define __virt_to_bus(x)	__virt_to_phys(x)
-#define __bus_to_virt__is_a_macro
 #define __bus_to_virt(x)	__phys_to_virt(x)
 
 #endif
+2 -4
include/asm-arm/arch-imx/memory.h
···
  * bus_to_virt: Used to convert an address for DMA operations
  * to an address that the kernel can use.
  */
-#define __virt_to_bus__is_a_macro
-#define __virt_to_bus(x)	(x - PAGE_OFFSET + PHYS_OFFSET)
-#define __bus_to_virt__is_a_macro
-#define __bus_to_virt(x)	(x - PHYS_OFFSET + PAGE_OFFSET)
+#define __virt_to_bus(x)	(x - PAGE_OFFSET + PHYS_OFFSET)
+#define __bus_to_virt(x)	(x - PHYS_OFFSET + PAGE_OFFSET)
 
 #endif
-11
include/asm-arm/arch-ixp23xx/ixp23xx.h
···
 #define IXP23XX_PCI_CPP_ADDR_BITS	IXP23XX_PCI_CSR(0x0160)
 
 
-#ifndef __ASSEMBLY__
-/*
- * Is system memory on the XSI or CPP bus?
- */
-static inline unsigned ixp23xx_cpp_boot(void)
-{
-	return (*IXP23XX_EXP_CFG0 & IXP23XX_EXP_CFG0_XSI_NOT_PRES);
-}
-#endif
-
-
 #endif
+10
include/asm-arm/arch-ixp23xx/platform.h
···
 
 #define IXP23XX_UART_XTAL		14745600
 
+#ifndef __ASSEMBLY__
+/*
+ * Is system memory on the XSI or CPP bus?
+ */
+static inline unsigned ixp23xx_cpp_boot(void)
+{
+	return (*IXP23XX_EXP_CFG0 & IXP23XX_EXP_CFG0_XSI_NOT_PRES);
+}
+#endif
+
 
 #endif
+1 -1
include/asm-arm/arch-ixp23xx/uncompress.h
···
 #ifndef __ASM_ARCH_UNCOMPRESS_H
 #define __ASM_ARCH_UNCOMPRESS_H
 
-#include <asm/hardware.h>
+#include <asm/arch/ixp23xx.h>
 #include <linux/serial_reg.h>
 
 #define UART_BASE	((volatile u32 *)IXP23XX_UART1_PHYS)
+7 -9
include/asm-arm/arch-s3c2410/regs-dsc.h
···
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * S3C2440 Signal Drive Strength Control
- *
- * Changelog:
- *	11-Aug-2004 BJD  Created file
- *	25-Aug-2004 BJD  Added the _SELECT_* defs for using with functions
+ * S3C2440/S3C2412 Signal Drive Strength Control
 */
 
 
 #ifndef __ASM_ARCH_REGS_DSC_H
 #define __ASM_ARCH_REGS_DSC_H "2440-dsc"
 
-#ifdef CONFIG_CPU_S3C2440
+#if defined(CONFIG_CPU_S3C2412)
+#define S3C2412_DSC0	   S3C2410_GPIOREG(0xdc)
+#define S3C2412_DSC1	   S3C2410_GPIOREG(0xe0)
+#endif
+
+#if defined(CONFIG_CPU_S3C2440)
 
 #define S3C2440_DSC0	   S3C2410_GPIOREG(0xc4)
 #define S3C2440_DSC1	   S3C2410_GPIOREG(0xc8)
-
-#define S3C2412_DSC0	   S3C2410_GPIOREG(0xdc)
-#define S3C2412_DSC1	   S3C2410_GPIOREG(0xe0)
 
 #define S3C2440_SELECT_DSC0 (0)
 #define S3C2440_SELECT_DSC1 (1<<31)
+8
include/asm-arm/fpstate.h
···
 
 #define FP_SIZE (sizeof(union fp_state) / sizeof(int))
 
+struct crunch_state {
+	unsigned int	mvdx[16][2];
+	unsigned int	mvax[4][3];
+	unsigned int	dspsc[2];
+};
+
+#define CRUNCH_SIZE	sizeof(struct crunch_state)
+
 #endif
 
 #endif
+5
include/asm-arm/ptrace.h
···
 
 #define PTRACE_SET_SYSCALL	23
 
+/* PTRACE_SYSCALL is 24 */
+
+#define PTRACE_GETCRUNCHREGS	25
+#define PTRACE_SETCRUNCHREGS	26
+
 /*
  * PSR bits
  */
+6
include/asm-arm/thread_info.h
···
 	struct cpu_context_save	cpu_context;	/* cpu context */
 	__u8			used_cp[16];	/* thread used copro */
 	unsigned long		tp_value;
+	struct crunch_state	crunchstate;
 	union fp_state		fpstate __attribute__((aligned(8)));
 	union vfp_state		vfpstate;
 	struct restart_block	restart_block;
···
 	((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
 #define thread_saved_fp(tsk)	\
 	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+
+extern void crunch_task_disable(struct thread_info *);
+extern void crunch_task_copy(struct thread_info *, void *);
+extern void crunch_task_restore(struct thread_info *, void *);
+extern void crunch_task_release(struct thread_info *);
 
 extern void iwmmxt_task_disable(struct thread_info *);
 extern void iwmmxt_task_copy(struct thread_info *, void *);
+13 -32
include/asm-arm/uaccess.h
···
 		: "r" (x), "i" (-EFAULT)		\
 		: "cc")
 
-extern unsigned long __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
-extern unsigned long __arch_strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __arch_strnlen_user(const char __user *s, long n);
+
+extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
+extern unsigned long __strnlen_user(const char __user *s, long n);
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
-		n = __arch_copy_from_user(to, from, n);
+		n = __copy_from_user(to, from, n);
 	else /* security hole - plug it */
 		memzero(to, n);
 	return n;
 }
 
-static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return __arch_copy_from_user(to, from, n);
-}
-
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __arch_copy_to_user(to, from, n);
+		n = __copy_to_user(to, from, n);
 	return n;
-}
-
-static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return __arch_copy_to_user(to, from, n);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-static inline unsigned long clear_user (void __user *to, unsigned long n)
+static inline unsigned long clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __arch_clear_user(to, n);
+		n = __clear_user(to, n);
 	return n;
 }
 
-static inline unsigned long __clear_user (void __user *to, unsigned long n)
-{
-	return __arch_clear_user(to, n);
-}
-
-static inline long strncpy_from_user (char *dst, const char __user *src, long count)
+static inline long strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
 	if (access_ok(VERIFY_READ, src, 1))
-		res = __arch_strncpy_from_user(dst, src, count);
+		res = __strncpy_from_user(dst, src, count);
 	return res;
-}
-
-static inline long __strncpy_from_user (char *dst, const char __user *src, long count)
-{
-	return __arch_strncpy_from_user(dst, src, count);
 }
 
 #define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
···
 	unsigned long res = 0;
 
 	if (__addr_ok(s))
-		res = __arch_strnlen_user(s, n);
+		res = __strnlen_user(s, n);
 
 	return res;
 }
+14
include/asm-arm/ucontext.h
···
  * bytes, to prevent unpredictable padding in the signal frame.
  */
 
+#ifdef CONFIG_CRUNCH
+#define CRUNCH_MAGIC		0x5065cf03
+#define CRUNCH_STORAGE_SIZE	(CRUNCH_SIZE + 8)
+
+struct crunch_sigframe {
+	unsigned long	magic;
+	unsigned long	size;
+	struct crunch_state	storage;
+} __attribute__((__aligned__(8)));
+#endif
+
 #ifdef CONFIG_IWMMXT
 /* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */
 #define IWMMXT_MAGIC		0x12ef842a
···
  * one of these.
  */
 struct aux_sigframe {
+#ifdef CONFIG_CRUNCH
+	struct crunch_sigframe	crunch;
+#endif
 #ifdef CONFIG_IWMMXT
 	struct iwmmxt_sigframe	iwmmxt;
 #endif