Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'debug', 'fixes', 'l2c' (early part), 'misc' and 'sa1100' into for-next

+957 -1258
+20
Documentation/ABI/testing/sysfs-bus-amba
··· 1 + What: /sys/bus/amba/devices/.../driver_override 2 + Date: September 2014 3 + Contact: Antonios Motakis <a.motakis@virtualopensystems.com> 4 + Description: 5 + This file allows the driver for a device to be specified which 6 + will override standard OF, ACPI, ID table, and name matching. 7 + When specified, only a driver with a name matching the value 8 + written to driver_override will have an opportunity to bind to 9 + the device. The override is specified by writing a string to the 10 + driver_override file (echo vfio-amba > driver_override) and may 11 + be cleared with an empty string (echo > driver_override). 12 + This returns the device to standard matching rules binding. 13 + Writing to driver_override does not automatically unbind the 14 + device from its current driver or make any attempt to 15 + automatically load the specified driver. If no driver with a 16 + matching name is currently loaded in the kernel, the device will 17 + not bind to any driver. This also allows devices to opt-out of 18 + driver binding using a driver_override name such as "none". 19 + Only a single driver may be specified in the override, there is 20 + no support for parsing delimiters.
+10
Documentation/devicetree/bindings/arm/l2cc.txt
··· 57 57 - cache-id-part: cache id part number to be used if it is not present 58 58 on hardware 59 59 - wt-override: If present then L2 is forced to Write through mode 60 + - arm,double-linefill : Override double linefill enable setting. Enable if 61 + non-zero, disable if zero. 62 + - arm,double-linefill-incr : Override double linefill on INCR read. Enable 63 + if non-zero, disable if zero. 64 + - arm,double-linefill-wrap : Override double linefill on WRAP read. Enable 65 + if non-zero, disable if zero. 66 + - arm,prefetch-drop : Override prefetch drop enable setting. Enable if non-zero, 67 + disable if zero. 68 + - arm,prefetch-offset : Override prefetch offset value. Valid values are 69 + 0-7, 15, 23, and 31. 60 70 61 71 Example: 62 72
+1
arch/arm/Kconfig
··· 29 29 select HANDLE_DOMAIN_IRQ 30 30 select HARDIRQS_SW_RESEND 31 31 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) 32 + select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 32 33 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL 33 34 select HAVE_ARCH_KGDB 34 35 select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+31 -10
arch/arm/boot/compressed/head.S
··· 178 178 179 179 /* 180 180 * Set up a page table only if it won't overwrite ourself. 181 - * That means r4 < pc && r4 - 16k page directory > &_end. 181 + * That means r4 < pc || r4 - 16k page directory > &_end. 182 182 * Given that r4 > &_end is most unfrequent, we add a rough 183 183 * additional 1MB of room for a possible appended DTB. 184 184 */ ··· 263 263 * OK... Let's do some funky business here. 264 264 * If we do have a DTB appended to zImage, and we do have 265 265 * an ATAG list around, we want the later to be translated 266 - * and folded into the former here. To be on the safe side, 267 - * let's temporarily move the stack away into the malloc 268 - * area. No GOT fixup has occurred yet, but none of the 269 - * code we're about to call uses any global variable. 266 + * and folded into the former here. No GOT fixup has occurred 267 + * yet, but none of the code we're about to call uses any 268 + * global variable. 270 269 */ 271 - add sp, sp, #0x10000 270 + 271 + /* Get the initial DTB size */ 272 + ldr r5, [r6, #4] 273 + #ifndef __ARMEB__ 274 + /* convert to little endian */ 275 + eor r1, r5, r5, ror #16 276 + bic r1, r1, #0x00ff0000 277 + mov r5, r5, ror #8 278 + eor r5, r5, r1, lsr #8 279 + #endif 280 + /* 50% DTB growth should be good enough */ 281 + add r5, r5, r5, lsr #1 282 + /* preserve 64-bit alignment */ 283 + add r5, r5, #7 284 + bic r5, r5, #7 285 + /* clamp to 32KB min and 1MB max */ 286 + cmp r5, #(1 << 15) 287 + movlo r5, #(1 << 15) 288 + cmp r5, #(1 << 20) 289 + movhi r5, #(1 << 20) 290 + /* temporarily relocate the stack past the DTB work space */ 291 + add sp, sp, r5 292 + 272 293 stmfd sp!, {r0-r3, ip, lr} 273 294 mov r0, r8 274 295 mov r1, r6 275 - sub r2, sp, r6 296 + mov r2, r5 276 297 bl atags_to_fdt 277 298 278 299 /* ··· 306 285 bic r0, r0, #1 307 286 add r0, r0, #0x100 308 287 mov r1, r6 309 - sub r2, sp, r6 288 + mov r2, r5 310 289 bleq atags_to_fdt 311 290 312 291 ldmfd sp!, {r0-r3, ip, lr} 313 - sub sp, sp, #0x10000 292 + 
sub sp, sp, r5 314 293 #endif 315 294 316 295 mov r8, r6 @ use the appended device tree ··· 327 306 subs r1, r5, r1 328 307 addhi r9, r9, r1 329 308 330 - /* Get the dtb's size */ 309 + /* Get the current DTB size */ 331 310 ldr r5, [r6, #4] 332 311 #ifndef __ARMEB__ 333 312 /* convert r5 (dtb size) to little endian */
+9
arch/arm/boot/dts/exynos4210.dtsi
··· 81 81 reg = <0x10023CA0 0x20>; 82 82 }; 83 83 84 + l2c: l2-cache-controller@10502000 { 85 + compatible = "arm,pl310-cache"; 86 + reg = <0x10502000 0x1000>; 87 + cache-unified; 88 + cache-level = <2>; 89 + arm,tag-latency = <2 2 1>; 90 + arm,data-latency = <2 2 1>; 91 + }; 92 + 84 93 gic: interrupt-controller@10490000 { 85 94 cpu-offset = <0x8000>; 86 95 };
+14
arch/arm/boot/dts/exynos4x12.dtsi
··· 54 54 reg = <0x10023CA0 0x20>; 55 55 }; 56 56 57 + l2c: l2-cache-controller@10502000 { 58 + compatible = "arm,pl310-cache"; 59 + reg = <0x10502000 0x1000>; 60 + cache-unified; 61 + cache-level = <2>; 62 + arm,tag-latency = <2 2 1>; 63 + arm,data-latency = <3 2 1>; 64 + arm,double-linefill = <1>; 65 + arm,double-linefill-incr = <0>; 66 + arm,double-linefill-wrap = <1>; 67 + arm,prefetch-drop = <1>; 68 + arm,prefetch-offset = <7>; 69 + }; 70 + 57 71 clock: clock-controller@10030000 { 58 72 compatible = "samsung,exynos4412-clock"; 59 73 reg = <0x10030000 0x20000>;
+20
arch/arm/include/asm/bitrev.h
··· 1 + #ifndef __ASM_BITREV_H 2 + #define __ASM_BITREV_H 3 + 4 + static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) 5 + { 6 + __asm__ ("rbit %0, %1" : "=r" (x) : "r" (x)); 7 + return x; 8 + } 9 + 10 + static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) 11 + { 12 + return __arch_bitrev32((u32)x) >> 16; 13 + } 14 + 15 + static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) 16 + { 17 + return __arch_bitrev32((u32)x) >> 24; 18 + } 19 + 20 + #endif
+14 -1
arch/arm/include/asm/compiler.h
··· 8 8 * This string is meant to be concatenated with the inline asm string and 9 9 * will cause compilation to stop on mismatch. 10 10 * (for details, see gcc PR 15089) 11 + * For compatibility with clang, we have to specifically take the equivalence 12 + * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well. 11 13 */ 12 - #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" 14 + #define __asmeq(x, y) \ 15 + ".ifnc " x "," y "; " \ 16 + ".ifnc " x y ",fpr11; " \ 17 + ".ifnc " x y ",r11fp; " \ 18 + ".ifnc " x y ",ipr12; " \ 19 + ".ifnc " x y ",r12ip; " \ 20 + ".err; " \ 21 + ".endif; " \ 22 + ".endif; " \ 23 + ".endif; " \ 24 + ".endif; " \ 25 + ".endif\n\t" 13 26 14 27 15 28 #endif /* __ASM_ARM_COMPILER_H */
+3
arch/arm/include/asm/outercache.h
··· 23 23 24 24 #include <linux/types.h> 25 25 26 + struct l2x0_regs; 27 + 26 28 struct outer_cache_fns { 27 29 void (*inv_range)(unsigned long, unsigned long); 28 30 void (*clean_range)(unsigned long, unsigned long); ··· 38 36 39 37 /* This is an ARM L2C thing */ 40 38 void (*write_sec)(unsigned long, unsigned); 39 + void (*configure)(const struct l2x0_regs *); 41 40 }; 42 41 43 42 extern struct outer_cache_fns outer_cache;
+1
arch/arm/include/uapi/asm/unistd.h
··· 413 413 #define __NR_getrandom (__NR_SYSCALL_BASE+384) 414 414 #define __NR_memfd_create (__NR_SYSCALL_BASE+385) 415 415 #define __NR_bpf (__NR_SYSCALL_BASE+386) 416 + #define __NR_execveat (__NR_SYSCALL_BASE+387) 416 417 417 418 /* 418 419 * The following SWIs are ARM private.
+1
arch/arm/kernel/calls.S
··· 396 396 CALL(sys_getrandom) 397 397 /* 385 */ CALL(sys_memfd_create) 398 398 CALL(sys_bpf) 399 + CALL(sys_execveat) 399 400 #ifndef syscalls_counted 400 401 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 401 402 #define syscalls_counted
+7 -6
arch/arm/kernel/entry-header.S
··· 253 253 .endm 254 254 255 255 .macro restore_user_regs, fast = 0, offset = 0 256 - ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr 257 - ldr lr, [sp, #\offset + S_PC]! @ get pc 256 + mov r2, sp 257 + ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr 258 + ldr lr, [r2, #\offset + S_PC]! @ get pc 258 259 msr spsr_cxsf, r1 @ save in spsr_svc 259 260 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) 260 261 @ We must avoid clrex due to Cortex-A15 erratum #830321 261 - strex r1, r2, [sp] @ clear the exclusive monitor 262 + strex r1, r2, [r2] @ clear the exclusive monitor 262 263 #endif 263 264 .if \fast 264 - ldmdb sp, {r1 - lr}^ @ get calling r1 - lr 265 + ldmdb r2, {r1 - lr}^ @ get calling r1 - lr 265 266 .else 266 - ldmdb sp, {r0 - lr}^ @ get calling r0 - lr 267 + ldmdb r2, {r0 - lr}^ @ get calling r0 - lr 267 268 .endif 268 269 mov r0, r0 @ ARMv5T and earlier require a nop 269 270 @ after ldm {}^ 270 - add sp, sp, #S_FRAME_SIZE - S_PC 271 + add sp, sp, #\offset + S_FRAME_SIZE 271 272 movs pc, lr @ return & move spsr_svc into cpsr 272 273 .endm 273 274
+2
arch/arm/kernel/entry-v7m.S
··· 22 22 23 23 __invalid_entry: 24 24 v7m_exception_entry 25 + #ifdef CONFIG_PRINTK 25 26 adr r0, strerr 26 27 mrs r1, ipsr 27 28 mov r2, lr 28 29 bl printk 30 + #endif 29 31 mov r0, sp 30 32 bl show_regs 31 33 1: b 1b
+8 -1
arch/arm/kernel/head.S
··· 346 346 347 347 #if defined(CONFIG_SMP) 348 348 .text 349 + ENTRY(secondary_startup_arm) 350 + .arm 351 + THUMB( adr r9, BSYM(1f) ) @ Kernel is entered in ARM. 352 + THUMB( bx r9 ) @ If this is a Thumb-2 kernel, 353 + THUMB( .thumb ) @ switch to Thumb now. 354 + THUMB(1: ) 349 355 ENTRY(secondary_startup) 350 356 /* 351 357 * Common entry point for secondary CPUs. ··· 391 385 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 392 386 THUMB( ret r12 ) 393 387 ENDPROC(secondary_startup) 388 + ENDPROC(secondary_startup_arm) 394 389 395 390 /* 396 391 * r6 = &secondary_data ··· 593 586 add r5, r5, r3 @ adjust table end address 594 587 add r6, r6, r3 @ adjust __pv_phys_pfn_offset address 595 588 add r7, r7, r3 @ adjust __pv_offset address 596 - mov r0, r8, lsr #12 @ convert to PFN 589 + mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN 597 590 str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset 598 591 strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits 599 592 mov r6, r3, lsr #24 @ constant for add/sub instructions
+2 -1
arch/arm/kernel/irq.c
··· 109 109 110 110 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) && 111 111 (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) { 112 - outer_cache.write_sec = machine_desc->l2c_write_sec; 112 + if (!outer_cache.write_sec) 113 + outer_cache.write_sec = machine_desc->l2c_write_sec; 113 114 ret = l2x0_of_init(machine_desc->l2c_aux_val, 114 115 machine_desc->l2c_aux_mask); 115 116 if (ret)
+8 -2
arch/arm/kernel/perf_event.c
··· 116 116 ret = 1; 117 117 } 118 118 119 - if (left > (s64)armpmu->max_period) 120 - left = armpmu->max_period; 119 + /* 120 + * Limit the maximum period to prevent the counter value 121 + * from overtaking the one we are about to program. In 122 + * effect we are reducing max_period to account for 123 + * interrupt latency (and we are being very conservative). 124 + */ 125 + if (left > (armpmu->max_period >> 1)) 126 + left = armpmu->max_period >> 1; 121 127 122 128 local64_set(&hwc->prev_count, (u64)-left); 123 129
+5 -2
arch/arm/kernel/setup.c
··· 657 657 658 658 /* 659 659 * Ensure that start/size are aligned to a page boundary. 660 - * Size is appropriately rounded down, start is rounded up. 660 + * Size is rounded down, start is rounded up. 661 661 */ 662 - size -= start & ~PAGE_MASK; 663 662 aligned_start = PAGE_ALIGN(start); 663 + if (aligned_start > start + size) 664 + size = 0; 665 + else 666 + size -= aligned_start - start; 664 667 665 668 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT 666 669 if (aligned_start > ULONG_MAX) {
-4
arch/arm/kernel/suspend.c
··· 14 14 extern void cpu_resume_mmu(void); 15 15 16 16 #ifdef CONFIG_MMU 17 - /* 18 - * Hide the first two arguments to __cpu_suspend - these are an implementation 19 - * detail which platform code shouldn't have to know about. 20 - */ 21 17 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) 22 18 { 23 19 struct mm_struct *mm = current->active_mm;
+2 -13
arch/arm/lib/Makefile
··· 15 15 io-readsb.o io-writesb.o io-readsl.o io-writesl.o \ 16 16 call_with_stack.o bswapsdi2.o 17 17 18 - mmu-y := clear_user.o copy_page.o getuser.o putuser.o 19 - 20 - # the code in uaccess.S is not preemption safe and 21 - # probably faster on ARMv3 only 22 - ifeq ($(CONFIG_PREEMPT),y) 23 - mmu-y += copy_from_user.o copy_to_user.o 24 - else 25 - ifneq ($(CONFIG_CPU_32v3),y) 26 - mmu-y += copy_from_user.o copy_to_user.o 27 - else 28 - mmu-y += uaccess.o 29 - endif 30 - endif 18 + mmu-y := clear_user.o copy_page.o getuser.o putuser.o \ 19 + copy_from_user.o copy_to_user.o 31 20 32 21 # using lib_ here won't override already available weak symbols 33 22 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
-564
arch/arm/lib/uaccess.S
··· 1 - /* 2 - * linux/arch/arm/lib/uaccess.S 3 - * 4 - * Copyright (C) 1995, 1996,1997,1998 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * Routines to block copy data to/from user memory 11 - * These are highly optimised both for the 4k page size 12 - * and for various alignments. 13 - */ 14 - #include <linux/linkage.h> 15 - #include <asm/assembler.h> 16 - #include <asm/errno.h> 17 - #include <asm/domain.h> 18 - 19 - .text 20 - 21 - #define PAGE_SHIFT 12 22 - 23 - /* Prototype: int __copy_to_user(void *to, const char *from, size_t n) 24 - * Purpose : copy a block to user memory from kernel memory 25 - * Params : to - user memory 26 - * : from - kernel memory 27 - * : n - number of bytes to copy 28 - * Returns : Number of bytes NOT copied. 29 - */ 30 - 31 - .Lc2u_dest_not_aligned: 32 - rsb ip, ip, #4 33 - cmp ip, #2 34 - ldrb r3, [r1], #1 35 - USER( TUSER( strb) r3, [r0], #1) @ May fault 36 - ldrgeb r3, [r1], #1 37 - USER( TUSER( strgeb) r3, [r0], #1) @ May fault 38 - ldrgtb r3, [r1], #1 39 - USER( TUSER( strgtb) r3, [r0], #1) @ May fault 40 - sub r2, r2, ip 41 - b .Lc2u_dest_aligned 42 - 43 - ENTRY(__copy_to_user) 44 - stmfd sp!, {r2, r4 - r7, lr} 45 - cmp r2, #4 46 - blt .Lc2u_not_enough 47 - ands ip, r0, #3 48 - bne .Lc2u_dest_not_aligned 49 - .Lc2u_dest_aligned: 50 - 51 - ands ip, r1, #3 52 - bne .Lc2u_src_not_aligned 53 - /* 54 - * Seeing as there has to be at least 8 bytes to copy, we can 55 - * copy one word, and force a user-mode page fault... 
56 - */ 57 - 58 - .Lc2u_0fupi: subs r2, r2, #4 59 - addmi ip, r2, #4 60 - bmi .Lc2u_0nowords 61 - ldr r3, [r1], #4 62 - USER( TUSER( str) r3, [r0], #4) @ May fault 63 - mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 64 - rsb ip, ip, #0 65 - movs ip, ip, lsr #32 - PAGE_SHIFT 66 - beq .Lc2u_0fupi 67 - /* 68 - * ip = max no. of bytes to copy before needing another "strt" insn 69 - */ 70 - cmp r2, ip 71 - movlt ip, r2 72 - sub r2, r2, ip 73 - subs ip, ip, #32 74 - blt .Lc2u_0rem8lp 75 - 76 - .Lc2u_0cpy8lp: ldmia r1!, {r3 - r6} 77 - stmia r0!, {r3 - r6} @ Shouldnt fault 78 - ldmia r1!, {r3 - r6} 79 - subs ip, ip, #32 80 - stmia r0!, {r3 - r6} @ Shouldnt fault 81 - bpl .Lc2u_0cpy8lp 82 - 83 - .Lc2u_0rem8lp: cmn ip, #16 84 - ldmgeia r1!, {r3 - r6} 85 - stmgeia r0!, {r3 - r6} @ Shouldnt fault 86 - tst ip, #8 87 - ldmneia r1!, {r3 - r4} 88 - stmneia r0!, {r3 - r4} @ Shouldnt fault 89 - tst ip, #4 90 - ldrne r3, [r1], #4 91 - TUSER( strne) r3, [r0], #4 @ Shouldnt fault 92 - ands ip, ip, #3 93 - beq .Lc2u_0fupi 94 - .Lc2u_0nowords: teq ip, #0 95 - beq .Lc2u_finished 96 - .Lc2u_nowords: cmp ip, #2 97 - ldrb r3, [r1], #1 98 - USER( TUSER( strb) r3, [r0], #1) @ May fault 99 - ldrgeb r3, [r1], #1 100 - USER( TUSER( strgeb) r3, [r0], #1) @ May fault 101 - ldrgtb r3, [r1], #1 102 - USER( TUSER( strgtb) r3, [r0], #1) @ May fault 103 - b .Lc2u_finished 104 - 105 - .Lc2u_not_enough: 106 - movs ip, r2 107 - bne .Lc2u_nowords 108 - .Lc2u_finished: mov r0, #0 109 - ldmfd sp!, {r2, r4 - r7, pc} 110 - 111 - .Lc2u_src_not_aligned: 112 - bic r1, r1, #3 113 - ldr r7, [r1], #4 114 - cmp ip, #2 115 - bgt .Lc2u_3fupi 116 - beq .Lc2u_2fupi 117 - .Lc2u_1fupi: subs r2, r2, #4 118 - addmi ip, r2, #4 119 - bmi .Lc2u_1nowords 120 - mov r3, r7, lspull #8 121 - ldr r7, [r1], #4 122 - orr r3, r3, r7, lspush #24 123 - USER( TUSER( str) r3, [r0], #4) @ May fault 124 - mov ip, r0, lsl #32 - PAGE_SHIFT 125 - rsb ip, ip, #0 126 - movs ip, ip, lsr #32 - PAGE_SHIFT 127 - beq 
.Lc2u_1fupi 128 - cmp r2, ip 129 - movlt ip, r2 130 - sub r2, r2, ip 131 - subs ip, ip, #16 132 - blt .Lc2u_1rem8lp 133 - 134 - .Lc2u_1cpy8lp: mov r3, r7, lspull #8 135 - ldmia r1!, {r4 - r7} 136 - subs ip, ip, #16 137 - orr r3, r3, r4, lspush #24 138 - mov r4, r4, lspull #8 139 - orr r4, r4, r5, lspush #24 140 - mov r5, r5, lspull #8 141 - orr r5, r5, r6, lspush #24 142 - mov r6, r6, lspull #8 143 - orr r6, r6, r7, lspush #24 144 - stmia r0!, {r3 - r6} @ Shouldnt fault 145 - bpl .Lc2u_1cpy8lp 146 - 147 - .Lc2u_1rem8lp: tst ip, #8 148 - movne r3, r7, lspull #8 149 - ldmneia r1!, {r4, r7} 150 - orrne r3, r3, r4, lspush #24 151 - movne r4, r4, lspull #8 152 - orrne r4, r4, r7, lspush #24 153 - stmneia r0!, {r3 - r4} @ Shouldnt fault 154 - tst ip, #4 155 - movne r3, r7, lspull #8 156 - ldrne r7, [r1], #4 157 - orrne r3, r3, r7, lspush #24 158 - TUSER( strne) r3, [r0], #4 @ Shouldnt fault 159 - ands ip, ip, #3 160 - beq .Lc2u_1fupi 161 - .Lc2u_1nowords: mov r3, r7, get_byte_1 162 - teq ip, #0 163 - beq .Lc2u_finished 164 - cmp ip, #2 165 - USER( TUSER( strb) r3, [r0], #1) @ May fault 166 - movge r3, r7, get_byte_2 167 - USER( TUSER( strgeb) r3, [r0], #1) @ May fault 168 - movgt r3, r7, get_byte_3 169 - USER( TUSER( strgtb) r3, [r0], #1) @ May fault 170 - b .Lc2u_finished 171 - 172 - .Lc2u_2fupi: subs r2, r2, #4 173 - addmi ip, r2, #4 174 - bmi .Lc2u_2nowords 175 - mov r3, r7, lspull #16 176 - ldr r7, [r1], #4 177 - orr r3, r3, r7, lspush #16 178 - USER( TUSER( str) r3, [r0], #4) @ May fault 179 - mov ip, r0, lsl #32 - PAGE_SHIFT 180 - rsb ip, ip, #0 181 - movs ip, ip, lsr #32 - PAGE_SHIFT 182 - beq .Lc2u_2fupi 183 - cmp r2, ip 184 - movlt ip, r2 185 - sub r2, r2, ip 186 - subs ip, ip, #16 187 - blt .Lc2u_2rem8lp 188 - 189 - .Lc2u_2cpy8lp: mov r3, r7, lspull #16 190 - ldmia r1!, {r4 - r7} 191 - subs ip, ip, #16 192 - orr r3, r3, r4, lspush #16 193 - mov r4, r4, lspull #16 194 - orr r4, r4, r5, lspush #16 195 - mov r5, r5, lspull #16 196 - orr r5, r5, r6, lspush #16 197 
- mov r6, r6, lspull #16 198 - orr r6, r6, r7, lspush #16 199 - stmia r0!, {r3 - r6} @ Shouldnt fault 200 - bpl .Lc2u_2cpy8lp 201 - 202 - .Lc2u_2rem8lp: tst ip, #8 203 - movne r3, r7, lspull #16 204 - ldmneia r1!, {r4, r7} 205 - orrne r3, r3, r4, lspush #16 206 - movne r4, r4, lspull #16 207 - orrne r4, r4, r7, lspush #16 208 - stmneia r0!, {r3 - r4} @ Shouldnt fault 209 - tst ip, #4 210 - movne r3, r7, lspull #16 211 - ldrne r7, [r1], #4 212 - orrne r3, r3, r7, lspush #16 213 - TUSER( strne) r3, [r0], #4 @ Shouldnt fault 214 - ands ip, ip, #3 215 - beq .Lc2u_2fupi 216 - .Lc2u_2nowords: mov r3, r7, get_byte_2 217 - teq ip, #0 218 - beq .Lc2u_finished 219 - cmp ip, #2 220 - USER( TUSER( strb) r3, [r0], #1) @ May fault 221 - movge r3, r7, get_byte_3 222 - USER( TUSER( strgeb) r3, [r0], #1) @ May fault 223 - ldrgtb r3, [r1], #0 224 - USER( TUSER( strgtb) r3, [r0], #1) @ May fault 225 - b .Lc2u_finished 226 - 227 - .Lc2u_3fupi: subs r2, r2, #4 228 - addmi ip, r2, #4 229 - bmi .Lc2u_3nowords 230 - mov r3, r7, lspull #24 231 - ldr r7, [r1], #4 232 - orr r3, r3, r7, lspush #8 233 - USER( TUSER( str) r3, [r0], #4) @ May fault 234 - mov ip, r0, lsl #32 - PAGE_SHIFT 235 - rsb ip, ip, #0 236 - movs ip, ip, lsr #32 - PAGE_SHIFT 237 - beq .Lc2u_3fupi 238 - cmp r2, ip 239 - movlt ip, r2 240 - sub r2, r2, ip 241 - subs ip, ip, #16 242 - blt .Lc2u_3rem8lp 243 - 244 - .Lc2u_3cpy8lp: mov r3, r7, lspull #24 245 - ldmia r1!, {r4 - r7} 246 - subs ip, ip, #16 247 - orr r3, r3, r4, lspush #8 248 - mov r4, r4, lspull #24 249 - orr r4, r4, r5, lspush #8 250 - mov r5, r5, lspull #24 251 - orr r5, r5, r6, lspush #8 252 - mov r6, r6, lspull #24 253 - orr r6, r6, r7, lspush #8 254 - stmia r0!, {r3 - r6} @ Shouldnt fault 255 - bpl .Lc2u_3cpy8lp 256 - 257 - .Lc2u_3rem8lp: tst ip, #8 258 - movne r3, r7, lspull #24 259 - ldmneia r1!, {r4, r7} 260 - orrne r3, r3, r4, lspush #8 261 - movne r4, r4, lspull #24 262 - orrne r4, r4, r7, lspush #8 263 - stmneia r0!, {r3 - r4} @ Shouldnt fault 264 - tst 
ip, #4 265 - movne r3, r7, lspull #24 266 - ldrne r7, [r1], #4 267 - orrne r3, r3, r7, lspush #8 268 - TUSER( strne) r3, [r0], #4 @ Shouldnt fault 269 - ands ip, ip, #3 270 - beq .Lc2u_3fupi 271 - .Lc2u_3nowords: mov r3, r7, get_byte_3 272 - teq ip, #0 273 - beq .Lc2u_finished 274 - cmp ip, #2 275 - USER( TUSER( strb) r3, [r0], #1) @ May fault 276 - ldrgeb r3, [r1], #1 277 - USER( TUSER( strgeb) r3, [r0], #1) @ May fault 278 - ldrgtb r3, [r1], #0 279 - USER( TUSER( strgtb) r3, [r0], #1) @ May fault 280 - b .Lc2u_finished 281 - ENDPROC(__copy_to_user) 282 - 283 - .pushsection .fixup,"ax" 284 - .align 0 285 - 9001: ldmfd sp!, {r0, r4 - r7, pc} 286 - .popsection 287 - 288 - /* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); 289 - * Purpose : copy a block from user memory to kernel memory 290 - * Params : to - kernel memory 291 - * : from - user memory 292 - * : n - number of bytes to copy 293 - * Returns : Number of bytes NOT copied. 294 - */ 295 - .Lcfu_dest_not_aligned: 296 - rsb ip, ip, #4 297 - cmp ip, #2 298 - USER( TUSER( ldrb) r3, [r1], #1) @ May fault 299 - strb r3, [r0], #1 300 - USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault 301 - strgeb r3, [r0], #1 302 - USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault 303 - strgtb r3, [r0], #1 304 - sub r2, r2, ip 305 - b .Lcfu_dest_aligned 306 - 307 - ENTRY(__copy_from_user) 308 - stmfd sp!, {r0, r2, r4 - r7, lr} 309 - cmp r2, #4 310 - blt .Lcfu_not_enough 311 - ands ip, r0, #3 312 - bne .Lcfu_dest_not_aligned 313 - .Lcfu_dest_aligned: 314 - ands ip, r1, #3 315 - bne .Lcfu_src_not_aligned 316 - 317 - /* 318 - * Seeing as there has to be at least 8 bytes to copy, we can 319 - * copy one word, and force a user-mode page fault... 
320 - */ 321 - 322 - .Lcfu_0fupi: subs r2, r2, #4 323 - addmi ip, r2, #4 324 - bmi .Lcfu_0nowords 325 - USER( TUSER( ldr) r3, [r1], #4) 326 - str r3, [r0], #4 327 - mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 328 - rsb ip, ip, #0 329 - movs ip, ip, lsr #32 - PAGE_SHIFT 330 - beq .Lcfu_0fupi 331 - /* 332 - * ip = max no. of bytes to copy before needing another "strt" insn 333 - */ 334 - cmp r2, ip 335 - movlt ip, r2 336 - sub r2, r2, ip 337 - subs ip, ip, #32 338 - blt .Lcfu_0rem8lp 339 - 340 - .Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault 341 - stmia r0!, {r3 - r6} 342 - ldmia r1!, {r3 - r6} @ Shouldnt fault 343 - subs ip, ip, #32 344 - stmia r0!, {r3 - r6} 345 - bpl .Lcfu_0cpy8lp 346 - 347 - .Lcfu_0rem8lp: cmn ip, #16 348 - ldmgeia r1!, {r3 - r6} @ Shouldnt fault 349 - stmgeia r0!, {r3 - r6} 350 - tst ip, #8 351 - ldmneia r1!, {r3 - r4} @ Shouldnt fault 352 - stmneia r0!, {r3 - r4} 353 - tst ip, #4 354 - TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault 355 - strne r3, [r0], #4 356 - ands ip, ip, #3 357 - beq .Lcfu_0fupi 358 - .Lcfu_0nowords: teq ip, #0 359 - beq .Lcfu_finished 360 - .Lcfu_nowords: cmp ip, #2 361 - USER( TUSER( ldrb) r3, [r1], #1) @ May fault 362 - strb r3, [r0], #1 363 - USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault 364 - strgeb r3, [r0], #1 365 - USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault 366 - strgtb r3, [r0], #1 367 - b .Lcfu_finished 368 - 369 - .Lcfu_not_enough: 370 - movs ip, r2 371 - bne .Lcfu_nowords 372 - .Lcfu_finished: mov r0, #0 373 - add sp, sp, #8 374 - ldmfd sp!, {r4 - r7, pc} 375 - 376 - .Lcfu_src_not_aligned: 377 - bic r1, r1, #3 378 - USER( TUSER( ldr) r7, [r1], #4) @ May fault 379 - cmp ip, #2 380 - bgt .Lcfu_3fupi 381 - beq .Lcfu_2fupi 382 - .Lcfu_1fupi: subs r2, r2, #4 383 - addmi ip, r2, #4 384 - bmi .Lcfu_1nowords 385 - mov r3, r7, lspull #8 386 - USER( TUSER( ldr) r7, [r1], #4) @ May fault 387 - orr r3, r3, r7, lspush #24 388 - str r3, [r0], #4 389 - mov ip, r1, lsl #32 - PAGE_SHIFT 
390 - rsb ip, ip, #0 391 - movs ip, ip, lsr #32 - PAGE_SHIFT 392 - beq .Lcfu_1fupi 393 - cmp r2, ip 394 - movlt ip, r2 395 - sub r2, r2, ip 396 - subs ip, ip, #16 397 - blt .Lcfu_1rem8lp 398 - 399 - .Lcfu_1cpy8lp: mov r3, r7, lspull #8 400 - ldmia r1!, {r4 - r7} @ Shouldnt fault 401 - subs ip, ip, #16 402 - orr r3, r3, r4, lspush #24 403 - mov r4, r4, lspull #8 404 - orr r4, r4, r5, lspush #24 405 - mov r5, r5, lspull #8 406 - orr r5, r5, r6, lspush #24 407 - mov r6, r6, lspull #8 408 - orr r6, r6, r7, lspush #24 409 - stmia r0!, {r3 - r6} 410 - bpl .Lcfu_1cpy8lp 411 - 412 - .Lcfu_1rem8lp: tst ip, #8 413 - movne r3, r7, lspull #8 414 - ldmneia r1!, {r4, r7} @ Shouldnt fault 415 - orrne r3, r3, r4, lspush #24 416 - movne r4, r4, lspull #8 417 - orrne r4, r4, r7, lspush #24 418 - stmneia r0!, {r3 - r4} 419 - tst ip, #4 420 - movne r3, r7, lspull #8 421 - USER( TUSER( ldrne) r7, [r1], #4) @ May fault 422 - orrne r3, r3, r7, lspush #24 423 - strne r3, [r0], #4 424 - ands ip, ip, #3 425 - beq .Lcfu_1fupi 426 - .Lcfu_1nowords: mov r3, r7, get_byte_1 427 - teq ip, #0 428 - beq .Lcfu_finished 429 - cmp ip, #2 430 - strb r3, [r0], #1 431 - movge r3, r7, get_byte_2 432 - strgeb r3, [r0], #1 433 - movgt r3, r7, get_byte_3 434 - strgtb r3, [r0], #1 435 - b .Lcfu_finished 436 - 437 - .Lcfu_2fupi: subs r2, r2, #4 438 - addmi ip, r2, #4 439 - bmi .Lcfu_2nowords 440 - mov r3, r7, lspull #16 441 - USER( TUSER( ldr) r7, [r1], #4) @ May fault 442 - orr r3, r3, r7, lspush #16 443 - str r3, [r0], #4 444 - mov ip, r1, lsl #32 - PAGE_SHIFT 445 - rsb ip, ip, #0 446 - movs ip, ip, lsr #32 - PAGE_SHIFT 447 - beq .Lcfu_2fupi 448 - cmp r2, ip 449 - movlt ip, r2 450 - sub r2, r2, ip 451 - subs ip, ip, #16 452 - blt .Lcfu_2rem8lp 453 - 454 - 455 - .Lcfu_2cpy8lp: mov r3, r7, lspull #16 456 - ldmia r1!, {r4 - r7} @ Shouldnt fault 457 - subs ip, ip, #16 458 - orr r3, r3, r4, lspush #16 459 - mov r4, r4, lspull #16 460 - orr r4, r4, r5, lspush #16 461 - mov r5, r5, lspull #16 462 - orr r5, r5, r6, 
lspush #16 463 - mov r6, r6, lspull #16 464 - orr r6, r6, r7, lspush #16 465 - stmia r0!, {r3 - r6} 466 - bpl .Lcfu_2cpy8lp 467 - 468 - .Lcfu_2rem8lp: tst ip, #8 469 - movne r3, r7, lspull #16 470 - ldmneia r1!, {r4, r7} @ Shouldnt fault 471 - orrne r3, r3, r4, lspush #16 472 - movne r4, r4, lspull #16 473 - orrne r4, r4, r7, lspush #16 474 - stmneia r0!, {r3 - r4} 475 - tst ip, #4 476 - movne r3, r7, lspull #16 477 - USER( TUSER( ldrne) r7, [r1], #4) @ May fault 478 - orrne r3, r3, r7, lspush #16 479 - strne r3, [r0], #4 480 - ands ip, ip, #3 481 - beq .Lcfu_2fupi 482 - .Lcfu_2nowords: mov r3, r7, get_byte_2 483 - teq ip, #0 484 - beq .Lcfu_finished 485 - cmp ip, #2 486 - strb r3, [r0], #1 487 - movge r3, r7, get_byte_3 488 - strgeb r3, [r0], #1 489 - USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault 490 - strgtb r3, [r0], #1 491 - b .Lcfu_finished 492 - 493 - .Lcfu_3fupi: subs r2, r2, #4 494 - addmi ip, r2, #4 495 - bmi .Lcfu_3nowords 496 - mov r3, r7, lspull #24 497 - USER( TUSER( ldr) r7, [r1], #4) @ May fault 498 - orr r3, r3, r7, lspush #8 499 - str r3, [r0], #4 500 - mov ip, r1, lsl #32 - PAGE_SHIFT 501 - rsb ip, ip, #0 502 - movs ip, ip, lsr #32 - PAGE_SHIFT 503 - beq .Lcfu_3fupi 504 - cmp r2, ip 505 - movlt ip, r2 506 - sub r2, r2, ip 507 - subs ip, ip, #16 508 - blt .Lcfu_3rem8lp 509 - 510 - .Lcfu_3cpy8lp: mov r3, r7, lspull #24 511 - ldmia r1!, {r4 - r7} @ Shouldnt fault 512 - orr r3, r3, r4, lspush #8 513 - mov r4, r4, lspull #24 514 - orr r4, r4, r5, lspush #8 515 - mov r5, r5, lspull #24 516 - orr r5, r5, r6, lspush #8 517 - mov r6, r6, lspull #24 518 - orr r6, r6, r7, lspush #8 519 - stmia r0!, {r3 - r6} 520 - subs ip, ip, #16 521 - bpl .Lcfu_3cpy8lp 522 - 523 - .Lcfu_3rem8lp: tst ip, #8 524 - movne r3, r7, lspull #24 525 - ldmneia r1!, {r4, r7} @ Shouldnt fault 526 - orrne r3, r3, r4, lspush #8 527 - movne r4, r4, lspull #24 528 - orrne r4, r4, r7, lspush #8 529 - stmneia r0!, {r3 - r4} 530 - tst ip, #4 531 - movne r3, r7, lspull #24 532 - USER( TUSER( 
ldrne) r7, [r1], #4) @ May fault 533 - orrne r3, r3, r7, lspush #8 534 - strne r3, [r0], #4 535 - ands ip, ip, #3 536 - beq .Lcfu_3fupi 537 - .Lcfu_3nowords: mov r3, r7, get_byte_3 538 - teq ip, #0 539 - beq .Lcfu_finished 540 - cmp ip, #2 541 - strb r3, [r0], #1 542 - USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault 543 - strgeb r3, [r0], #1 544 - USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault 545 - strgtb r3, [r0], #1 546 - b .Lcfu_finished 547 - ENDPROC(__copy_from_user) 548 - 549 - .pushsection .fixup,"ax" 550 - .align 0 551 - /* 552 - * We took an exception. r0 contains a pointer to 553 - * the byte not copied. 554 - */ 555 - 9001: ldr r2, [sp], #4 @ void *to 556 - sub r2, r0, r2 @ bytes copied 557 - ldr r1, [sp], #4 @ unsigned long count 558 - subs r4, r1, r2 @ bytes left to copy 559 - movne r1, r4 560 - blne __memzero 561 - mov r0, r4 562 - ldmfd sp!, {r4 - r7, pc} 563 - .popsection 564 -
+50
arch/arm/mach-exynos/firmware.c
··· 17 17 #include <asm/cacheflush.h> 18 18 #include <asm/cputype.h> 19 19 #include <asm/firmware.h> 20 + #include <asm/hardware/cache-l2x0.h> 20 21 #include <asm/suspend.h> 21 22 22 23 #include <mach/map.h> ··· 137 136 .resume = IS_ENABLED(CONFIG_EXYNOS_CPU_SUSPEND) ? exynos_resume : NULL, 138 137 }; 139 138 139 + static void exynos_l2_write_sec(unsigned long val, unsigned reg) 140 + { 141 + static int l2cache_enabled; 142 + 143 + switch (reg) { 144 + case L2X0_CTRL: 145 + if (val & L2X0_CTRL_EN) { 146 + /* 147 + * Before the cache can be enabled, due to firmware 148 + * design, SMC_CMD_L2X0INVALL must be called. 149 + */ 150 + if (!l2cache_enabled) { 151 + exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0); 152 + l2cache_enabled = 1; 153 + } 154 + } else { 155 + l2cache_enabled = 0; 156 + } 157 + exynos_smc(SMC_CMD_L2X0CTRL, val, 0, 0); 158 + break; 159 + 160 + case L2X0_DEBUG_CTRL: 161 + exynos_smc(SMC_CMD_L2X0DEBUG, val, 0, 0); 162 + break; 163 + 164 + default: 165 + WARN_ONCE(1, "%s: ignoring write to reg 0x%x\n", __func__, reg); 166 + } 167 + } 168 + 169 + static void exynos_l2_configure(const struct l2x0_regs *regs) 170 + { 171 + exynos_smc(SMC_CMD_L2X0SETUP1, regs->tag_latency, regs->data_latency, 172 + regs->prefetch_ctrl); 173 + exynos_smc(SMC_CMD_L2X0SETUP2, regs->pwr_ctrl, regs->aux_ctrl, 0); 174 + } 175 + 140 176 void __init exynos_firmware_init(void) 141 177 { 142 178 struct device_node *nd; ··· 193 155 pr_info("Running under secure firmware.\n"); 194 156 195 157 register_firmware_ops(&exynos_firmware_ops); 158 + 159 + /* 160 + * Exynos 4 SoCs (based on Cortex A9 and equipped with L2C-310), 161 + * running under secure firmware, require certain registers of L2 162 + * cache controller to be written in secure mode. Here .write_sec 163 + * callback is provided to perform necessary SMC calls. 
164 + */ 165 + if (IS_ENABLED(CONFIG_CACHE_L2X0) && 166 + read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { 167 + outer_cache.write_sec = exynos_l2_write_sec; 168 + outer_cache.configure = exynos_l2_configure; 169 + } 196 170 }
+46
arch/arm/mach-exynos/sleep.S
··· 16 16 */ 17 17 18 18 #include <linux/linkage.h> 19 + #include <asm/asm-offsets.h> 20 + #include <asm/hardware/cache-l2x0.h> 19 21 #include "smc.h" 20 22 21 23 #define CPU_MASK 0xff0ffff0 ··· 76 74 mov r0, #SMC_CMD_C15RESUME 77 75 dsb 78 76 smc #0 77 + #ifdef CONFIG_CACHE_L2X0 78 + adr r0, 1f 79 + ldr r2, [r0] 80 + add r0, r2, r0 81 + 82 + /* Check that the address has been initialised. */ 83 + ldr r1, [r0, #L2X0_R_PHY_BASE] 84 + teq r1, #0 85 + beq skip_l2x0 86 + 87 + /* Check if controller has been enabled. */ 88 + ldr r2, [r1, #L2X0_CTRL] 89 + tst r2, #0x1 90 + bne skip_l2x0 91 + 92 + ldr r1, [r0, #L2X0_R_TAG_LATENCY] 93 + ldr r2, [r0, #L2X0_R_DATA_LATENCY] 94 + ldr r3, [r0, #L2X0_R_PREFETCH_CTRL] 95 + mov r0, #SMC_CMD_L2X0SETUP1 96 + smc #0 97 + 98 + /* Reload saved regs pointer because smc corrupts registers. */ 99 + adr r0, 1f 100 + ldr r2, [r0] 101 + add r0, r2, r0 102 + 103 + ldr r1, [r0, #L2X0_R_PWR_CTRL] 104 + ldr r2, [r0, #L2X0_R_AUX_CTRL] 105 + mov r0, #SMC_CMD_L2X0SETUP2 106 + smc #0 107 + 108 + mov r0, #SMC_CMD_L2X0INVALL 109 + smc #0 110 + 111 + mov r1, #1 112 + mov r0, #SMC_CMD_L2X0CTRL 113 + smc #0 114 + skip_l2x0: 115 + #endif /* CONFIG_CACHE_L2X0 */ 79 116 skip_cp15: 80 117 b cpu_resume 81 118 ENDPROC(exynos_cpu_resume_ns) ··· 124 83 .globl cp15_save_power 125 84 cp15_save_power: 126 85 .long 0 @ cp15 power control 86 + 87 + #ifdef CONFIG_CACHE_L2X0 88 + .align 89 + 1: .long l2x0_saved_regs - . 90 + #endif /* CONFIG_CACHE_L2X0 */
+6
arch/arm/mach-omap2/board-generic.c
··· 171 171 }; 172 172 173 173 DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)") 174 + .l2c_aux_val = OMAP_L2C_AUX_CTRL, 175 + .l2c_aux_mask = 0xcf9fffff, 176 + .l2c_write_sec = omap4_l2c310_write_sec, 174 177 .reserve = omap_reserve, 175 178 .smp = smp_ops(omap4_smp_ops), 176 179 .map_io = omap4_map_io, ··· 217 214 }; 218 215 219 216 DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)") 217 + .l2c_aux_val = OMAP_L2C_AUX_CTRL, 218 + .l2c_aux_mask = 0xcf9fffff, 219 + .l2c_write_sec = omap4_l2c310_write_sec, 220 220 .map_io = am33xx_map_io, 221 221 .init_early = am43xx_init_early, 222 222 .init_late = am43xx_init_late,
+8
arch/arm/mach-omap2/common.h
··· 35 35 #include <linux/irqchip/irq-omap-intc.h> 36 36 37 37 #include <asm/proc-fns.h> 38 + #include <asm/hardware/cache-l2x0.h> 38 39 39 40 #include "i2c.h" 40 41 #include "serial.h" ··· 95 94 extern void omap4_local_timer_init(void); 96 95 #ifdef CONFIG_CACHE_L2X0 97 96 int omap_l2_cache_init(void); 97 + #define OMAP_L2C_AUX_CTRL (L2C_AUX_CTRL_SHARED_OVERRIDE | \ 98 + L310_AUX_CTRL_DATA_PREFETCH | \ 99 + L310_AUX_CTRL_INSTR_PREFETCH) 100 + void omap4_l2c310_write_sec(unsigned long val, unsigned reg); 98 101 #else 99 102 static inline int omap_l2_cache_init(void) 100 103 { 101 104 return 0; 102 105 } 106 + 107 + #define OMAP_L2C_AUX_CTRL 0 108 + #define omap4_l2c310_write_sec NULL 103 109 #endif 104 110 extern void omap5_realtime_timer_init(void); 105 111
+1 -15
arch/arm/mach-omap2/omap4-common.c
··· 166 166 return l2cache_base; 167 167 } 168 168 169 - static void omap4_l2c310_write_sec(unsigned long val, unsigned reg) 169 + void omap4_l2c310_write_sec(unsigned long val, unsigned reg) 170 170 { 171 171 unsigned smc_op; 172 172 ··· 201 201 202 202 int __init omap_l2_cache_init(void) 203 203 { 204 - u32 aux_ctrl; 205 - 206 204 /* Static mapping, never released */ 207 205 l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); 208 206 if (WARN_ON(!l2cache_base)) 209 207 return -ENOMEM; 210 - 211 - /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */ 212 - aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE | 213 - L310_AUX_CTRL_DATA_PREFETCH | 214 - L310_AUX_CTRL_INSTR_PREFETCH; 215 - 216 - outer_cache.write_sec = omap4_l2c310_write_sec; 217 - if (of_have_populated_dt()) 218 - l2x0_of_init(aux_ctrl, 0xcf9fffff); 219 - else 220 - l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff); 221 - 222 208 return 0; 223 209 } 224 210 #endif
+2 -2
arch/arm/mach-qcom/platsmp.c
··· 44 44 #define APCS_SAW2_VCTL 0x14 45 45 #define APCS_SAW2_2_VCTL 0x1c 46 46 47 - extern void secondary_startup(void); 47 + extern void secondary_startup_arm(void); 48 48 49 49 static DEFINE_SPINLOCK(boot_lock); 50 50 ··· 337 337 flags |= cold_boot_flags[map]; 338 338 } 339 339 340 - if (scm_set_boot_addr(virt_to_phys(secondary_startup), flags)) { 340 + if (scm_set_boot_addr(virt_to_phys(secondary_startup_arm), flags)) { 341 341 for_each_present_cpu(cpu) { 342 342 if (cpu == smp_processor_id()) 343 343 continue;
+1 -1
arch/arm/mach-sa1100/Makefile
··· 3 3 # 4 4 5 5 # Common support 6 - obj-y := clock.o generic.o irq.o time.o #nmi-oopser.o 6 + obj-y := clock.o generic.o irq.o #nmi-oopser.o 7 7 8 8 # Specific board support 9 9 obj-$(CONFIG_SA1100_ASSABET) += assabet.o
+12
arch/arm/mach-sa1100/clock.c
··· 119 119 120 120 static DEFINE_CLK(cpu, &clk_cpu_ops); 121 121 122 + static unsigned long clk_36864_get_rate(struct clk *clk) 123 + { 124 + return 3686400; 125 + } 126 + 127 + static struct clkops clk_36864_ops = { 128 + .get_rate = clk_36864_get_rate, 129 + }; 130 + 131 + static DEFINE_CLK(36864, &clk_36864_ops); 132 + 122 133 static struct clk_lookup sa11xx_clkregs[] = { 123 134 CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27), 124 135 CLKDEV_INIT("sa1100-rtc", NULL, NULL), ··· 137 126 CLKDEV_INIT("sa11x0-pcmcia", NULL, &clk_cpu), 138 127 /* sa1111 names devices using internal offsets, PCMCIA is at 0x1800 */ 139 128 CLKDEV_INIT("1800", NULL, &clk_cpu), 129 + CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864), 140 130 }; 141 131 142 132 static int __init sa11xx_clk_init(void)
+1 -2
arch/arm/mach-sa1100/collie.c
··· 371 371 PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | 372 372 PPC_TXD1 | PPC_TXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM; 373 373 374 - PWER = _COLLIE_GPIO_AC_IN | _COLLIE_GPIO_CO | _COLLIE_GPIO_ON_KEY | 375 - _COLLIE_GPIO_WAKEUP | _COLLIE_GPIO_nREMOCON_INT | PWER_RTC; 374 + PWER = 0; 376 375 377 376 PGSR = _COLLIE_GPIO_nREMOCON_ON; 378 377
+6
arch/arm/mach-sa1100/generic.c
··· 33 33 #include <mach/irqs.h> 34 34 35 35 #include "generic.h" 36 + #include <clocksource/pxa.h> 36 37 37 38 unsigned int reset_status; 38 39 EXPORT_SYMBOL(reset_status); ··· 368 367 void __init sa1100_map_io(void) 369 368 { 370 369 iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc)); 370 + } 371 + 372 + void __init sa1100_timer_init(void) 373 + { 374 + pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x90000000), 3686400); 371 375 } 372 376 373 377 /*
+42 -31
arch/arm/mach-sa1100/include/mach/irqs.h
··· 8 8 * 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs. 9 9 */ 10 10 11 - #define IRQ_GPIO0 1 12 - #define IRQ_GPIO1 2 13 - #define IRQ_GPIO2 3 14 - #define IRQ_GPIO3 4 15 - #define IRQ_GPIO4 5 16 - #define IRQ_GPIO5 6 17 - #define IRQ_GPIO6 7 18 - #define IRQ_GPIO7 8 19 - #define IRQ_GPIO8 9 20 - #define IRQ_GPIO9 10 21 - #define IRQ_GPIO10 11 11 + #define IRQ_GPIO0_SC 1 12 + #define IRQ_GPIO1_SC 2 13 + #define IRQ_GPIO2_SC 3 14 + #define IRQ_GPIO3_SC 4 15 + #define IRQ_GPIO4_SC 5 16 + #define IRQ_GPIO5_SC 6 17 + #define IRQ_GPIO6_SC 7 18 + #define IRQ_GPIO7_SC 8 19 + #define IRQ_GPIO8_SC 9 20 + #define IRQ_GPIO9_SC 10 21 + #define IRQ_GPIO10_SC 11 22 22 #define IRQ_GPIO11_27 12 23 23 #define IRQ_LCD 13 /* LCD controller */ 24 24 #define IRQ_Ser0UDC 14 /* Ser. port 0 UDC */ ··· 41 41 #define IRQ_RTC1Hz 31 /* RTC 1 Hz clock */ 42 42 #define IRQ_RTCAlrm 32 /* RTC Alarm */ 43 43 44 - #define IRQ_GPIO11 33 45 - #define IRQ_GPIO12 34 46 - #define IRQ_GPIO13 35 47 - #define IRQ_GPIO14 36 48 - #define IRQ_GPIO15 37 49 - #define IRQ_GPIO16 38 50 - #define IRQ_GPIO17 39 51 - #define IRQ_GPIO18 40 52 - #define IRQ_GPIO19 41 53 - #define IRQ_GPIO20 42 54 - #define IRQ_GPIO21 43 55 - #define IRQ_GPIO22 44 56 - #define IRQ_GPIO23 45 57 - #define IRQ_GPIO24 46 58 - #define IRQ_GPIO25 47 59 - #define IRQ_GPIO26 48 60 - #define IRQ_GPIO27 49 44 + #define IRQ_GPIO0 33 45 + #define IRQ_GPIO1 34 46 + #define IRQ_GPIO2 35 47 + #define IRQ_GPIO3 36 48 + #define IRQ_GPIO4 37 49 + #define IRQ_GPIO5 38 50 + #define IRQ_GPIO6 39 51 + #define IRQ_GPIO7 40 52 + #define IRQ_GPIO8 41 53 + #define IRQ_GPIO9 42 54 + #define IRQ_GPIO10 43 55 + #define IRQ_GPIO11 44 56 + #define IRQ_GPIO12 45 57 + #define IRQ_GPIO13 46 58 + #define IRQ_GPIO14 47 59 + #define IRQ_GPIO15 48 60 + #define IRQ_GPIO16 49 61 + #define IRQ_GPIO17 50 62 + #define IRQ_GPIO18 51 63 + #define IRQ_GPIO19 52 64 + #define IRQ_GPIO20 53 65 + #define IRQ_GPIO21 54 66 + #define IRQ_GPIO22 55 67 + #define IRQ_GPIO23 
56 68 + #define IRQ_GPIO24 57 69 + #define IRQ_GPIO25 58 70 + #define IRQ_GPIO26 59 71 + #define IRQ_GPIO27 60 61 72 62 73 /* 63 74 * The next 16 interrupts are for board specific purposes. Since 64 75 * the kernel can only run on one machine at a time, we can re-use 65 76 * these. If you need more, increase IRQ_BOARD_END, but keep it 66 - * within sensible limits. IRQs 49 to 64 are available. 77 + * within sensible limits. IRQs 61 to 76 are available. 67 78 */ 68 - #define IRQ_BOARD_START 50 69 - #define IRQ_BOARD_END 66 79 + #define IRQ_BOARD_START 61 80 + #define IRQ_BOARD_END 77 70 81 71 82 /* 72 83 * Figure out the MAX IRQ number.
+4 -199
arch/arm/mach-sa1100/irq.c
··· 80 80 81 81 static struct irq_domain *sa1100_normal_irqdomain; 82 82 83 - /* 84 - * SA1100 GPIO edge detection for IRQs: 85 - * IRQs are generated on Falling-Edge, Rising-Edge, or both. 86 - * Use this instead of directly setting GRER/GFER. 87 - */ 88 - static int GPIO_IRQ_rising_edge; 89 - static int GPIO_IRQ_falling_edge; 90 - static int GPIO_IRQ_mask = (1 << 11) - 1; 91 - 92 - static int sa1100_gpio_type(struct irq_data *d, unsigned int type) 93 - { 94 - unsigned int mask; 95 - 96 - mask = BIT(d->hwirq); 97 - 98 - if (type == IRQ_TYPE_PROBE) { 99 - if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask) 100 - return 0; 101 - type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 102 - } 103 - 104 - if (type & IRQ_TYPE_EDGE_RISING) { 105 - GPIO_IRQ_rising_edge |= mask; 106 - } else 107 - GPIO_IRQ_rising_edge &= ~mask; 108 - if (type & IRQ_TYPE_EDGE_FALLING) { 109 - GPIO_IRQ_falling_edge |= mask; 110 - } else 111 - GPIO_IRQ_falling_edge &= ~mask; 112 - 113 - GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 114 - GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 115 - 116 - return 0; 117 - } 118 - 119 - /* 120 - * GPIO IRQs must be acknowledged. 121 - */ 122 - static void sa1100_gpio_ack(struct irq_data *d) 123 - { 124 - GEDR = BIT(d->hwirq); 125 - } 126 - 127 - static int sa1100_gpio_wake(struct irq_data *d, unsigned int on) 128 - { 129 - if (on) 130 - PWER |= BIT(d->hwirq); 131 - else 132 - PWER &= ~BIT(d->hwirq); 133 - return 0; 134 - } 135 - 136 - /* 137 - * This is for IRQs from 0 to 10. 
138 - */ 139 - static struct irq_chip sa1100_low_gpio_chip = { 140 - .name = "GPIO-l", 141 - .irq_ack = sa1100_gpio_ack, 142 - .irq_mask = sa1100_mask_irq, 143 - .irq_unmask = sa1100_unmask_irq, 144 - .irq_set_type = sa1100_gpio_type, 145 - .irq_set_wake = sa1100_gpio_wake, 146 - }; 147 - 148 - static int sa1100_low_gpio_irqdomain_map(struct irq_domain *d, 149 - unsigned int irq, irq_hw_number_t hwirq) 150 - { 151 - irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, 152 - handle_edge_irq); 153 - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 154 - 155 - return 0; 156 - } 157 - 158 - static struct irq_domain_ops sa1100_low_gpio_irqdomain_ops = { 159 - .map = sa1100_low_gpio_irqdomain_map, 160 - .xlate = irq_domain_xlate_onetwocell, 161 - }; 162 - 163 - static struct irq_domain *sa1100_low_gpio_irqdomain; 164 - 165 - /* 166 - * IRQ11 (GPIO11 through 27) handler. We enter here with the 167 - * irq_controller_lock held, and IRQs disabled. Decode the IRQ 168 - * and call the handler. 169 - */ 170 - static void 171 - sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc) 172 - { 173 - unsigned int mask; 174 - 175 - mask = GEDR & 0xfffff800; 176 - do { 177 - /* 178 - * clear down all currently active IRQ sources. 179 - * We will be processing them all. 180 - */ 181 - GEDR = mask; 182 - 183 - irq = IRQ_GPIO11; 184 - mask >>= 11; 185 - do { 186 - if (mask & 1) 187 - generic_handle_irq(irq); 188 - mask >>= 1; 189 - irq++; 190 - } while (mask); 191 - 192 - mask = GEDR & 0xfffff800; 193 - } while (mask); 194 - } 195 - 196 - /* 197 - * Like GPIO0 to 10, GPIO11-27 IRQs need to be handled specially. 198 - * In addition, the IRQs are all collected up into one bit in the 199 - * interrupt controller registers. 
200 - */ 201 - static void sa1100_high_gpio_mask(struct irq_data *d) 202 - { 203 - unsigned int mask = BIT(d->hwirq); 204 - 205 - GPIO_IRQ_mask &= ~mask; 206 - 207 - GRER &= ~mask; 208 - GFER &= ~mask; 209 - } 210 - 211 - static void sa1100_high_gpio_unmask(struct irq_data *d) 212 - { 213 - unsigned int mask = BIT(d->hwirq); 214 - 215 - GPIO_IRQ_mask |= mask; 216 - 217 - GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 218 - GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 219 - } 220 - 221 - static struct irq_chip sa1100_high_gpio_chip = { 222 - .name = "GPIO-h", 223 - .irq_ack = sa1100_gpio_ack, 224 - .irq_mask = sa1100_high_gpio_mask, 225 - .irq_unmask = sa1100_high_gpio_unmask, 226 - .irq_set_type = sa1100_gpio_type, 227 - .irq_set_wake = sa1100_gpio_wake, 228 - }; 229 - 230 - static int sa1100_high_gpio_irqdomain_map(struct irq_domain *d, 231 - unsigned int irq, irq_hw_number_t hwirq) 232 - { 233 - irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, 234 - handle_edge_irq); 235 - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 236 - 237 - return 0; 238 - } 239 - 240 - static struct irq_domain_ops sa1100_high_gpio_irqdomain_ops = { 241 - .map = sa1100_high_gpio_irqdomain_map, 242 - .xlate = irq_domain_xlate_onetwocell, 243 - }; 244 - 245 - static struct irq_domain *sa1100_high_gpio_irqdomain; 246 - 247 83 static struct resource irq_resource = 248 84 DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs"); 249 85 ··· 106 270 IC_GPIO6|IC_GPIO5|IC_GPIO4|IC_GPIO3|IC_GPIO2| 107 271 IC_GPIO1|IC_GPIO0); 108 272 109 - /* 110 - * Set the appropriate edges for wakeup. 111 - */ 112 - GRER = PWER & GPIO_IRQ_rising_edge; 113 - GFER = PWER & GPIO_IRQ_falling_edge; 114 - 115 - /* 116 - * Clear any pending GPIO interrupts. 
117 - */ 118 - GEDR = GEDR; 119 - 120 273 return 0; 121 274 } 122 275 ··· 116 291 if (st->saved) { 117 292 ICCR = st->iccr; 118 293 ICLR = st->iclr; 119 - 120 - GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 121 - GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 122 294 123 295 ICMR = st->icmr; 124 296 } ··· 147 325 if (mask == 0) 148 326 break; 149 327 150 - handle_IRQ(ffs(mask) - 1 + IRQ_GPIO0, regs); 328 + handle_domain_irq(sa1100_normal_irqdomain, 329 + ffs(mask) - 1, regs); 151 330 } while (1); 152 331 } 153 332 ··· 162 339 /* all IRQs are IRQ, not FIQ */ 163 340 ICLR = 0; 164 341 165 - /* clear all GPIO edge detects */ 166 - GFER = 0; 167 - GRER = 0; 168 - GEDR = -1; 169 - 170 342 /* 171 343 * Whatever the doc says, this has to be set for the wait-on-irq 172 344 * instruction to work... on a SA1100 rev 9 at least. 173 345 */ 174 346 ICCR = 1; 175 347 176 - sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL, 177 - 11, IRQ_GPIO0, 0, 178 - &sa1100_low_gpio_irqdomain_ops, NULL); 179 - 180 - sa1100_normal_irqdomain = irq_domain_add_legacy(NULL, 181 - 21, IRQ_GPIO11_27, 11, 348 + sa1100_normal_irqdomain = irq_domain_add_simple(NULL, 349 + 32, IRQ_GPIO0_SC, 182 350 &sa1100_normal_irqdomain_ops, NULL); 183 - 184 - sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL, 185 - 17, IRQ_GPIO11, 11, 186 - &sa1100_high_gpio_irqdomain_ops, NULL); 187 - 188 - /* 189 - * Install handler for GPIO 11-27 edge detect interrupts 190 - */ 191 - irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); 192 351 193 352 set_handle_irq(sa1100_handle_irq); 194 353
+1
arch/arm/mach-sa1100/pm.c
··· 81 81 /* 82 82 * Ensure not to come back here if it wasn't intended 83 83 */ 84 + RCSR = RCSR_SMR; 84 85 PSPR = 0; 85 86 86 87 /*
-139
arch/arm/mach-sa1100/time.c
··· 1 - /* 2 - * linux/arch/arm/mach-sa1100/time.c 3 - * 4 - * Copyright (C) 1998 Deborah Wallach. 5 - * Twiddles (C) 1999 Hugo Fiennes <hugo@empeg.com> 6 - * 7 - * 2000/03/29 (C) Nicolas Pitre <nico@fluxnic.net> 8 - * Rewritten: big cleanup, much simpler, better HZ accuracy. 9 - * 10 - */ 11 - #include <linux/init.h> 12 - #include <linux/kernel.h> 13 - #include <linux/errno.h> 14 - #include <linux/interrupt.h> 15 - #include <linux/irq.h> 16 - #include <linux/timex.h> 17 - #include <linux/clockchips.h> 18 - #include <linux/sched_clock.h> 19 - 20 - #include <asm/mach/time.h> 21 - #include <mach/hardware.h> 22 - #include <mach/irqs.h> 23 - 24 - #define SA1100_CLOCK_FREQ 3686400 25 - #define SA1100_LATCH DIV_ROUND_CLOSEST(SA1100_CLOCK_FREQ, HZ) 26 - 27 - static u64 notrace sa1100_read_sched_clock(void) 28 - { 29 - return readl_relaxed(OSCR); 30 - } 31 - 32 - #define MIN_OSCR_DELTA 2 33 - 34 - static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id) 35 - { 36 - struct clock_event_device *c = dev_id; 37 - 38 - /* Disarm the compare/match, signal the event. */ 39 - writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 40 - writel_relaxed(OSSR_M0, OSSR); 41 - c->event_handler(c); 42 - 43 - return IRQ_HANDLED; 44 - } 45 - 46 - static int 47 - sa1100_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c) 48 - { 49 - unsigned long next, oscr; 50 - 51 - writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); 52 - next = readl_relaxed(OSCR) + delta; 53 - writel_relaxed(next, OSMR0); 54 - oscr = readl_relaxed(OSCR); 55 - 56 - return (signed)(next - oscr) <= MIN_OSCR_DELTA ? 
-ETIME : 0; 57 - } 58 - 59 - static void 60 - sa1100_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c) 61 - { 62 - switch (mode) { 63 - case CLOCK_EVT_MODE_ONESHOT: 64 - case CLOCK_EVT_MODE_UNUSED: 65 - case CLOCK_EVT_MODE_SHUTDOWN: 66 - writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 67 - writel_relaxed(OSSR_M0, OSSR); 68 - break; 69 - 70 - case CLOCK_EVT_MODE_RESUME: 71 - case CLOCK_EVT_MODE_PERIODIC: 72 - break; 73 - } 74 - } 75 - 76 - #ifdef CONFIG_PM 77 - unsigned long osmr[4], oier; 78 - 79 - static void sa1100_timer_suspend(struct clock_event_device *cedev) 80 - { 81 - osmr[0] = readl_relaxed(OSMR0); 82 - osmr[1] = readl_relaxed(OSMR1); 83 - osmr[2] = readl_relaxed(OSMR2); 84 - osmr[3] = readl_relaxed(OSMR3); 85 - oier = readl_relaxed(OIER); 86 - } 87 - 88 - static void sa1100_timer_resume(struct clock_event_device *cedev) 89 - { 90 - writel_relaxed(0x0f, OSSR); 91 - writel_relaxed(osmr[0], OSMR0); 92 - writel_relaxed(osmr[1], OSMR1); 93 - writel_relaxed(osmr[2], OSMR2); 94 - writel_relaxed(osmr[3], OSMR3); 95 - writel_relaxed(oier, OIER); 96 - 97 - /* 98 - * OSMR0 is the system timer: make sure OSCR is sufficiently behind 99 - */ 100 - writel_relaxed(OSMR0 - SA1100_LATCH, OSCR); 101 - } 102 - #else 103 - #define sa1100_timer_suspend NULL 104 - #define sa1100_timer_resume NULL 105 - #endif 106 - 107 - static struct clock_event_device ckevt_sa1100_osmr0 = { 108 - .name = "osmr0", 109 - .features = CLOCK_EVT_FEAT_ONESHOT, 110 - .rating = 200, 111 - .set_next_event = sa1100_osmr0_set_next_event, 112 - .set_mode = sa1100_osmr0_set_mode, 113 - .suspend = sa1100_timer_suspend, 114 - .resume = sa1100_timer_resume, 115 - }; 116 - 117 - static struct irqaction sa1100_timer_irq = { 118 - .name = "ost0", 119 - .flags = IRQF_TIMER | IRQF_IRQPOLL, 120 - .handler = sa1100_ost0_interrupt, 121 - .dev_id = &ckevt_sa1100_osmr0, 122 - }; 123 - 124 - void __init sa1100_timer_init(void) 125 - { 126 - writel_relaxed(0, OIER); 127 - 
writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 128 - 129 - sched_clock_register(sa1100_read_sched_clock, 32, 3686400); 130 - 131 - ckevt_sa1100_osmr0.cpumask = cpumask_of(0); 132 - 133 - setup_irq(IRQ_OST0, &sa1100_timer_irq); 134 - 135 - clocksource_mmio_init(OSCR, "oscr", SA1100_CLOCK_FREQ, 200, 32, 136 - clocksource_mmio_readl_up); 137 - clockevents_config_and_register(&ckevt_sa1100_osmr0, 3686400, 138 - MIN_OSCR_DELTA * 2, 0x7fffffff); 139 - }
+1
arch/arm/mm/Kconfig
··· 1012 1012 1013 1013 config ARM_KERNMEM_PERMS 1014 1014 bool "Restrict kernel memory permissions" 1015 + depends on MMU 1015 1016 help 1016 1017 If this is set, kernel memory other than kernel text (and rodata) 1017 1018 will be made non-executable. The tradeoff is that each region is
+231 -210
arch/arm/mm/cache-l2x0.c
··· 41 41 void (*enable)(void __iomem *, u32, unsigned); 42 42 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); 43 43 void (*save)(void __iomem *); 44 + void (*configure)(void __iomem *); 44 45 struct outer_cache_fns outer_cache; 45 46 }; 46 47 47 48 #define CACHE_LINE_SIZE 32 48 49 49 50 static void __iomem *l2x0_base; 51 + static const struct l2c_init_data *l2x0_data; 50 52 static DEFINE_RAW_SPINLOCK(l2x0_lock); 51 53 static u32 l2x0_way_mask; /* Bitmask of active ways */ 52 54 static u32 l2x0_size; ··· 108 106 } 109 107 } 110 108 109 + static void l2c_configure(void __iomem *base) 110 + { 111 + if (outer_cache.configure) { 112 + outer_cache.configure(&l2x0_saved_regs); 113 + return; 114 + } 115 + 116 + if (l2x0_data->configure) 117 + l2x0_data->configure(base); 118 + 119 + l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL); 120 + } 121 + 111 122 /* 112 123 * Enable the L2 cache controller. This function must only be 113 124 * called when the cache controller is known to be disabled. ··· 129 114 { 130 115 unsigned long flags; 131 116 132 - l2c_write_sec(aux, base, L2X0_AUX_CTRL); 117 + /* Do not touch the controller if already enabled. 
*/ 118 + if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN) 119 + return; 120 + 121 + l2x0_saved_regs.aux_ctrl = aux; 122 + l2c_configure(base); 133 123 134 124 l2c_unlock(base, num_lock); 135 125 ··· 156 136 dsb(st); 157 137 } 158 138 159 - #ifdef CONFIG_CACHE_PL310 160 - static inline void cache_wait(void __iomem *reg, unsigned long mask) 161 - { 162 - /* cache operations by line are atomic on PL310 */ 163 - } 164 - #else 165 - #define cache_wait l2c_wait_mask 166 - #endif 167 - 168 - static inline void cache_sync(void) 169 - { 170 - void __iomem *base = l2x0_base; 171 - 172 - writel_relaxed(0, base + sync_reg_offset); 173 - cache_wait(base + L2X0_CACHE_SYNC, 1); 174 - } 175 - 176 - #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) 177 - static inline void debug_writel(unsigned long val) 178 - { 179 - l2c_set_debug(l2x0_base, val); 180 - } 181 - #else 182 - /* Optimised out for non-errata case */ 183 - static inline void debug_writel(unsigned long val) 184 - { 185 - } 186 - #endif 187 - 188 - static void l2x0_cache_sync(void) 189 - { 190 - unsigned long flags; 191 - 192 - raw_spin_lock_irqsave(&l2x0_lock, flags); 193 - cache_sync(); 194 - raw_spin_unlock_irqrestore(&l2x0_lock, flags); 195 - } 196 - 197 - static void __l2x0_flush_all(void) 198 - { 199 - debug_writel(0x03); 200 - __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); 201 - cache_sync(); 202 - debug_writel(0x00); 203 - } 204 - 205 - static void l2x0_flush_all(void) 206 - { 207 - unsigned long flags; 208 - 209 - /* clean all ways */ 210 - raw_spin_lock_irqsave(&l2x0_lock, flags); 211 - __l2x0_flush_all(); 212 - raw_spin_unlock_irqrestore(&l2x0_lock, flags); 213 - } 214 - 215 - static void l2x0_disable(void) 216 - { 217 - unsigned long flags; 218 - 219 - raw_spin_lock_irqsave(&l2x0_lock, flags); 220 - __l2x0_flush_all(); 221 - l2c_write_sec(0, l2x0_base, L2X0_CTRL); 222 - dsb(st); 223 - raw_spin_unlock_irqrestore(&l2x0_lock, flags); 224 - } 225 - 226 139 static void l2c_save(void 
__iomem *base) 227 140 { 228 141 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 142 + } 143 + 144 + static void l2c_resume(void) 145 + { 146 + l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock); 229 147 } 230 148 231 149 /* ··· 246 288 __l2c210_cache_sync(l2x0_base); 247 289 } 248 290 249 - static void l2c210_resume(void) 250 - { 251 - void __iomem *base = l2x0_base; 252 - 253 - if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) 254 - l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); 255 - } 256 - 257 291 static const struct l2c_init_data l2c210_data __initconst = { 258 292 .type = "L2C-210", 259 293 .way_size_0 = SZ_8K, ··· 259 309 .flush_all = l2c210_flush_all, 260 310 .disable = l2c_disable, 261 311 .sync = l2c210_sync, 262 - .resume = l2c210_resume, 312 + .resume = l2c_resume, 263 313 }, 264 314 }; 265 315 ··· 416 466 .flush_all = l2c220_flush_all, 417 467 .disable = l2c_disable, 418 468 .sync = l2c220_sync, 419 - .resume = l2c210_resume, 469 + .resume = l2c_resume, 420 470 }, 421 471 }; 422 472 ··· 565 615 L310_POWER_CTRL); 566 616 } 567 617 568 - static void l2c310_resume(void) 618 + static void l2c310_configure(void __iomem *base) 569 619 { 570 - void __iomem *base = l2x0_base; 620 + unsigned revision; 571 621 572 - if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 573 - unsigned revision; 622 + /* restore pl310 setup */ 623 + l2c_write_sec(l2x0_saved_regs.tag_latency, base, 624 + L310_TAG_LATENCY_CTRL); 625 + l2c_write_sec(l2x0_saved_regs.data_latency, base, 626 + L310_DATA_LATENCY_CTRL); 627 + l2c_write_sec(l2x0_saved_regs.filter_end, base, 628 + L310_ADDR_FILTER_END); 629 + l2c_write_sec(l2x0_saved_regs.filter_start, base, 630 + L310_ADDR_FILTER_START); 574 631 575 - /* restore pl310 setup */ 576 - writel_relaxed(l2x0_saved_regs.tag_latency, 577 - base + L310_TAG_LATENCY_CTRL); 578 - writel_relaxed(l2x0_saved_regs.data_latency, 579 - base + L310_DATA_LATENCY_CTRL); 580 - 
writel_relaxed(l2x0_saved_regs.filter_end, 581 - base + L310_ADDR_FILTER_END); 582 - writel_relaxed(l2x0_saved_regs.filter_start, 583 - base + L310_ADDR_FILTER_START); 632 + revision = readl_relaxed(base + L2X0_CACHE_ID) & 633 + L2X0_CACHE_ID_RTL_MASK; 584 634 585 - revision = readl_relaxed(base + L2X0_CACHE_ID) & 586 - L2X0_CACHE_ID_RTL_MASK; 587 - 588 - if (revision >= L310_CACHE_ID_RTL_R2P0) 589 - l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 590 - L310_PREFETCH_CTRL); 591 - if (revision >= L310_CACHE_ID_RTL_R3P0) 592 - l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 593 - L310_POWER_CTRL); 594 - 595 - l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 596 - 597 - /* Re-enable full-line-of-zeros for Cortex-A9 */ 598 - if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 599 - set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 600 - } 635 + if (revision >= L310_CACHE_ID_RTL_R2P0) 636 + l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, 637 + L310_PREFETCH_CTRL); 638 + if (revision >= L310_CACHE_ID_RTL_R3P0) 639 + l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, 640 + L310_POWER_CTRL); 601 641 } 602 642 603 643 static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) ··· 639 699 aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); 640 700 } 641 701 702 + /* r3p0 or later has power control register */ 703 + if (rev >= L310_CACHE_ID_RTL_R3P0) 704 + l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN | 705 + L310_STNDBY_MODE_EN; 706 + 707 + /* 708 + * Always enable non-secure access to the lockdown registers - 709 + * we write to them as part of the L2C enable sequence so they 710 + * need to be accessible. 711 + */ 712 + aux |= L310_AUX_CTRL_NS_LOCKDOWN; 713 + 714 + l2c_enable(base, aux, num_lock); 715 + 716 + /* Read back resulting AUX_CTRL value as it could have been altered. 
*/ 717 + aux = readl_relaxed(base + L2X0_AUX_CTRL); 718 + 642 719 if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) { 643 720 u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL); 644 721 ··· 669 712 if (rev >= L310_CACHE_ID_RTL_R3P0) { 670 713 u32 power_ctrl; 671 714 672 - l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN, 673 - base, L310_POWER_CTRL); 674 715 power_ctrl = readl_relaxed(base + L310_POWER_CTRL); 675 716 pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", 676 717 power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", 677 718 power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); 678 719 } 679 - 680 - /* 681 - * Always enable non-secure access to the lockdown registers - 682 - * we write to them as part of the L2C enable sequence so they 683 - * need to be accessible. 684 - */ 685 - aux |= L310_AUX_CTRL_NS_LOCKDOWN; 686 - 687 - l2c_enable(base, aux, num_lock); 688 720 689 721 if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { 690 722 set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); ··· 706 760 707 761 if (revision >= L310_CACHE_ID_RTL_R3P0 && 708 762 revision < L310_CACHE_ID_RTL_R3P2) { 709 - u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); 763 + u32 val = l2x0_saved_regs.prefetch_ctrl; 710 764 /* I don't think bit23 is required here... 
but iMX6 does so */ 711 765 if (val & (BIT(30) | BIT(23))) { 712 766 val &= ~(BIT(30) | BIT(23)); 713 - l2c_write_sec(val, base, L310_PREFETCH_CTRL); 767 + l2x0_saved_regs.prefetch_ctrl = val; 714 768 errata[n++] = "752271"; 715 769 } 716 770 } ··· 746 800 l2c_disable(); 747 801 } 748 802 803 + static void l2c310_resume(void) 804 + { 805 + l2c_resume(); 806 + 807 + /* Re-enable full-line-of-zeros for Cortex-A9 */ 808 + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) 809 + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); 810 + } 811 + 749 812 static const struct l2c_init_data l2c310_init_fns __initconst = { 750 813 .type = "L2C-310", 751 814 .way_size_0 = SZ_8K, ··· 762 807 .enable = l2c310_enable, 763 808 .fixup = l2c310_fixup, 764 809 .save = l2c310_save, 810 + .configure = l2c310_configure, 765 811 .outer_cache = { 766 812 .inv_range = l2c210_inv_range, 767 813 .clean_range = l2c210_clean_range, ··· 774 818 }, 775 819 }; 776 820 777 - static void __init __l2c_init(const struct l2c_init_data *data, 778 - u32 aux_val, u32 aux_mask, u32 cache_id) 821 + static int __init __l2c_init(const struct l2c_init_data *data, 822 + u32 aux_val, u32 aux_mask, u32 cache_id) 779 823 { 780 824 struct outer_cache_fns fns; 781 825 unsigned way_size_bits, ways; 782 826 u32 aux, old_aux; 827 + 828 + /* 829 + * Save the pointer globally so that callbacks which do not receive 830 + * context from callers can access the structure. 831 + */ 832 + l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL); 833 + if (!l2x0_data) 834 + return -ENOMEM; 783 835 784 836 /* 785 837 * Sanity check the aux values. 
aux_mask is the bits we preserve ··· 848 884 849 885 fns = data->outer_cache; 850 886 fns.write_sec = outer_cache.write_sec; 887 + fns.configure = outer_cache.configure; 851 888 if (data->fixup) 852 889 data->fixup(l2x0_base, cache_id, &fns); 853 890 ··· 875 910 data->type, ways, l2x0_size >> 10); 876 911 pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", 877 912 data->type, cache_id, aux); 913 + 914 + return 0; 878 915 } 879 916 880 917 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) ··· 902 935 data = &l2c310_init_fns; 903 936 break; 904 937 } 938 + 939 + /* Read back current (default) hardware configuration */ 940 + if (data->save) 941 + data->save(l2x0_base); 905 942 906 943 __l2c_init(data, aux_val, aux_mask, cache_id); 907 944 } ··· 1073 1102 .flush_all = l2c210_flush_all, 1074 1103 .disable = l2c_disable, 1075 1104 .sync = l2c210_sync, 1076 - .resume = l2c210_resume, 1105 + .resume = l2c_resume, 1077 1106 }, 1078 1107 }; 1079 1108 ··· 1091 1120 .flush_all = l2c220_flush_all, 1092 1121 .disable = l2c_disable, 1093 1122 .sync = l2c220_sync, 1094 - .resume = l2c210_resume, 1123 + .resume = l2c_resume, 1095 1124 }, 1096 1125 }; 1097 1126 ··· 1102 1131 u32 tag[3] = { 0, 0, 0 }; 1103 1132 u32 filter[2] = { 0, 0 }; 1104 1133 u32 assoc; 1134 + u32 prefetch; 1135 + u32 val; 1105 1136 int ret; 1106 1137 1107 1138 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 1108 1139 if (tag[0] && tag[1] && tag[2]) 1109 - writel_relaxed( 1140 + l2x0_saved_regs.tag_latency = 1110 1141 L310_LATENCY_CTRL_RD(tag[0] - 1) | 1111 1142 L310_LATENCY_CTRL_WR(tag[1] - 1) | 1112 - L310_LATENCY_CTRL_SETUP(tag[2] - 1), 1113 - l2x0_base + L310_TAG_LATENCY_CTRL); 1143 + L310_LATENCY_CTRL_SETUP(tag[2] - 1); 1114 1144 1115 1145 of_property_read_u32_array(np, "arm,data-latency", 1116 1146 data, ARRAY_SIZE(data)); 1117 1147 if (data[0] && data[1] && data[2]) 1118 - writel_relaxed( 1148 + l2x0_saved_regs.data_latency = 1119 1149 L310_LATENCY_CTRL_RD(data[0] 
- 1) | 1120 1150 L310_LATENCY_CTRL_WR(data[1] - 1) | 1121 - L310_LATENCY_CTRL_SETUP(data[2] - 1), 1122 - l2x0_base + L310_DATA_LATENCY_CTRL); 1151 + L310_LATENCY_CTRL_SETUP(data[2] - 1); 1123 1152 1124 1153 of_property_read_u32_array(np, "arm,filter-ranges", 1125 1154 filter, ARRAY_SIZE(filter)); 1126 1155 if (filter[1]) { 1127 - writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), 1128 - l2x0_base + L310_ADDR_FILTER_END); 1129 - writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, 1130 - l2x0_base + L310_ADDR_FILTER_START); 1156 + l2x0_saved_regs.filter_end = 1157 + ALIGN(filter[0] + filter[1], SZ_1M); 1158 + l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1)) 1159 + | L310_ADDR_FILTER_EN; 1131 1160 } 1132 1161 1133 1162 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); ··· 1149 1178 assoc); 1150 1179 break; 1151 1180 } 1181 + 1182 + prefetch = l2x0_saved_regs.prefetch_ctrl; 1183 + 1184 + ret = of_property_read_u32(np, "arm,double-linefill", &val); 1185 + if (ret == 0) { 1186 + if (val) 1187 + prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL; 1188 + else 1189 + prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL; 1190 + } else if (ret != -EINVAL) { 1191 + pr_err("L2C-310 OF arm,double-linefill property value is missing\n"); 1192 + } 1193 + 1194 + ret = of_property_read_u32(np, "arm,double-linefill-incr", &val); 1195 + if (ret == 0) { 1196 + if (val) 1197 + prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR; 1198 + else 1199 + prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR; 1200 + } else if (ret != -EINVAL) { 1201 + pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n"); 1202 + } 1203 + 1204 + ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val); 1205 + if (ret == 0) { 1206 + if (!val) 1207 + prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP; 1208 + else 1209 + prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP; 1210 + } else if (ret != -EINVAL) { 1211 + pr_err("L2C-310 OF arm,double-linefill-wrap property 
value is missing\n"); 1212 + } 1213 + 1214 + ret = of_property_read_u32(np, "arm,prefetch-drop", &val); 1215 + if (ret == 0) { 1216 + if (val) 1217 + prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP; 1218 + else 1219 + prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP; 1220 + } else if (ret != -EINVAL) { 1221 + pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n"); 1222 + } 1223 + 1224 + ret = of_property_read_u32(np, "arm,prefetch-offset", &val); 1225 + if (ret == 0) { 1226 + prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK; 1227 + prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK; 1228 + } else if (ret != -EINVAL) { 1229 + pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n"); 1230 + } 1231 + 1232 + l2x0_saved_regs.prefetch_ctrl = prefetch; 1152 1233 } 1153 1234 1154 1235 static const struct l2c_init_data of_l2c310_data __initconst = { ··· 1211 1188 .enable = l2c310_enable, 1212 1189 .fixup = l2c310_fixup, 1213 1190 .save = l2c310_save, 1191 + .configure = l2c310_configure, 1214 1192 .outer_cache = { 1215 1193 .inv_range = l2c210_inv_range, 1216 1194 .clean_range = l2c210_clean_range, ··· 1240 1216 .enable = l2c310_enable, 1241 1217 .fixup = l2c310_fixup, 1242 1218 .save = l2c310_save, 1219 + .configure = l2c310_configure, 1243 1220 .outer_cache = { 1244 1221 .inv_range = l2c210_inv_range, 1245 1222 .clean_range = l2c210_clean_range, ··· 1256 1231 * noninclusive, while the hardware cache range operations use 1257 1232 * inclusive start and end addresses. 1258 1233 */ 1259 - static unsigned long calc_range_end(unsigned long start, unsigned long end) 1234 + static unsigned long aurora_range_end(unsigned long start, unsigned long end) 1260 1235 { 1261 1236 /* 1262 1237 * Limit the number of cache lines processed at once, ··· 1275 1250 return end; 1276 1251 } 1277 1252 1278 - /* 1279 - * Make sure 'start' and 'end' reference the same page, as L2 is PIPT 1280 - * and range operations only do a TLB lookup on the start address. 
1281 - */ 1282 1253 static void aurora_pa_range(unsigned long start, unsigned long end, 1283 - unsigned long offset) 1254 + unsigned long offset) 1284 1255 { 1256 + void __iomem *base = l2x0_base; 1257 + unsigned long range_end; 1285 1258 unsigned long flags; 1286 1259 1287 - raw_spin_lock_irqsave(&l2x0_lock, flags); 1288 - writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); 1289 - writel_relaxed(end, l2x0_base + offset); 1290 - raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1291 - 1292 - cache_sync(); 1293 - } 1294 - 1295 - static void aurora_inv_range(unsigned long start, unsigned long end) 1296 - { 1297 1260 /* 1298 1261 * round start and end adresses up to cache line size 1299 1262 */ ··· 1289 1276 end = ALIGN(end, CACHE_LINE_SIZE); 1290 1277 1291 1278 /* 1292 - * Invalidate all full cache lines between 'start' and 'end'. 1279 + * perform operation on all full cache lines between 'start' and 'end' 1293 1280 */ 1294 1281 while (start < end) { 1295 - unsigned long range_end = calc_range_end(start, end); 1296 - aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1297 - AURORA_INVAL_RANGE_REG); 1282 + range_end = aurora_range_end(start, end); 1283 + 1284 + raw_spin_lock_irqsave(&l2x0_lock, flags); 1285 + writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG); 1286 + writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset); 1287 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1288 + 1289 + writel_relaxed(0, base + AURORA_SYNC_REG); 1298 1290 start = range_end; 1299 1291 } 1292 + } 1293 + static void aurora_inv_range(unsigned long start, unsigned long end) 1294 + { 1295 + aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG); 1300 1296 } 1301 1297 1302 1298 static void aurora_clean_range(unsigned long start, unsigned long end) ··· 1314 1292 * If L2 is forced to WT, the L2 will always be clean and we 1315 1293 * don't need to do anything here. 
1316 1294 */ 1317 - if (!l2_wt_override) { 1318 - start &= ~(CACHE_LINE_SIZE - 1); 1319 - end = ALIGN(end, CACHE_LINE_SIZE); 1320 - while (start != end) { 1321 - unsigned long range_end = calc_range_end(start, end); 1322 - aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1323 - AURORA_CLEAN_RANGE_REG); 1324 - start = range_end; 1325 - } 1326 - } 1295 + if (!l2_wt_override) 1296 + aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG); 1327 1297 } 1328 1298 1329 1299 static void aurora_flush_range(unsigned long start, unsigned long end) 1330 1300 { 1331 - start &= ~(CACHE_LINE_SIZE - 1); 1332 - end = ALIGN(end, CACHE_LINE_SIZE); 1333 - while (start != end) { 1334 - unsigned long range_end = calc_range_end(start, end); 1335 - /* 1336 - * If L2 is forced to WT, the L2 will always be clean and we 1337 - * just need to invalidate. 1338 - */ 1339 - if (l2_wt_override) 1340 - aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1341 - AURORA_INVAL_RANGE_REG); 1342 - else 1343 - aurora_pa_range(start, range_end - CACHE_LINE_SIZE, 1344 - AURORA_FLUSH_RANGE_REG); 1345 - start = range_end; 1346 - } 1301 + if (l2_wt_override) 1302 + aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG); 1303 + else 1304 + aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG); 1305 + } 1306 + 1307 + static void aurora_flush_all(void) 1308 + { 1309 + void __iomem *base = l2x0_base; 1310 + unsigned long flags; 1311 + 1312 + /* clean all ways */ 1313 + raw_spin_lock_irqsave(&l2x0_lock, flags); 1314 + __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 1315 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1316 + 1317 + writel_relaxed(0, base + AURORA_SYNC_REG); 1318 + } 1319 + 1320 + static void aurora_cache_sync(void) 1321 + { 1322 + writel_relaxed(0, l2x0_base + AURORA_SYNC_REG); 1323 + } 1324 + 1325 + static void aurora_disable(void) 1326 + { 1327 + void __iomem *base = l2x0_base; 1328 + unsigned long flags; 1329 + 1330 + raw_spin_lock_irqsave(&l2x0_lock, flags); 1331 + __l2c_op_way(base + L2X0_CLEAN_INV_WAY); 
1332 + writel_relaxed(0, base + AURORA_SYNC_REG); 1333 + l2c_write_sec(0, base, L2X0_CTRL); 1334 + dsb(st); 1335 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 1347 1336 } 1348 1337 1349 1338 static void aurora_save(void __iomem *base) 1350 1339 { 1351 1340 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); 1352 1341 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); 1353 - } 1354 - 1355 - static void aurora_resume(void) 1356 - { 1357 - void __iomem *base = l2x0_base; 1358 - 1359 - if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1360 - writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); 1361 - writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); 1362 - } 1363 1342 } 1364 1343 1365 1344 /* ··· 1421 1398 .inv_range = aurora_inv_range, 1422 1399 .clean_range = aurora_clean_range, 1423 1400 .flush_range = aurora_flush_range, 1424 - .flush_all = l2x0_flush_all, 1425 - .disable = l2x0_disable, 1426 - .sync = l2x0_cache_sync, 1427 - .resume = aurora_resume, 1401 + .flush_all = aurora_flush_all, 1402 + .disable = aurora_disable, 1403 + .sync = aurora_cache_sync, 1404 + .resume = l2c_resume, 1428 1405 }, 1429 1406 }; 1430 1407 ··· 1437 1414 .fixup = aurora_fixup, 1438 1415 .save = aurora_save, 1439 1416 .outer_cache = { 1440 - .resume = aurora_resume, 1417 + .resume = l2c_resume, 1441 1418 }, 1442 1419 }; 1443 1420 ··· 1585 1562 .of_parse = l2c310_of_parse, 1586 1563 .enable = l2c310_enable, 1587 1564 .save = l2c310_save, 1565 + .configure = l2c310_configure, 1588 1566 .outer_cache = { 1589 1567 .inv_range = bcm_inv_range, 1590 1568 .clean_range = bcm_clean_range, ··· 1607 1583 readl_relaxed(base + L310_PREFETCH_CTRL); 1608 1584 } 1609 1585 1610 - static void tauros3_resume(void) 1586 + static void tauros3_configure(void __iomem *base) 1611 1587 { 1612 - void __iomem *base = l2x0_base; 1613 - 1614 - if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { 1615 - writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1616 - base + TAUROS3_AUX2_CTRL); 
1617 - writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1618 - base + L310_PREFETCH_CTRL); 1619 - 1620 - l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); 1621 - } 1588 + writel_relaxed(l2x0_saved_regs.aux2_ctrl, 1589 + base + TAUROS3_AUX2_CTRL); 1590 + writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 1591 + base + L310_PREFETCH_CTRL); 1622 1592 } 1623 1593 1624 1594 static const struct l2c_init_data of_tauros3_data __initconst = { ··· 1621 1603 .num_lock = 8, 1622 1604 .enable = l2c_enable, 1623 1605 .save = tauros3_save, 1606 + .configure = tauros3_configure, 1624 1607 /* Tauros3 broadcasts L1 cache operations to L2 */ 1625 1608 .outer_cache = { 1626 - .resume = tauros3_resume, 1609 + .resume = l2c_resume, 1627 1610 }, 1628 1611 }; 1629 1612 ··· 1680 1661 if (!of_property_read_bool(np, "cache-unified")) 1681 1662 pr_err("L2C: device tree omits to specify unified cache\n"); 1682 1663 1664 + /* Read back current (default) hardware configuration */ 1665 + if (data->save) 1666 + data->save(l2x0_base); 1667 + 1683 1668 /* L2 configuration can only be changed if the cache is disabled */ 1684 1669 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) 1685 1670 if (data->of_parse) ··· 1694 1671 else 1695 1672 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 1696 1673 1697 - __l2c_init(data, aux_val, aux_mask, cache_id); 1698 - 1699 - return 0; 1674 + return __l2c_init(data, aux_val, aux_mask, cache_id); 1700 1675 } 1701 1676 #endif
+11 -15
arch/arm/mm/context.c
··· 144 144 /* Update the list of reserved ASIDs and the ASID bitmap. */ 145 145 bitmap_clear(asid_map, 0, NUM_USER_ASIDS); 146 146 for_each_possible_cpu(i) { 147 - if (i == cpu) { 148 - asid = 0; 149 - } else { 150 - asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 151 - /* 152 - * If this CPU has already been through a 153 - * rollover, but hasn't run another task in 154 - * the meantime, we must preserve its reserved 155 - * ASID, as this is the only trace we have of 156 - * the process it is still running. 157 - */ 158 - if (asid == 0) 159 - asid = per_cpu(reserved_asids, i); 160 - __set_bit(asid & ~ASID_MASK, asid_map); 161 - } 147 + asid = atomic64_xchg(&per_cpu(active_asids, i), 0); 148 + /* 149 + * If this CPU has already been through a 150 + * rollover, but hasn't run another task in 151 + * the meantime, we must preserve its reserved 152 + * ASID, as this is the only trace we have of 153 + * the process it is still running. 154 + */ 155 + if (asid == 0) 156 + asid = per_cpu(reserved_asids, i); 157 + __set_bit(asid & ~ASID_MASK, asid_map); 162 158 per_cpu(reserved_asids, i) = asid; 163 159 } 164 160
+3
arch/arm/mm/dma-mapping.c
··· 2025 2025 { 2026 2026 struct dma_iommu_mapping *mapping = dev->archdata.mapping; 2027 2027 2028 + if (!mapping) 2029 + return; 2030 + 2028 2031 arm_iommu_detach_device(dev); 2029 2032 arm_iommu_release_mapping(mapping); 2030 2033 }
+2 -7
arch/arm/mm/dump.c
··· 220 220 static const char units[] = "KMGTPE"; 221 221 u64 prot = val & pg_level[level].mask; 222 222 223 - if (addr < USER_PGTABLES_CEILING) 224 - return; 225 - 226 223 if (!st->level) { 227 224 st->level = level; 228 225 st->current_prot = prot; ··· 305 308 pgd_t *pgd = swapper_pg_dir; 306 309 struct pg_state st; 307 310 unsigned long addr; 308 - unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE; 311 + unsigned i; 309 312 310 313 memset(&st, 0, sizeof(st)); 311 314 st.seq = m; 312 315 st.marker = address_markers; 313 316 314 - pgd += pgdoff; 315 - 316 - for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) { 317 + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { 317 318 addr = i * PGDIR_SIZE; 318 319 if (!pgd_none(*pgd)) { 319 320 walk_pud(&st, pgd, addr);
+3 -6
arch/arm/mm/init.c
··· 319 319 320 320 early_init_fdt_scan_reserved_mem(); 321 321 322 - /* 323 - * reserve memory for DMA contigouos allocations, 324 - * must come from DMA area inside low memory 325 - */ 322 + /* reserve memory for DMA contiguous allocations */ 326 323 dma_contiguous_reserve(arm_dma_limit); 327 324 328 325 arm_memblock_steal_permitted = false; ··· 655 658 .start = (unsigned long)_stext, 656 659 .end = (unsigned long)__init_begin, 657 660 #ifdef CONFIG_ARM_LPAE 658 - .mask = ~PMD_SECT_RDONLY, 659 - .prot = PMD_SECT_RDONLY, 661 + .mask = ~L_PMD_SECT_RDONLY, 662 + .prot = L_PMD_SECT_RDONLY, 660 663 #else 661 664 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), 662 665 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+2 -2
arch/arm/mm/mmu.c
··· 1329 1329 static void __init map_lowmem(void) 1330 1330 { 1331 1331 struct memblock_region *reg; 1332 - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1333 - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1332 + phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1333 + phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1334 1334 1335 1335 /* Map all the lowmem memory banks. */ 1336 1336 for_each_memblock(memory, reg) {
+1
arch/arm64/Kconfig
··· 39 39 select HARDIRQS_SW_RESEND 40 40 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 41 41 select HAVE_ARCH_AUDITSYSCALL 42 + select HAVE_ARCH_BITREVERSE 42 43 select HAVE_ARCH_JUMP_LABEL 43 44 select HAVE_ARCH_KGDB 44 45 select HAVE_ARCH_SECCOMP_FILTER
+19
arch/arm64/include/asm/bitrev.h
··· 1 + #ifndef __ASM_BITREV_H 2 + #define __ASM_BITREV_H 3 + static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) 4 + { 5 + __asm__ ("rbit %w0, %w1" : "=r" (x) : "r" (x)); 6 + return x; 7 + } 8 + 9 + static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x) 10 + { 11 + return __arch_bitrev32((u32)x) >> 16; 12 + } 13 + 14 + static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x) 15 + { 16 + return __arch_bitrev32((u32)x) >> 24; 17 + } 18 + 19 + #endif
+47
drivers/amba/bus.c
··· 18 18 #include <linux/pm_domain.h> 19 19 #include <linux/amba/bus.h> 20 20 #include <linux/sizes.h> 21 + #include <linux/limits.h> 21 22 22 23 #include <asm/irq.h> 23 24 ··· 44 43 struct amba_device *pcdev = to_amba_device(dev); 45 44 struct amba_driver *pcdrv = to_amba_driver(drv); 46 45 46 + /* When driver_override is set, only bind to the matching driver */ 47 + if (pcdev->driver_override) 48 + return !strcmp(pcdev->driver_override, drv->name); 49 + 47 50 return amba_lookup(pcdrv->id_table, pcdev) != NULL; 48 51 } 49 52 ··· 62 57 63 58 retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid); 64 59 return retval; 60 + } 61 + 62 + static ssize_t driver_override_show(struct device *_dev, 63 + struct device_attribute *attr, char *buf) 64 + { 65 + struct amba_device *dev = to_amba_device(_dev); 66 + 67 + if (!dev->driver_override) 68 + return 0; 69 + 70 + return sprintf(buf, "%s\n", dev->driver_override); 71 + } 72 + 73 + static ssize_t driver_override_store(struct device *_dev, 74 + struct device_attribute *attr, 75 + const char *buf, size_t count) 76 + { 77 + struct amba_device *dev = to_amba_device(_dev); 78 + char *driver_override, *old = dev->driver_override, *cp; 79 + 80 + if (count > PATH_MAX) 81 + return -EINVAL; 82 + 83 + driver_override = kstrndup(buf, count, GFP_KERNEL); 84 + if (!driver_override) 85 + return -ENOMEM; 86 + 87 + cp = strchr(driver_override, '\n'); 88 + if (cp) 89 + *cp = '\0'; 90 + 91 + if (strlen(driver_override)) { 92 + dev->driver_override = driver_override; 93 + } else { 94 + kfree(driver_override); 95 + dev->driver_override = NULL; 96 + } 97 + 98 + kfree(old); 99 + 100 + return count; 65 101 } 66 102 67 103 #define amba_attr_func(name,fmt,arg...) \ ··· 127 81 static struct device_attribute amba_dev_attrs[] = { 128 82 __ATTR_RO(id), 129 83 __ATTR_RO(resource), 84 + __ATTR_RW(driver_override), 130 85 __ATTR_NULL, 131 86 }; 132 87
+7
drivers/clocksource/Kconfig
··· 229 229 depends on MIPS_GIC 230 230 select CLKSRC_OF 231 231 232 + config CLKSRC_PXA 233 + def_bool y if ARCH_PXA || ARCH_SA1100 234 + select CLKSRC_OF if USE_OF 235 + help 236 + This enables OST0 support available on PXA and SA-11x0 237 + platforms. 238 + 232 239 endmenu
+1 -1
drivers/clocksource/Makefile
··· 21 21 obj-$(CONFIG_ARCH_MARCO) += timer-marco.o 22 22 obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o 23 23 obj-$(CONFIG_ARCH_MXS) += mxs_timer.o 24 - obj-$(CONFIG_ARCH_PXA) += pxa_timer.o 24 + obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o 25 25 obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o 26 26 obj-$(CONFIG_ARCH_U300) += timer-u300.o 27 27 obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
+198 -1
drivers/gpio/gpio-sa1100.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/module.h> 13 13 #include <linux/io.h> 14 + #include <linux/syscore_ops.h> 14 15 #include <mach/hardware.h> 15 16 #include <mach/irqs.h> 16 17 ··· 51 50 52 51 static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset) 53 52 { 54 - return offset < 11 ? (IRQ_GPIO0 + offset) : (IRQ_GPIO11 - 11 + offset); 53 + return IRQ_GPIO0 + offset; 55 54 } 56 55 57 56 static struct gpio_chip sa1100_gpio_chip = { ··· 65 64 .ngpio = GPIO_MAX + 1, 66 65 }; 67 66 67 + /* 68 + * SA1100 GPIO edge detection for IRQs: 69 + * IRQs are generated on Falling-Edge, Rising-Edge, or both. 70 + * Use this instead of directly setting GRER/GFER. 71 + */ 72 + static int GPIO_IRQ_rising_edge; 73 + static int GPIO_IRQ_falling_edge; 74 + static int GPIO_IRQ_mask; 75 + 76 + static int sa1100_gpio_type(struct irq_data *d, unsigned int type) 77 + { 78 + unsigned int mask; 79 + 80 + mask = BIT(d->hwirq); 81 + 82 + if (type == IRQ_TYPE_PROBE) { 83 + if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask) 84 + return 0; 85 + type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 86 + } 87 + 88 + if (type & IRQ_TYPE_EDGE_RISING) 89 + GPIO_IRQ_rising_edge |= mask; 90 + else 91 + GPIO_IRQ_rising_edge &= ~mask; 92 + if (type & IRQ_TYPE_EDGE_FALLING) 93 + GPIO_IRQ_falling_edge |= mask; 94 + else 95 + GPIO_IRQ_falling_edge &= ~mask; 96 + 97 + GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 98 + GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 99 + 100 + return 0; 101 + } 102 + 103 + /* 104 + * GPIO IRQs must be acknowledged. 
105 + */ 106 + static void sa1100_gpio_ack(struct irq_data *d) 107 + { 108 + GEDR = BIT(d->hwirq); 109 + } 110 + 111 + static void sa1100_gpio_mask(struct irq_data *d) 112 + { 113 + unsigned int mask = BIT(d->hwirq); 114 + 115 + GPIO_IRQ_mask &= ~mask; 116 + 117 + GRER &= ~mask; 118 + GFER &= ~mask; 119 + } 120 + 121 + static void sa1100_gpio_unmask(struct irq_data *d) 122 + { 123 + unsigned int mask = BIT(d->hwirq); 124 + 125 + GPIO_IRQ_mask |= mask; 126 + 127 + GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 128 + GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 129 + } 130 + 131 + static int sa1100_gpio_wake(struct irq_data *d, unsigned int on) 132 + { 133 + if (on) 134 + PWER |= BIT(d->hwirq); 135 + else 136 + PWER &= ~BIT(d->hwirq); 137 + return 0; 138 + } 139 + 140 + /* 141 + * This is for GPIO IRQs 142 + */ 143 + static struct irq_chip sa1100_gpio_irq_chip = { 144 + .name = "GPIO", 145 + .irq_ack = sa1100_gpio_ack, 146 + .irq_mask = sa1100_gpio_mask, 147 + .irq_unmask = sa1100_gpio_unmask, 148 + .irq_set_type = sa1100_gpio_type, 149 + .irq_set_wake = sa1100_gpio_wake, 150 + }; 151 + 152 + static int sa1100_gpio_irqdomain_map(struct irq_domain *d, 153 + unsigned int irq, irq_hw_number_t hwirq) 154 + { 155 + irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, 156 + handle_edge_irq); 157 + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 158 + 159 + return 0; 160 + } 161 + 162 + static struct irq_domain_ops sa1100_gpio_irqdomain_ops = { 163 + .map = sa1100_gpio_irqdomain_map, 164 + .xlate = irq_domain_xlate_onetwocell, 165 + }; 166 + 167 + static struct irq_domain *sa1100_gpio_irqdomain; 168 + 169 + /* 170 + * IRQ 0-11 (GPIO) handler. We enter here with the 171 + * irq_controller_lock held, and IRQs disabled. Decode the IRQ 172 + * and call the handler. 
173 + */ 174 + static void 175 + sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc) 176 + { 177 + unsigned int mask; 178 + 179 + mask = GEDR; 180 + do { 181 + /* 182 + * clear down all currently active IRQ sources. 183 + * We will be processing them all. 184 + */ 185 + GEDR = mask; 186 + 187 + irq = IRQ_GPIO0; 188 + do { 189 + if (mask & 1) 190 + generic_handle_irq(irq); 191 + mask >>= 1; 192 + irq++; 193 + } while (mask); 194 + 195 + mask = GEDR; 196 + } while (mask); 197 + } 198 + 199 + static int sa1100_gpio_suspend(void) 200 + { 201 + /* 202 + * Set the appropriate edges for wakeup. 203 + */ 204 + GRER = PWER & GPIO_IRQ_rising_edge; 205 + GFER = PWER & GPIO_IRQ_falling_edge; 206 + 207 + /* 208 + * Clear any pending GPIO interrupts. 209 + */ 210 + GEDR = GEDR; 211 + 212 + return 0; 213 + } 214 + 215 + static void sa1100_gpio_resume(void) 216 + { 217 + GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask; 218 + GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 219 + } 220 + 221 + static struct syscore_ops sa1100_gpio_syscore_ops = { 222 + .suspend = sa1100_gpio_suspend, 223 + .resume = sa1100_gpio_resume, 224 + }; 225 + 226 + static int __init sa1100_gpio_init_devicefs(void) 227 + { 228 + register_syscore_ops(&sa1100_gpio_syscore_ops); 229 + return 0; 230 + } 231 + 232 + device_initcall(sa1100_gpio_init_devicefs); 233 + 68 234 void __init sa1100_init_gpio(void) 69 235 { 236 + /* clear all GPIO edge detects */ 237 + GFER = 0; 238 + GRER = 0; 239 + GEDR = -1; 240 + 70 241 gpiochip_add(&sa1100_gpio_chip); 242 + 243 + sa1100_gpio_irqdomain = irq_domain_add_simple(NULL, 244 + 28, IRQ_GPIO0, 245 + &sa1100_gpio_irqdomain_ops, NULL); 246 + 247 + /* 248 + * Install handlers for GPIO 0-10 edge detect interrupts 249 + */ 250 + irq_set_chained_handler(IRQ_GPIO0_SC, sa1100_gpio_handler); 251 + irq_set_chained_handler(IRQ_GPIO1_SC, sa1100_gpio_handler); 252 + irq_set_chained_handler(IRQ_GPIO2_SC, sa1100_gpio_handler); 253 + irq_set_chained_handler(IRQ_GPIO3_SC, 
sa1100_gpio_handler); 254 + irq_set_chained_handler(IRQ_GPIO4_SC, sa1100_gpio_handler); 255 + irq_set_chained_handler(IRQ_GPIO5_SC, sa1100_gpio_handler); 256 + irq_set_chained_handler(IRQ_GPIO6_SC, sa1100_gpio_handler); 257 + irq_set_chained_handler(IRQ_GPIO7_SC, sa1100_gpio_handler); 258 + irq_set_chained_handler(IRQ_GPIO8_SC, sa1100_gpio_handler); 259 + irq_set_chained_handler(IRQ_GPIO9_SC, sa1100_gpio_handler); 260 + irq_set_chained_handler(IRQ_GPIO10_SC, sa1100_gpio_handler); 261 + /* 262 + * Install handler for GPIO 11-27 edge detect interrupts 263 + */ 264 + irq_set_chained_handler(IRQ_GPIO11_27, sa1100_gpio_handler); 265 + 71 266 }
+9 -4
include/linux/amba/bus.h
··· 33 33 struct clk *pclk; 34 34 unsigned int periphid; 35 35 unsigned int irq[AMBA_NR_IRQS]; 36 + char *driver_override; 36 37 }; 37 38 38 39 struct amba_driver { ··· 93 92 int amba_request_regions(struct amba_device *, const char *); 94 93 void amba_release_regions(struct amba_device *); 95 94 96 - #define amba_pclk_enable(d) \ 97 - (IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk)) 95 + static inline int amba_pclk_enable(struct amba_device *dev) 96 + { 97 + return clk_enable(dev->pclk); 98 + } 98 99 99 - #define amba_pclk_disable(d) \ 100 - do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 100 + static inline void amba_pclk_disable(struct amba_device *dev) 101 + { 102 + clk_disable(dev->pclk); 103 + } 101 104 102 105 static inline int amba_pclk_prepare(struct amba_device *dev) 103 106 {
+73 -4
include/linux/bitrev.h
··· 3 3 4 4 #include <linux/types.h> 5 5 6 - extern u8 const byte_rev_table[256]; 6 + #ifdef CONFIG_HAVE_ARCH_BITREVERSE 7 + #include <asm/bitrev.h> 7 8 8 - static inline u8 bitrev8(u8 byte) 9 + #define __bitrev32 __arch_bitrev32 10 + #define __bitrev16 __arch_bitrev16 11 + #define __bitrev8 __arch_bitrev8 12 + 13 + #else 14 + extern u8 const byte_rev_table[256]; 15 + static inline u8 __bitrev8(u8 byte) 9 16 { 10 17 return byte_rev_table[byte]; 11 18 } 12 19 13 - extern u16 bitrev16(u16 in); 14 - extern u32 bitrev32(u32 in); 20 + static inline u16 __bitrev16(u16 x) 21 + { 22 + return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8); 23 + } 15 24 25 + static inline u32 __bitrev32(u32 x) 26 + { 27 + return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16); 28 + } 29 + 30 + #endif /* CONFIG_HAVE_ARCH_BITREVERSE */ 31 + 32 + #define __constant_bitrev32(x) \ 33 + ({ \ 34 + u32 __x = x; \ 35 + __x = (__x >> 16) | (__x << 16); \ 36 + __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ 37 + __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ 38 + __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ 39 + __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ 40 + __x; \ 41 + }) 42 + 43 + #define __constant_bitrev16(x) \ 44 + ({ \ 45 + u16 __x = x; \ 46 + __x = (__x >> 8) | (__x << 8); \ 47 + __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ 48 + __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ 49 + __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ 50 + __x; \ 51 + }) 52 + 53 + #define __constant_bitrev8(x) \ 54 + ({ \ 55 + u8 __x = x; \ 56 + __x = (__x >> 4) | (__x << 4); \ 57 + __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ 58 + __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ 59 + __x; \ 60 + }) 61 + 62 + #define bitrev32(x) \ 63 + ({ \ 64 + u32 __x = x; \ 65 + __builtin_constant_p(__x) ? 
\ 66 + __constant_bitrev32(__x) : \ 67 + __bitrev32(__x); \ 68 + }) 69 + 70 + #define bitrev16(x) \ 71 + ({ \ 72 + u16 __x = x; \ 73 + __builtin_constant_p(__x) ? \ 74 + __constant_bitrev16(__x) : \ 75 + __bitrev16(__x); \ 76 + }) 77 + 78 + #define bitrev8(x) \ 79 + ({ \ 80 + u8 __x = x; \ 81 + __builtin_constant_p(__x) ? \ 82 + __constant_bitrev8(__x) : \ 83 + __bitrev8(__x) ; \ 84 + }) 16 85 #endif /* _LINUX_BITREV_H */
+9
lib/Kconfig
··· 13 13 config BITREVERSE 14 14 tristate 15 15 16 + config HAVE_ARCH_BITREVERSE 17 + boolean 18 + default n 19 + depends on BITREVERSE 20 + help 21 + This option enables the use of hardware bit-reversal instructions on 22 + architectures that provide them, instead of the generic table-based 23 + software implementation. 24 + 16 25 config RATIONAL 17 26 boolean 18 27
+2 -15
lib/bitrev.c
··· 1 + #ifndef CONFIG_HAVE_ARCH_BITREVERSE 1 2 #include <linux/types.h> 2 3 #include <linux/module.h> 3 4 #include <linux/bitrev.h> ··· 43 42 }; 44 43 EXPORT_SYMBOL_GPL(byte_rev_table); 45 44 46 - u16 bitrev16(u16 x) 47 - { 48 - return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); 49 - } 50 - EXPORT_SYMBOL(bitrev16); 51 - 52 - /** 53 - * bitrev32 - reverse the order of bits in a u32 value 54 - * @x: value to be bit-reversed 55 - */ 56 - u32 bitrev32(u32 x) 57 - { 58 - return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); 59 - } 60 - EXPORT_SYMBOL(bitrev32); 45 + #endif /* CONFIG_HAVE_ARCH_BITREVERSE */