Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'audit', 'delay', 'fixes', 'misc' and 'sta2x11' into for-linus

+963 -753
+22
arch/arm/Kconfig
··· 45 45 select GENERIC_SMP_IDLE_THREAD 46 46 select KTIME_SCALAR 47 47 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 48 + select GENERIC_STRNCPY_FROM_USER 49 + select GENERIC_STRNLEN_USER 50 + select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN 48 51 help 49 52 The ARM series is a line of low-power-consumption RISC chip designs 50 53 licensed by ARM Ltd and targeted at embedded applications and ··· 1963 1960 DTB. To allow a device tree enabled kernel to be used with such 1964 1961 bootloaders, this option allows zImage to extract the information 1965 1962 from the ATAG list and store it at run time into the appended DTB. 1963 + 1964 + choice 1965 + prompt "Kernel command line type" if ARM_ATAG_DTB_COMPAT 1966 + default ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER 1967 + 1968 + config ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER 1969 + bool "Use bootloader kernel arguments if available" 1970 + help 1971 + Uses the command-line options passed by the boot loader instead of 1972 + the device tree bootargs property. If the boot loader doesn't provide 1973 + any, the device tree bootargs property will be used. 1974 + 1975 + config ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND 1976 + bool "Extend with bootloader kernel arguments" 1977 + help 1978 + The command-line arguments provided by the boot loader will be 1979 + appended to the the device tree bootargs property. 1980 + 1981 + endchoice 1966 1982 1967 1983 config CMDLINE 1968 1984 string "Default kernel command string"
+9
arch/arm/Kconfig.debug
··· 369 369 help 370 370 Perform tests of kprobes API and instruction set simulation. 371 371 372 + config PID_IN_CONTEXTIDR 373 + bool "Write the current PID to the CONTEXTIDR register" 374 + depends on CPU_COPY_V6 375 + help 376 + Enabling this option causes the kernel to write the current PID to 377 + the PROCID field of the CONTEXTIDR register, at the expense of some 378 + additional instructions during context switch. Say Y here only if you 379 + are planning to use hardware trace tools with this kernel. 380 + 372 381 endmenu
+3
arch/arm/Makefile
··· 10 10 # 11 11 # Copyright (C) 1995-2001 by Russell King 12 12 13 + # Ensure linker flags are correct 14 + LDFLAGS := 15 + 13 16 LDFLAGS_vmlinux :=-p --no-undefined -X 14 17 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) 15 18 LDFLAGS_vmlinux += --be8
+60 -2
arch/arm/boot/compressed/atags_to_fdt.c
··· 1 1 #include <asm/setup.h> 2 2 #include <libfdt.h> 3 3 4 + #if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND) 5 + #define do_extend_cmdline 1 6 + #else 7 + #define do_extend_cmdline 0 8 + #endif 9 + 4 10 static int node_offset(void *fdt, const char *node_path) 5 11 { 6 12 int offset = fdt_path_offset(fdt, node_path); ··· 40 34 if (offset < 0) 41 35 return offset; 42 36 return fdt_setprop_cell(fdt, offset, property, val); 37 + } 38 + 39 + static const void *getprop(const void *fdt, const char *node_path, 40 + const char *property, int *len) 41 + { 42 + int offset = fdt_path_offset(fdt, node_path); 43 + 44 + if (offset == -FDT_ERR_NOTFOUND) 45 + return NULL; 46 + 47 + return fdt_getprop(fdt, offset, property, len); 48 + } 49 + 50 + static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) 51 + { 52 + char cmdline[COMMAND_LINE_SIZE]; 53 + const char *fdt_bootargs; 54 + char *ptr = cmdline; 55 + int len = 0; 56 + 57 + /* copy the fdt command line into the buffer */ 58 + fdt_bootargs = getprop(fdt, "/chosen", "bootargs", &len); 59 + if (fdt_bootargs) 60 + if (len < COMMAND_LINE_SIZE) { 61 + memcpy(ptr, fdt_bootargs, len); 62 + /* len is the length of the string 63 + * including the NULL terminator */ 64 + ptr += len - 1; 65 + } 66 + 67 + /* and append the ATAG_CMDLINE */ 68 + if (fdt_cmdline) { 69 + len = strlen(fdt_cmdline); 70 + if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) { 71 + *ptr++ = ' '; 72 + memcpy(ptr, fdt_cmdline, len); 73 + ptr += len; 74 + } 75 + } 76 + *ptr = '\0'; 77 + 78 + setprop_string(fdt, "/chosen", "bootargs", cmdline); 43 79 } 44 80 45 81 /* ··· 120 72 121 73 for_each_tag(atag, atag_list) { 122 74 if (atag->hdr.tag == ATAG_CMDLINE) { 123 - setprop_string(fdt, "/chosen", "bootargs", 124 - atag->u.cmdline.cmdline); 75 + /* Append the ATAGS command line to the device tree 76 + * command line. 
77 + * NB: This means that if the same parameter is set in 78 + * the device tree and in the tags, the one from the 79 + * tags will be chosen. 80 + */ 81 + if (do_extend_cmdline) 82 + merge_fdt_bootargs(fdt, 83 + atag->u.cmdline.cmdline); 84 + else 85 + setprop_string(fdt, "/chosen", "bootargs", 86 + atag->u.cmdline.cmdline); 125 87 } else if (atag->hdr.tag == ATAG_MEM) { 126 88 if (memcount >= sizeof(mem_reg_property)/4) 127 89 continue;
+3
arch/arm/include/asm/arch_timer.h
··· 1 1 #ifndef __ASMARM_ARCH_TIMER_H 2 2 #define __ASMARM_ARCH_TIMER_H 3 3 4 + #include <asm/errno.h> 5 + 4 6 #ifdef CONFIG_ARM_ARCH_TIMER 7 + #define ARCH_HAS_READ_CURRENT_TIMER 5 8 int arch_timer_of_register(void); 6 9 int arch_timer_sched_clock_init(void); 7 10 #else
+25 -7
arch/arm/include/asm/delay.h
··· 6 6 #ifndef __ASM_ARM_DELAY_H 7 7 #define __ASM_ARM_DELAY_H 8 8 9 + #include <asm/memory.h> 9 10 #include <asm/param.h> /* HZ */ 10 11 11 - extern void __delay(int loops); 12 + #define MAX_UDELAY_MS 2 13 + #define UDELAY_MULT ((UL(2199023) * HZ) >> 11) 14 + #define UDELAY_SHIFT 30 15 + 16 + #ifndef __ASSEMBLY__ 17 + 18 + extern struct arm_delay_ops { 19 + void (*delay)(unsigned long); 20 + void (*const_udelay)(unsigned long); 21 + void (*udelay)(unsigned long); 22 + } arm_delay_ops; 23 + 24 + #define __delay(n) arm_delay_ops.delay(n) 12 25 13 26 /* 14 27 * This function intentionally does not exist; if you see references to ··· 36 23 * division by multiplication: you don't have to worry about 37 24 * loss of precision. 38 25 * 39 - * Use only for very small delays ( < 1 msec). Should probably use a 26 + * Use only for very small delays ( < 2 msec). Should probably use a 40 27 * lookup table, really, as the multiplications take much too long with 41 28 * short delays. This is a "reasonable" implementation, though (and the 42 29 * first constant multiplications gets optimized away if the delay is 43 30 * a constant) 44 31 */ 45 - extern void __udelay(unsigned long usecs); 46 - extern void __const_udelay(unsigned long); 47 - 48 - #define MAX_UDELAY_MS 2 32 + #define __udelay(n) arm_delay_ops.udelay(n) 33 + #define __const_udelay(n) arm_delay_ops.const_udelay(n) 49 34 50 35 #define udelay(n) \ 51 36 (__builtin_constant_p(n) ? \ 52 37 ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \ 53 - __const_udelay((n) * ((2199023U*HZ)>>11))) : \ 38 + __const_udelay((n) * UDELAY_MULT)) : \ 54 39 __udelay(n)) 40 + 41 + /* Loop-based definitions for assembly code. */ 42 + extern void __loop_delay(unsigned long loops); 43 + extern void __loop_udelay(unsigned long usecs); 44 + extern void __loop_const_udelay(unsigned long); 45 + 46 + #endif /* __ASSEMBLY__ */ 55 47 56 48 #endif /* defined(_ARM_DELAY_H) */ 57 49
-274
arch/arm/include/asm/locks.h
··· 1 - /* 2 - * arch/arm/include/asm/locks.h 3 - * 4 - * Copyright (C) 2000 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * Interrupt safe locking assembler. 11 - */ 12 - #ifndef __ASM_PROC_LOCKS_H 13 - #define __ASM_PROC_LOCKS_H 14 - 15 - #if __LINUX_ARM_ARCH__ >= 6 16 - 17 - #define __down_op(ptr,fail) \ 18 - ({ \ 19 - __asm__ __volatile__( \ 20 - "@ down_op\n" \ 21 - "1: ldrex lr, [%0]\n" \ 22 - " sub lr, lr, %1\n" \ 23 - " strex ip, lr, [%0]\n" \ 24 - " teq ip, #0\n" \ 25 - " bne 1b\n" \ 26 - " teq lr, #0\n" \ 27 - " movmi ip, %0\n" \ 28 - " blmi " #fail \ 29 - : \ 30 - : "r" (ptr), "I" (1) \ 31 - : "ip", "lr", "cc"); \ 32 - smp_mb(); \ 33 - }) 34 - 35 - #define __down_op_ret(ptr,fail) \ 36 - ({ \ 37 - unsigned int ret; \ 38 - __asm__ __volatile__( \ 39 - "@ down_op_ret\n" \ 40 - "1: ldrex lr, [%1]\n" \ 41 - " sub lr, lr, %2\n" \ 42 - " strex ip, lr, [%1]\n" \ 43 - " teq ip, #0\n" \ 44 - " bne 1b\n" \ 45 - " teq lr, #0\n" \ 46 - " movmi ip, %1\n" \ 47 - " movpl ip, #0\n" \ 48 - " blmi " #fail "\n" \ 49 - " mov %0, ip" \ 50 - : "=&r" (ret) \ 51 - : "r" (ptr), "I" (1) \ 52 - : "ip", "lr", "cc"); \ 53 - smp_mb(); \ 54 - ret; \ 55 - }) 56 - 57 - #define __up_op(ptr,wake) \ 58 - ({ \ 59 - smp_mb(); \ 60 - __asm__ __volatile__( \ 61 - "@ up_op\n" \ 62 - "1: ldrex lr, [%0]\n" \ 63 - " add lr, lr, %1\n" \ 64 - " strex ip, lr, [%0]\n" \ 65 - " teq ip, #0\n" \ 66 - " bne 1b\n" \ 67 - " cmp lr, #0\n" \ 68 - " movle ip, %0\n" \ 69 - " blle " #wake \ 70 - : \ 71 - : "r" (ptr), "I" (1) \ 72 - : "ip", "lr", "cc"); \ 73 - }) 74 - 75 - /* 76 - * The value 0x01000000 supports up to 128 processors and 77 - * lots of processes. BIAS must be chosen such that sub'ing 78 - * BIAS once per CPU will result in the long remaining 79 - * negative. 
80 - */ 81 - #define RW_LOCK_BIAS 0x01000000 82 - #define RW_LOCK_BIAS_STR "0x01000000" 83 - 84 - #define __down_op_write(ptr,fail) \ 85 - ({ \ 86 - __asm__ __volatile__( \ 87 - "@ down_op_write\n" \ 88 - "1: ldrex lr, [%0]\n" \ 89 - " sub lr, lr, %1\n" \ 90 - " strex ip, lr, [%0]\n" \ 91 - " teq ip, #0\n" \ 92 - " bne 1b\n" \ 93 - " teq lr, #0\n" \ 94 - " movne ip, %0\n" \ 95 - " blne " #fail \ 96 - : \ 97 - : "r" (ptr), "I" (RW_LOCK_BIAS) \ 98 - : "ip", "lr", "cc"); \ 99 - smp_mb(); \ 100 - }) 101 - 102 - #define __up_op_write(ptr,wake) \ 103 - ({ \ 104 - smp_mb(); \ 105 - __asm__ __volatile__( \ 106 - "@ up_op_write\n" \ 107 - "1: ldrex lr, [%0]\n" \ 108 - " adds lr, lr, %1\n" \ 109 - " strex ip, lr, [%0]\n" \ 110 - " teq ip, #0\n" \ 111 - " bne 1b\n" \ 112 - " movcs ip, %0\n" \ 113 - " blcs " #wake \ 114 - : \ 115 - : "r" (ptr), "I" (RW_LOCK_BIAS) \ 116 - : "ip", "lr", "cc"); \ 117 - }) 118 - 119 - #define __down_op_read(ptr,fail) \ 120 - __down_op(ptr, fail) 121 - 122 - #define __up_op_read(ptr,wake) \ 123 - ({ \ 124 - smp_mb(); \ 125 - __asm__ __volatile__( \ 126 - "@ up_op_read\n" \ 127 - "1: ldrex lr, [%0]\n" \ 128 - " add lr, lr, %1\n" \ 129 - " strex ip, lr, [%0]\n" \ 130 - " teq ip, #0\n" \ 131 - " bne 1b\n" \ 132 - " teq lr, #0\n" \ 133 - " moveq ip, %0\n" \ 134 - " bleq " #wake \ 135 - : \ 136 - : "r" (ptr), "I" (1) \ 137 - : "ip", "lr", "cc"); \ 138 - }) 139 - 140 - #else 141 - 142 - #define __down_op(ptr,fail) \ 143 - ({ \ 144 - __asm__ __volatile__( \ 145 - "@ down_op\n" \ 146 - " mrs ip, cpsr\n" \ 147 - " orr lr, ip, #128\n" \ 148 - " msr cpsr_c, lr\n" \ 149 - " ldr lr, [%0]\n" \ 150 - " subs lr, lr, %1\n" \ 151 - " str lr, [%0]\n" \ 152 - " msr cpsr_c, ip\n" \ 153 - " movmi ip, %0\n" \ 154 - " blmi " #fail \ 155 - : \ 156 - : "r" (ptr), "I" (1) \ 157 - : "ip", "lr", "cc"); \ 158 - smp_mb(); \ 159 - }) 160 - 161 - #define __down_op_ret(ptr,fail) \ 162 - ({ \ 163 - unsigned int ret; \ 164 - __asm__ __volatile__( \ 165 - "@ down_op_ret\n" \ 166 - " 
mrs ip, cpsr\n" \ 167 - " orr lr, ip, #128\n" \ 168 - " msr cpsr_c, lr\n" \ 169 - " ldr lr, [%1]\n" \ 170 - " subs lr, lr, %2\n" \ 171 - " str lr, [%1]\n" \ 172 - " msr cpsr_c, ip\n" \ 173 - " movmi ip, %1\n" \ 174 - " movpl ip, #0\n" \ 175 - " blmi " #fail "\n" \ 176 - " mov %0, ip" \ 177 - : "=&r" (ret) \ 178 - : "r" (ptr), "I" (1) \ 179 - : "ip", "lr", "cc"); \ 180 - smp_mb(); \ 181 - ret; \ 182 - }) 183 - 184 - #define __up_op(ptr,wake) \ 185 - ({ \ 186 - smp_mb(); \ 187 - __asm__ __volatile__( \ 188 - "@ up_op\n" \ 189 - " mrs ip, cpsr\n" \ 190 - " orr lr, ip, #128\n" \ 191 - " msr cpsr_c, lr\n" \ 192 - " ldr lr, [%0]\n" \ 193 - " adds lr, lr, %1\n" \ 194 - " str lr, [%0]\n" \ 195 - " msr cpsr_c, ip\n" \ 196 - " movle ip, %0\n" \ 197 - " blle " #wake \ 198 - : \ 199 - : "r" (ptr), "I" (1) \ 200 - : "ip", "lr", "cc"); \ 201 - }) 202 - 203 - /* 204 - * The value 0x01000000 supports up to 128 processors and 205 - * lots of processes. BIAS must be chosen such that sub'ing 206 - * BIAS once per CPU will result in the long remaining 207 - * negative. 
208 - */ 209 - #define RW_LOCK_BIAS 0x01000000 210 - #define RW_LOCK_BIAS_STR "0x01000000" 211 - 212 - #define __down_op_write(ptr,fail) \ 213 - ({ \ 214 - __asm__ __volatile__( \ 215 - "@ down_op_write\n" \ 216 - " mrs ip, cpsr\n" \ 217 - " orr lr, ip, #128\n" \ 218 - " msr cpsr_c, lr\n" \ 219 - " ldr lr, [%0]\n" \ 220 - " subs lr, lr, %1\n" \ 221 - " str lr, [%0]\n" \ 222 - " msr cpsr_c, ip\n" \ 223 - " movne ip, %0\n" \ 224 - " blne " #fail \ 225 - : \ 226 - : "r" (ptr), "I" (RW_LOCK_BIAS) \ 227 - : "ip", "lr", "cc"); \ 228 - smp_mb(); \ 229 - }) 230 - 231 - #define __up_op_write(ptr,wake) \ 232 - ({ \ 233 - __asm__ __volatile__( \ 234 - "@ up_op_write\n" \ 235 - " mrs ip, cpsr\n" \ 236 - " orr lr, ip, #128\n" \ 237 - " msr cpsr_c, lr\n" \ 238 - " ldr lr, [%0]\n" \ 239 - " adds lr, lr, %1\n" \ 240 - " str lr, [%0]\n" \ 241 - " msr cpsr_c, ip\n" \ 242 - " movcs ip, %0\n" \ 243 - " blcs " #wake \ 244 - : \ 245 - : "r" (ptr), "I" (RW_LOCK_BIAS) \ 246 - : "ip", "lr", "cc"); \ 247 - smp_mb(); \ 248 - }) 249 - 250 - #define __down_op_read(ptr,fail) \ 251 - __down_op(ptr, fail) 252 - 253 - #define __up_op_read(ptr,wake) \ 254 - ({ \ 255 - smp_mb(); \ 256 - __asm__ __volatile__( \ 257 - "@ up_op_read\n" \ 258 - " mrs ip, cpsr\n" \ 259 - " orr lr, ip, #128\n" \ 260 - " msr cpsr_c, lr\n" \ 261 - " ldr lr, [%0]\n" \ 262 - " adds lr, lr, %1\n" \ 263 - " str lr, [%0]\n" \ 264 - " msr cpsr_c, ip\n" \ 265 - " moveq ip, %0\n" \ 266 - " bleq " #wake \ 267 - : \ 268 - : "r" (ptr), "I" (1) \ 269 - : "ip", "lr", "cc"); \ 270 - }) 271 - 272 - #endif 273 - 274 - #endif
+1 -1
arch/arm/include/asm/memory.h
··· 16 16 #include <linux/compiler.h> 17 17 #include <linux/const.h> 18 18 #include <linux/types.h> 19 - #include <asm/sizes.h> 19 + #include <linux/sizes.h> 20 20 21 21 #ifdef CONFIG_NEED_MACH_MEMORY_H 22 22 #include <mach/memory.h>
+1 -16
arch/arm/include/asm/perf_event.h
··· 12 12 #ifndef __ARM_PERF_EVENT_H__ 13 13 #define __ARM_PERF_EVENT_H__ 14 14 15 - /* ARM perf PMU IDs for use by internal perf clients. */ 16 - enum arm_perf_pmu_ids { 17 - ARM_PERF_PMU_ID_XSCALE1 = 0, 18 - ARM_PERF_PMU_ID_XSCALE2, 19 - ARM_PERF_PMU_ID_V6, 20 - ARM_PERF_PMU_ID_V6MP, 21 - ARM_PERF_PMU_ID_CA8, 22 - ARM_PERF_PMU_ID_CA9, 23 - ARM_PERF_PMU_ID_CA5, 24 - ARM_PERF_PMU_ID_CA15, 25 - ARM_PERF_PMU_ID_CA7, 26 - ARM_NUM_PMU_IDS, 27 - }; 28 - 29 - extern enum arm_perf_pmu_ids 30 - armpmu_get_pmu_id(void); 15 + /* Nothing to see here... */ 31 16 32 17 #endif /* __ARM_PERF_EVENT_H__ */
+1 -2
arch/arm/include/asm/pmu.h
··· 103 103 104 104 struct arm_pmu { 105 105 struct pmu pmu; 106 - enum arm_perf_pmu_ids id; 107 106 enum arm_pmu_type type; 108 107 cpumask_t active_irqs; 109 - const char *name; 108 + char *name; 110 109 irqreturn_t (*handle_irq)(int irq_num, void *dev); 111 110 void (*enable)(struct hw_perf_event *evt, int idx); 112 111 void (*disable)(struct hw_perf_event *evt, int idx);
+50 -26
arch/arm/include/asm/spinlock.h
··· 59 59 } 60 60 61 61 /* 62 - * ARMv6 Spin-locking. 62 + * ARMv6 ticket-based spin-locking. 63 63 * 64 - * We exclusively read the old value. If it is zero, we may have 65 - * won the lock, so we try exclusively storing it. A memory barrier 66 - * is required after we get a lock, and before we release it, because 67 - * V6 CPUs are assumed to have weakly ordered memory. 68 - * 69 - * Unlocked value: 0 70 - * Locked value: 1 64 + * A memory barrier is required after we get a lock, and before we 65 + * release it, because V6 CPUs are assumed to have weakly ordered 66 + * memory. 71 67 */ 72 68 73 - #define arch_spin_is_locked(x) ((x)->lock != 0) 74 69 #define arch_spin_unlock_wait(lock) \ 75 70 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 76 71 ··· 74 79 static inline void arch_spin_lock(arch_spinlock_t *lock) 75 80 { 76 81 unsigned long tmp; 82 + u32 newval; 83 + arch_spinlock_t lockval; 77 84 78 85 __asm__ __volatile__( 79 - "1: ldrex %0, [%1]\n" 80 - " teq %0, #0\n" 81 - WFE("ne") 82 - " strexeq %0, %2, [%1]\n" 83 - " teqeq %0, #0\n" 86 + "1: ldrex %0, [%3]\n" 87 + " add %1, %0, %4\n" 88 + " strex %2, %1, [%3]\n" 89 + " teq %2, #0\n" 84 90 " bne 1b" 85 - : "=&r" (tmp) 86 - : "r" (&lock->lock), "r" (1) 91 + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp) 92 + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) 87 93 : "cc"); 94 + 95 + while (lockval.tickets.next != lockval.tickets.owner) { 96 + wfe(); 97 + lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); 98 + } 88 99 89 100 smp_mb(); 90 101 } ··· 98 97 static inline int arch_spin_trylock(arch_spinlock_t *lock) 99 98 { 100 99 unsigned long tmp; 100 + u32 slock; 101 101 102 102 __asm__ __volatile__( 103 - " ldrex %0, [%1]\n" 104 - " teq %0, #0\n" 105 - " strexeq %0, %2, [%1]" 106 - : "=&r" (tmp) 107 - : "r" (&lock->lock), "r" (1) 103 + " ldrex %0, [%2]\n" 104 + " subs %1, %0, %0, ror #16\n" 105 + " addeq %0, %0, %3\n" 106 + " strexeq %1, %0, [%2]" 107 + : "=&r" (slock), "=&r" (tmp) 108 + : "r" 
(&lock->slock), "I" (1 << TICKET_SHIFT) 108 109 : "cc"); 109 110 110 111 if (tmp == 0) { ··· 119 116 120 117 static inline void arch_spin_unlock(arch_spinlock_t *lock) 121 118 { 119 + unsigned long tmp; 120 + u32 slock; 121 + 122 122 smp_mb(); 123 123 124 124 __asm__ __volatile__( 125 - " str %1, [%0]\n" 126 - : 127 - : "r" (&lock->lock), "r" (0) 125 + " mov %1, #1\n" 126 + "1: ldrex %0, [%2]\n" 127 + " uadd16 %0, %0, %1\n" 128 + " strex %1, %0, [%2]\n" 129 + " teq %1, #0\n" 130 + " bne 1b" 131 + : "=&r" (slock), "=&r" (tmp) 132 + : "r" (&lock->slock) 128 133 : "cc"); 129 134 130 135 dsb_sev(); 131 136 } 137 + 138 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 139 + { 140 + struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); 141 + return tickets.owner != tickets.next; 142 + } 143 + 144 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 145 + { 146 + struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); 147 + return (tickets.next - tickets.owner) > 1; 148 + } 149 + #define arch_spin_is_contended arch_spin_is_contended 132 150 133 151 /* 134 152 * RWLOCKS ··· 182 158 unsigned long tmp; 183 159 184 160 __asm__ __volatile__( 185 - "1: ldrex %0, [%1]\n" 161 + " ldrex %0, [%1]\n" 186 162 " teq %0, #0\n" 187 163 " strexeq %0, %2, [%1]" 188 164 : "=&r" (tmp) ··· 268 244 unsigned long tmp, tmp2 = 1; 269 245 270 246 __asm__ __volatile__( 271 - "1: ldrex %0, [%2]\n" 247 + " ldrex %0, [%2]\n" 272 248 " adds %0, %0, #1\n" 273 249 " strexpl %1, %0, [%2]\n" 274 250 : "=&r" (tmp), "+r" (tmp2)
+15 -2
arch/arm/include/asm/spinlock_types.h
··· 5 5 # error "please don't include this file directly" 6 6 #endif 7 7 8 + #define TICKET_SHIFT 16 9 + 8 10 typedef struct { 9 - volatile unsigned int lock; 11 + union { 12 + u32 slock; 13 + struct __raw_tickets { 14 + #ifdef __ARMEB__ 15 + u16 next; 16 + u16 owner; 17 + #else 18 + u16 owner; 19 + u16 next; 20 + #endif 21 + } tickets; 22 + }; 10 23 } arch_spinlock_t; 11 24 12 - #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 25 + #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 13 26 14 27 typedef struct { 15 28 volatile unsigned int lock;
+6 -4
arch/arm/include/asm/timex.h
··· 12 12 #ifndef _ASMARM_TIMEX_H 13 13 #define _ASMARM_TIMEX_H 14 14 15 + #include <asm/arch_timer.h> 15 16 #include <mach/timex.h> 16 17 17 18 typedef unsigned long cycles_t; 18 19 19 - static inline cycles_t get_cycles (void) 20 - { 21 - return 0; 22 - } 20 + #ifdef ARCH_HAS_READ_CURRENT_TIMER 21 + #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) 22 + #else 23 + #define get_cycles() (0) 24 + #endif 23 25 24 26 #endif
+6 -21
arch/arm/include/asm/uaccess.h
··· 189 189 190 190 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 191 191 192 + #define user_addr_max() \ 193 + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 194 + 192 195 /* 193 196 * The "__xxx" versions of the user access functions do not verify the 194 197 * address space - it must have been done previously with a separate ··· 401 398 #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) 402 399 #endif 403 400 404 - extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); 405 - extern unsigned long __must_check __strnlen_user(const char __user *s, long n); 406 - 407 401 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 408 402 { 409 403 if (access_ok(VERIFY_READ, from, n)) ··· 427 427 return n; 428 428 } 429 429 430 - static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) 431 - { 432 - long res = -EFAULT; 433 - if (access_ok(VERIFY_READ, src, 1)) 434 - res = __strncpy_from_user(dst, src, count); 435 - return res; 436 - } 430 + extern long strncpy_from_user(char *dest, const char __user *src, long count); 437 431 438 - #define strlen_user(s) strnlen_user(s, ~0UL >> 1) 439 - 440 - static inline long __must_check strnlen_user(const char __user *s, long n) 441 - { 442 - unsigned long res = 0; 443 - 444 - if (__addr_ok(s)) 445 - res = __strnlen_user(s, n); 446 - 447 - return res; 448 - } 432 + extern __must_check long strlen_user(const char __user *str); 433 + extern __must_check long strnlen_user(const char __user *str, long n); 449 434 450 435 #endif /* _ASMARM_UACCESS_H */
+96
arch/arm/include/asm/word-at-a-time.h
··· 1 + #ifndef __ASM_ARM_WORD_AT_A_TIME_H 2 + #define __ASM_ARM_WORD_AT_A_TIME_H 3 + 4 + #ifndef __ARMEB__ 5 + 6 + /* 7 + * Little-endian word-at-a-time zero byte handling. 8 + * Heavily based on the x86 algorithm. 9 + */ 10 + #include <linux/kernel.h> 11 + 12 + struct word_at_a_time { 13 + const unsigned long one_bits, high_bits; 14 + }; 15 + 16 + #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } 17 + 18 + static inline unsigned long has_zero(unsigned long a, unsigned long *bits, 19 + const struct word_at_a_time *c) 20 + { 21 + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; 22 + *bits = mask; 23 + return mask; 24 + } 25 + 26 + #define prep_zero_mask(a, bits, c) (bits) 27 + 28 + static inline unsigned long create_zero_mask(unsigned long bits) 29 + { 30 + bits = (bits - 1) & ~bits; 31 + return bits >> 7; 32 + } 33 + 34 + static inline unsigned long find_zero(unsigned long mask) 35 + { 36 + unsigned long ret; 37 + 38 + #if __LINUX_ARM_ARCH__ >= 5 39 + /* We have clz available. */ 40 + ret = fls(mask) >> 3; 41 + #else 42 + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ 43 + ret = (0x0ff0001 + mask) >> 23; 44 + /* Fix the 1 for 00 case */ 45 + ret &= mask; 46 + #endif 47 + 48 + return ret; 49 + } 50 + 51 + #ifdef CONFIG_DCACHE_WORD_ACCESS 52 + 53 + #define zero_bytemask(mask) (mask) 54 + 55 + /* 56 + * Load an unaligned word from kernel space. 57 + * 58 + * In the (very unlikely) case of the word being a page-crosser 59 + * and the next page not being mapped, take the exception and 60 + * return zeroes in the non-existing part. 
61 + */ 62 + static inline unsigned long load_unaligned_zeropad(const void *addr) 63 + { 64 + unsigned long ret, offset; 65 + 66 + /* Load word from unaligned pointer addr */ 67 + asm( 68 + "1: ldr %0, [%2]\n" 69 + "2:\n" 70 + " .pushsection .fixup,\"ax\"\n" 71 + " .align 2\n" 72 + "3: and %1, %2, #0x3\n" 73 + " bic %2, %2, #0x3\n" 74 + " ldr %0, [%2]\n" 75 + " lsl %1, %1, #0x3\n" 76 + " lsr %0, %0, %1\n" 77 + " b 2b\n" 78 + " .popsection\n" 79 + " .pushsection __ex_table,\"a\"\n" 80 + " .align 3\n" 81 + " .long 1b, 3b\n" 82 + " .popsection" 83 + : "=&r" (ret), "=&r" (offset) 84 + : "r" (addr), "Qo" (*(unsigned long *)addr)); 85 + 86 + return ret; 87 + } 88 + 89 + 90 + #endif /* DCACHE_WORD_ACCESS */ 91 + 92 + #else /* __ARMEB__ */ 93 + #include <asm-generic/word-at-a-time.h> 94 + #endif 95 + 96 + #endif /* __ASM_ARM_WORD_AT_A_TIME_H */
+12 -1
arch/arm/kernel/arch_timer.c
··· 32 32 33 33 static struct clock_event_device __percpu **arch_timer_evt; 34 34 35 + extern void init_current_timer_delay(unsigned long freq); 36 + 35 37 /* 36 38 * Architected system timer support. 37 39 */ ··· 139 137 /* Be safe... */ 140 138 arch_timer_disable(); 141 139 142 - clk->features = CLOCK_EVT_FEAT_ONESHOT; 140 + clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; 143 141 clk->name = "arch_sys_timer"; 144 142 clk->rating = 450; 145 143 clk->set_mode = arch_timer_set_mode; ··· 225 223 return arch_counter_get_cntpct(); 226 224 } 227 225 226 + int read_current_timer(unsigned long *timer_val) 227 + { 228 + if (!arch_timer_rate) 229 + return -ENXIO; 230 + *timer_val = arch_counter_get_cntpct(); 231 + return 0; 232 + } 233 + 228 234 static struct clocksource clocksource_counter = { 229 235 .name = "arch_sys_counter", 230 236 .rating = 400, ··· 306 296 if (err) 307 297 goto out_free_irq; 308 298 299 + init_current_timer_delay(arch_timer_rate); 309 300 return 0; 310 301 311 302 out_free_irq:
+1 -6
arch/arm/kernel/armksyms.c
··· 49 49 extern void fpundefinstr(void); 50 50 51 51 /* platform dependent support */ 52 - EXPORT_SYMBOL(__udelay); 53 - EXPORT_SYMBOL(__const_udelay); 52 + EXPORT_SYMBOL(arm_delay_ops); 54 53 55 54 /* networking */ 56 55 EXPORT_SYMBOL(csum_partial); ··· 85 86 EXPORT_SYMBOL(memmove); 86 87 EXPORT_SYMBOL(memchr); 87 88 EXPORT_SYMBOL(__memzero); 88 - 89 - /* user mem (segment) */ 90 - EXPORT_SYMBOL(__strnlen_user); 91 - EXPORT_SYMBOL(__strncpy_from_user); 92 89 93 90 #ifdef CONFIG_MMU 94 91 EXPORT_SYMBOL(copy_page);
+6 -14
arch/arm/kernel/entry-common.S
··· 95 95 ENTRY(ret_from_fork) 96 96 bl schedule_tail 97 97 get_thread_info tsk 98 - ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing 99 98 mov why, #1 100 - tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls? 101 - beq ret_slow_syscall 102 - mov r1, sp 103 - mov r0, #1 @ trace exit [IP = 1] 104 - bl syscall_trace 105 99 b ret_slow_syscall 106 100 ENDPROC(ret_from_fork) 107 101 ··· 442 448 * context switches, and waiting for our parent to respond. 443 449 */ 444 450 __sys_trace: 445 - mov r2, scno 446 - add r1, sp, #S_OFF 447 - mov r0, #0 @ trace entry [IP = 0] 448 - bl syscall_trace 451 + mov r1, scno 452 + add r0, sp, #S_OFF 453 + bl syscall_trace_enter 449 454 450 455 adr lr, BSYM(__sys_trace_return) @ return address 451 456 mov scno, r0 @ syscall number (possibly new) ··· 456 463 457 464 __sys_trace_return: 458 465 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 459 - mov r2, scno 460 - mov r1, sp 461 - mov r0, #1 @ trace exit [IP = 1] 462 - bl syscall_trace 466 + mov r1, scno 467 + mov r0, sp 468 + bl syscall_trace_exit 463 469 b ret_slow_syscall 464 470 465 471 .align 5
+24 -37
arch/arm/kernel/head.S
··· 55 55 add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE 56 56 .endm 57 57 58 - #ifdef CONFIG_XIP_KERNEL 59 - #define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) 60 - #define KERNEL_END _edata_loc 61 - #else 62 - #define KERNEL_START KERNEL_RAM_VADDR 63 - #define KERNEL_END _end 64 - #endif 65 - 66 58 /* 67 59 * Kernel startup entry point. 68 60 * --------------------------- ··· 210 218 blo 1b 211 219 212 220 /* 213 - * Now setup the pagetables for our kernel direct 214 - * mapped region. 221 + * Map our RAM from the start to the end of the kernel .bss section. 215 222 */ 223 + add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 224 + ldr r6, =(_end - 1) 225 + orr r3, r8, r7 226 + add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 227 + 1: str r3, [r0], #1 << PMD_ORDER 228 + add r3, r3, #1 << SECTION_SHIFT 229 + cmp r0, r6 230 + bls 1b 231 + 232 + #ifdef CONFIG_XIP_KERNEL 233 + /* 234 + * Map the kernel image separately as it is not located in RAM. 235 + */ 236 + #define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) 216 237 mov r3, pc 217 238 mov r3, r3, lsr #SECTION_SHIFT 218 239 orr r3, r7, r3, lsl #SECTION_SHIFT 219 - add r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) 220 - str r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! 221 - ldr r6, =(KERNEL_END - 1) 240 + add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) 241 + str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! 242 + ldr r6, =(_edata_loc - 1) 222 243 add r0, r0, #1 << PMD_ORDER 223 244 add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 224 245 1: cmp r0, r6 225 246 add r3, r3, #1 << SECTION_SHIFT 226 247 strls r3, [r0], #1 << PMD_ORDER 227 248 bls 1b 228 - 229 - #ifdef CONFIG_XIP_KERNEL 230 - /* 231 - * Map some ram to cover our .data and .bss areas. 
232 - */ 233 - add r3, r8, #TEXT_OFFSET 234 - orr r3, r3, r7 235 - add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) 236 - str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]! 237 - ldr r6, =(_end - 1) 238 - add r0, r0, #4 239 - add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 240 - 1: cmp r0, r6 241 - add r3, r3, #1 << 20 242 - strls r3, [r0], #4 243 - bls 1b 244 249 #endif 245 250 246 251 /* 247 - * Then map boot params address in r2 or the first 1MB (2MB with LPAE) 248 - * of ram if boot params address is not specified. 252 + * Then map boot params address in r2 if specified. 249 253 */ 250 254 mov r0, r2, lsr #SECTION_SHIFT 251 255 movs r0, r0, lsl #SECTION_SHIFT 252 - moveq r0, r8 253 - sub r3, r0, r8 254 - add r3, r3, #PAGE_OFFSET 255 - add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) 256 - orr r6, r7, r0 257 - str r6, [r3] 256 + subne r3, r0, r8 257 + addne r3, r3, #PAGE_OFFSET 258 + addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) 259 + orrne r6, r7, r0 260 + strne r6, [r3] 258 261 259 262 #ifdef CONFIG_DEBUG_LL 260 263 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
+6 -9
arch/arm/kernel/perf_event.c
··· 47 47 /* Set at runtime when we know what CPU type we are. */ 48 48 static struct arm_pmu *cpu_pmu; 49 49 50 - enum arm_perf_pmu_ids 51 - armpmu_get_pmu_id(void) 50 + const char *perf_pmu_name(void) 52 51 { 53 - int id = -ENODEV; 52 + if (!cpu_pmu) 53 + return NULL; 54 54 55 - if (cpu_pmu != NULL) 56 - id = cpu_pmu->id; 57 - 58 - return id; 55 + return cpu_pmu->pmu.name; 59 56 } 60 - EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); 57 + EXPORT_SYMBOL_GPL(perf_pmu_name); 61 58 62 59 int perf_num_counters(void) 63 60 { ··· 757 760 cpu_pmu->name, cpu_pmu->num_events); 758 761 cpu_pmu_init(cpu_pmu); 759 762 register_cpu_notifier(&pmu_cpu_notifier); 760 - armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); 763 + armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); 761 764 } else { 762 765 pr_info("no hardware support available\n"); 763 766 }
-2
arch/arm/kernel/perf_event_v6.c
··· 650 650 } 651 651 652 652 static struct arm_pmu armv6pmu = { 653 - .id = ARM_PERF_PMU_ID_V6, 654 653 .name = "v6", 655 654 .handle_irq = armv6pmu_handle_irq, 656 655 .enable = armv6pmu_enable_event, ··· 684 685 } 685 686 686 687 static struct arm_pmu armv6mpcore_pmu = { 687 - .id = ARM_PERF_PMU_ID_V6MP, 688 688 .name = "v6mpcore", 689 689 .handle_irq = armv6pmu_handle_irq, 690 690 .enable = armv6pmu_enable_event,
-5
arch/arm/kernel/perf_event_v7.c
··· 1258 1258 1259 1259 static struct arm_pmu *__init armv7_a8_pmu_init(void) 1260 1260 { 1261 - armv7pmu.id = ARM_PERF_PMU_ID_CA8; 1262 1261 armv7pmu.name = "ARMv7 Cortex-A8"; 1263 1262 armv7pmu.map_event = armv7_a8_map_event; 1264 1263 armv7pmu.num_events = armv7_read_num_pmnc_events(); ··· 1266 1267 1267 1268 static struct arm_pmu *__init armv7_a9_pmu_init(void) 1268 1269 { 1269 - armv7pmu.id = ARM_PERF_PMU_ID_CA9; 1270 1270 armv7pmu.name = "ARMv7 Cortex-A9"; 1271 1271 armv7pmu.map_event = armv7_a9_map_event; 1272 1272 armv7pmu.num_events = armv7_read_num_pmnc_events(); ··· 1274 1276 1275 1277 static struct arm_pmu *__init armv7_a5_pmu_init(void) 1276 1278 { 1277 - armv7pmu.id = ARM_PERF_PMU_ID_CA5; 1278 1279 armv7pmu.name = "ARMv7 Cortex-A5"; 1279 1280 armv7pmu.map_event = armv7_a5_map_event; 1280 1281 armv7pmu.num_events = armv7_read_num_pmnc_events(); ··· 1282 1285 1283 1286 static struct arm_pmu *__init armv7_a15_pmu_init(void) 1284 1287 { 1285 - armv7pmu.id = ARM_PERF_PMU_ID_CA15; 1286 1288 armv7pmu.name = "ARMv7 Cortex-A15"; 1287 1289 armv7pmu.map_event = armv7_a15_map_event; 1288 1290 armv7pmu.num_events = armv7_read_num_pmnc_events(); ··· 1291 1295 1292 1296 static struct arm_pmu *__init armv7_a7_pmu_init(void) 1293 1297 { 1294 - armv7pmu.id = ARM_PERF_PMU_ID_CA7; 1295 1298 armv7pmu.name = "ARMv7 Cortex-A7"; 1296 1299 armv7pmu.map_event = armv7_a7_map_event; 1297 1300 armv7pmu.num_events = armv7_read_num_pmnc_events();
-2
arch/arm/kernel/perf_event_xscale.c
··· 435 435 } 436 436 437 437 static struct arm_pmu xscale1pmu = { 438 - .id = ARM_PERF_PMU_ID_XSCALE1, 439 438 .name = "xscale1", 440 439 .handle_irq = xscale1pmu_handle_irq, 441 440 .enable = xscale1pmu_enable_event, ··· 802 803 } 803 804 804 805 static struct arm_pmu xscale2pmu = { 805 - .id = ARM_PERF_PMU_ID_XSCALE2, 806 806 .name = "xscale2", 807 807 .handle_irq = xscale2pmu_handle_irq, 808 808 .enable = xscale2pmu_enable_event,
+24 -10
arch/arm/kernel/ptrace.c
··· 907 907 return ret; 908 908 } 909 909 910 - asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) 910 + enum ptrace_syscall_dir { 911 + PTRACE_SYSCALL_ENTER = 0, 912 + PTRACE_SYSCALL_EXIT, 913 + }; 914 + 915 + static int ptrace_syscall_trace(struct pt_regs *regs, int scno, 916 + enum ptrace_syscall_dir dir) 911 917 { 912 918 unsigned long ip; 913 - 914 - if (why) 915 - audit_syscall_exit(regs); 916 - else 917 - audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, 918 - regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); 919 919 920 920 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 921 921 return scno; ··· 927 927 * IP = 0 -> entry, =1 -> exit 928 928 */ 929 929 ip = regs->ARM_ip; 930 - regs->ARM_ip = why; 930 + regs->ARM_ip = dir; 931 931 932 - if (why) 932 + if (dir == PTRACE_SYSCALL_EXIT) 933 933 tracehook_report_syscall_exit(regs, 0); 934 934 else if (tracehook_report_syscall_entry(regs)) 935 935 current_thread_info()->syscall = -1; 936 936 937 937 regs->ARM_ip = ip; 938 - 939 938 return current_thread_info()->syscall; 939 + } 940 + 941 + asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) 942 + { 943 + int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); 944 + audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, 945 + regs->ARM_r2, regs->ARM_r3); 946 + return ret; 947 + } 948 + 949 + asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno) 950 + { 951 + int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); 952 + audit_syscall_exit(regs); 953 + return ret; 940 954 }
+1 -1
arch/arm/kernel/smp.c
··· 179 179 mb(); 180 180 181 181 /* Tell __cpu_die() that this CPU is now safe to dispose of */ 182 - complete(&cpu_died); 182 + RCU_NONIDLE(complete(&cpu_died)); 183 183 184 184 /* 185 185 * actual CPU shutdown procedure is at least platform (if not
+216 -21
arch/arm/kernel/topology.c
··· 17 17 #include <linux/percpu.h> 18 18 #include <linux/node.h> 19 19 #include <linux/nodemask.h> 20 + #include <linux/of.h> 20 21 #include <linux/sched.h> 22 + #include <linux/slab.h> 21 23 22 24 #include <asm/cputype.h> 23 25 #include <asm/topology.h> 26 + 27 + /* 28 + * cpu power scale management 29 + */ 30 + 31 + /* 32 + * cpu power table 33 + * This per cpu data structure describes the relative capacity of each core. 34 + * On a heteregenous system, cores don't have the same computation capacity 35 + * and we reflect that difference in the cpu_power field so the scheduler can 36 + * take this difference into account during load balance. A per cpu structure 37 + * is preferred because each CPU updates its own cpu_power field during the 38 + * load balance except for idle cores. One idle core is selected to run the 39 + * rebalance_domains for all idle cores and the cpu_power can be updated 40 + * during this sequence. 41 + */ 42 + static DEFINE_PER_CPU(unsigned long, cpu_scale); 43 + 44 + unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) 45 + { 46 + return per_cpu(cpu_scale, cpu); 47 + } 48 + 49 + static void set_power_scale(unsigned int cpu, unsigned long power) 50 + { 51 + per_cpu(cpu_scale, cpu) = power; 52 + } 53 + 54 + #ifdef CONFIG_OF 55 + struct cpu_efficiency { 56 + const char *compatible; 57 + unsigned long efficiency; 58 + }; 59 + 60 + /* 61 + * Table of relative efficiency of each processors 62 + * The efficiency value must fit in 20bit and the final 63 + * cpu_scale value must be in the range 64 + * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 65 + * in order to return at most 1 when DIV_ROUND_CLOSEST 66 + * is used to compute the capacity of a CPU. 67 + * Processors that are not defined in the table, 68 + * use the default SCHED_POWER_SCALE value for cpu_scale. 
69 + */ 70 + struct cpu_efficiency table_efficiency[] = { 71 + {"arm,cortex-a15", 3891}, 72 + {"arm,cortex-a7", 2048}, 73 + {NULL, }, 74 + }; 75 + 76 + struct cpu_capacity { 77 + unsigned long hwid; 78 + unsigned long capacity; 79 + }; 80 + 81 + struct cpu_capacity *cpu_capacity; 82 + 83 + unsigned long middle_capacity = 1; 84 + 85 + /* 86 + * Iterate all CPUs' descriptor in DT and compute the efficiency 87 + * (as per table_efficiency). Also calculate a middle efficiency 88 + * as close as possible to (max{eff_i} - min{eff_i}) / 2 89 + * This is later used to scale the cpu_power field such that an 90 + * 'average' CPU is of middle power. Also see the comments near 91 + * table_efficiency[] and update_cpu_power(). 92 + */ 93 + static void __init parse_dt_topology(void) 94 + { 95 + struct cpu_efficiency *cpu_eff; 96 + struct device_node *cn = NULL; 97 + unsigned long min_capacity = (unsigned long)(-1); 98 + unsigned long max_capacity = 0; 99 + unsigned long capacity = 0; 100 + int alloc_size, cpu = 0; 101 + 102 + alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); 103 + cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT); 104 + 105 + while ((cn = of_find_node_by_type(cn, "cpu"))) { 106 + const u32 *rate, *reg; 107 + int len; 108 + 109 + if (cpu >= num_possible_cpus()) 110 + break; 111 + 112 + for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) 113 + if (of_device_is_compatible(cn, cpu_eff->compatible)) 114 + break; 115 + 116 + if (cpu_eff->compatible == NULL) 117 + continue; 118 + 119 + rate = of_get_property(cn, "clock-frequency", &len); 120 + if (!rate || len != 4) { 121 + pr_err("%s missing clock-frequency property\n", 122 + cn->full_name); 123 + continue; 124 + } 125 + 126 + reg = of_get_property(cn, "reg", &len); 127 + if (!reg || len != 4) { 128 + pr_err("%s missing reg property\n", cn->full_name); 129 + continue; 130 + } 131 + 132 + capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; 133 + 134 + /* Save min 
capacity of the system */ 135 + if (capacity < min_capacity) 136 + min_capacity = capacity; 137 + 138 + /* Save max capacity of the system */ 139 + if (capacity > max_capacity) 140 + max_capacity = capacity; 141 + 142 + cpu_capacity[cpu].capacity = capacity; 143 + cpu_capacity[cpu++].hwid = be32_to_cpup(reg); 144 + } 145 + 146 + if (cpu < num_possible_cpus()) 147 + cpu_capacity[cpu].hwid = (unsigned long)(-1); 148 + 149 + /* If min and max capacities are equals, we bypass the update of the 150 + * cpu_scale because all CPUs have the same capacity. Otherwise, we 151 + * compute a middle_capacity factor that will ensure that the capacity 152 + * of an 'average' CPU of the system will be as close as possible to 153 + * SCHED_POWER_SCALE, which is the default value, but with the 154 + * constraint explained near table_efficiency[]. 155 + */ 156 + if (min_capacity == max_capacity) 157 + cpu_capacity[0].hwid = (unsigned long)(-1); 158 + else if (4*max_capacity < (3*(max_capacity + min_capacity))) 159 + middle_capacity = (min_capacity + max_capacity) 160 + >> (SCHED_POWER_SHIFT+1); 161 + else 162 + middle_capacity = ((max_capacity / 3) 163 + >> (SCHED_POWER_SHIFT-1)) + 1; 164 + 165 + } 166 + 167 + /* 168 + * Look for a customed capacity of a CPU in the cpu_capacity table during the 169 + * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the 170 + * function returns directly for SMP system. 
171 + */ 172 + void update_cpu_power(unsigned int cpu, unsigned long hwid) 173 + { 174 + unsigned int idx = 0; 175 + 176 + /* look for the cpu's hwid in the cpu capacity table */ 177 + for (idx = 0; idx < num_possible_cpus(); idx++) { 178 + if (cpu_capacity[idx].hwid == hwid) 179 + break; 180 + 181 + if (cpu_capacity[idx].hwid == -1) 182 + return; 183 + } 184 + 185 + if (idx == num_possible_cpus()) 186 + return; 187 + 188 + set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity); 189 + 190 + printk(KERN_INFO "CPU%u: update cpu_power %lu\n", 191 + cpu, arch_scale_freq_power(NULL, cpu)); 192 + } 193 + 194 + #else 195 + static inline void parse_dt_topology(void) {} 196 + static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} 197 + #endif 198 + 199 + 200 + /* 201 + * cpu topology management 202 + */ 24 203 25 204 #define MPIDR_SMP_BITMASK (0x3 << 30) 26 205 #define MPIDR_SMP_VALUE (0x2 << 30) ··· 210 31 * These masks reflect the current use of the affinity levels. 
211 32 * The affinity level can be up to 16 bits according to ARM ARM 212 33 */ 34 + #define MPIDR_HWID_BITMASK 0xFFFFFF 213 35 214 36 #define MPIDR_LEVEL0_MASK 0x3 215 37 #define MPIDR_LEVEL0_SHIFT 0 ··· 221 41 #define MPIDR_LEVEL2_MASK 0xFF 222 42 #define MPIDR_LEVEL2_SHIFT 16 223 43 44 + /* 45 + * cpu topology table 46 + */ 224 47 struct cputopo_arm cpu_topology[NR_CPUS]; 225 48 226 49 const struct cpumask *cpu_coregroup_mask(int cpu) 227 50 { 228 51 return &cpu_topology[cpu].core_sibling; 52 + } 53 + 54 + void update_siblings_masks(unsigned int cpuid) 55 + { 56 + struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; 57 + int cpu; 58 + 59 + /* update core and thread sibling masks */ 60 + for_each_possible_cpu(cpu) { 61 + cpu_topo = &cpu_topology[cpu]; 62 + 63 + if (cpuid_topo->socket_id != cpu_topo->socket_id) 64 + continue; 65 + 66 + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); 67 + if (cpu != cpuid) 68 + cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); 69 + 70 + if (cpuid_topo->core_id != cpu_topo->core_id) 71 + continue; 72 + 73 + cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); 74 + if (cpu != cpuid) 75 + cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); 76 + } 77 + smp_wmb(); 229 78 } 230 79 231 80 /* ··· 266 57 { 267 58 struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; 268 59 unsigned int mpidr; 269 - unsigned int cpu; 270 60 271 61 /* If the cpu topology has been already set, just return */ 272 62 if (cpuid_topo->core_id != -1) ··· 307 99 cpuid_topo->socket_id = -1; 308 100 } 309 101 310 - /* update core and thread sibling masks */ 311 - for_each_possible_cpu(cpu) { 312 - struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; 102 + update_siblings_masks(cpuid); 313 103 314 - if (cpuid_topo->socket_id == cpu_topo->socket_id) { 315 - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); 316 - if (cpu != cpuid) 317 - cpumask_set_cpu(cpu, 318 - &cpuid_topo->core_sibling); 319 - 320 - if (cpuid_topo->core_id == cpu_topo->core_id) { 321 - 
cpumask_set_cpu(cpuid, 322 - &cpu_topo->thread_sibling); 323 - if (cpu != cpuid) 324 - cpumask_set_cpu(cpu, 325 - &cpuid_topo->thread_sibling); 326 - } 327 - } 328 - } 329 - smp_wmb(); 104 + update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK); 330 105 331 106 printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", 332 107 cpuid, cpu_topology[cpuid].thread_id, ··· 325 134 { 326 135 unsigned int cpu; 327 136 328 - /* init core mask */ 137 + /* init core mask and power*/ 329 138 for_each_possible_cpu(cpu) { 330 139 struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); 331 140 ··· 334 143 cpu_topo->socket_id = -1; 335 144 cpumask_clear(&cpu_topo->core_sibling); 336 145 cpumask_clear(&cpu_topo->thread_sibling); 146 + 147 + set_power_scale(cpu, SCHED_POWER_SCALE); 337 148 } 338 149 smp_wmb(); 150 + 151 + parse_dt_topology(); 339 152 }
+55 -23
arch/arm/kernel/traps.c
··· 233 233 #define S_ISA " ARM" 234 234 #endif 235 235 236 - static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) 236 + static int __die(const char *str, int err, struct pt_regs *regs) 237 237 { 238 - struct task_struct *tsk = thread->task; 238 + struct task_struct *tsk = current; 239 239 static int die_counter; 240 240 int ret; 241 241 ··· 245 245 /* trap and error numbers are mostly meaningless on ARM */ 246 246 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); 247 247 if (ret == NOTIFY_STOP) 248 - return ret; 248 + return 1; 249 249 250 250 print_modules(); 251 251 __show_regs(regs); 252 252 printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", 253 - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); 253 + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); 254 254 255 255 if (!user_mode(regs) || in_interrupt()) { 256 256 dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, ··· 259 259 dump_instr(KERN_EMERG, regs); 260 260 } 261 261 262 - return ret; 262 + return 0; 263 263 } 264 264 265 - static DEFINE_RAW_SPINLOCK(die_lock); 265 + static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 266 + static int die_owner = -1; 267 + static unsigned int die_nest_count; 266 268 267 - /* 268 - * This function is protected against re-entrancy. 269 - */ 270 - void die(const char *str, struct pt_regs *regs, int err) 269 + static unsigned long oops_begin(void) 271 270 { 272 - struct thread_info *thread = current_thread_info(); 273 - int ret; 274 - enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; 271 + int cpu; 272 + unsigned long flags; 275 273 276 274 oops_enter(); 277 275 278 - raw_spin_lock_irq(&die_lock); 276 + /* racy, but better than risking deadlock. */ 277 + raw_local_irq_save(flags); 278 + cpu = smp_processor_id(); 279 + if (!arch_spin_trylock(&die_lock)) { 280 + if (cpu == die_owner) 281 + /* nested oops. 
should stop eventually */; 282 + else 283 + arch_spin_lock(&die_lock); 284 + } 285 + die_nest_count++; 286 + die_owner = cpu; 279 287 console_verbose(); 280 288 bust_spinlocks(1); 281 - if (!user_mode(regs)) 282 - bug_type = report_bug(regs->ARM_pc, regs); 283 - if (bug_type != BUG_TRAP_TYPE_NONE) 284 - str = "Oops - BUG"; 285 - ret = __die(str, err, thread, regs); 289 + return flags; 290 + } 286 291 287 - if (regs && kexec_should_crash(thread->task)) 292 + static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) 293 + { 294 + if (regs && kexec_should_crash(current)) 288 295 crash_kexec(regs); 289 296 290 297 bust_spinlocks(0); 298 + die_owner = -1; 291 299 add_taint(TAINT_DIE); 292 - raw_spin_unlock_irq(&die_lock); 300 + die_nest_count--; 301 + if (!die_nest_count) 302 + /* Nest count reaches zero, release the lock. */ 303 + arch_spin_unlock(&die_lock); 304 + raw_local_irq_restore(flags); 293 305 oops_exit(); 294 306 295 307 if (in_interrupt()) 296 308 panic("Fatal exception in interrupt"); 297 309 if (panic_on_oops) 298 310 panic("Fatal exception"); 299 - if (ret != NOTIFY_STOP) 300 - do_exit(SIGSEGV); 311 + if (signr) 312 + do_exit(signr); 313 + } 314 + 315 + /* 316 + * This function is protected against re-entrancy. 317 + */ 318 + void die(const char *str, struct pt_regs *regs, int err) 319 + { 320 + enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; 321 + unsigned long flags = oops_begin(); 322 + int sig = SIGSEGV; 323 + 324 + if (!user_mode(regs)) 325 + bug_type = report_bug(regs->ARM_pc, regs); 326 + if (bug_type != BUG_TRAP_TYPE_NONE) 327 + str = "Oops - BUG"; 328 + 329 + if (__die(str, err, regs)) 330 + sig = 0; 331 + 332 + oops_end(flags, regs, sig); 301 333 } 302 334 303 335 void arm_notify_die(const char *str, struct pt_regs *regs,
+1 -2
arch/arm/lib/Makefile
··· 6 6 7 7 lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ 8 8 csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ 9 - delay.o findbit.o memchr.o memcpy.o \ 9 + delay.o delay-loop.o findbit.o memchr.o memcpy.o \ 10 10 memmove.o memset.o memzero.o setbit.o \ 11 - strncpy_from_user.o strnlen_user.o \ 12 11 strchr.o strrchr.o \ 13 12 testchangebit.o testclearbit.o testsetbit.o \ 14 13 ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+9 -11
arch/arm/lib/delay.S arch/arm/lib/delay-loop.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/param.h> 12 + #include <asm/delay.h> 13 13 .text 14 14 15 15 .LC0: .word loops_per_jiffy 16 - .LC1: .word (2199023*HZ)>>11 16 + .LC1: .word UDELAY_MULT 17 17 18 18 /* 19 19 * r0 <= 2000 ··· 21 21 * HZ <= 1000 22 22 */ 23 23 24 - ENTRY(__udelay) 24 + ENTRY(__loop_udelay) 25 25 ldr r2, .LC1 26 26 mul r0, r2, r0 27 - ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06 27 + ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06 28 28 mov r1, #-1 29 29 ldr r2, .LC0 30 30 ldr r2, [r2] @ max = 0x01ffffff ··· 39 39 40 40 /* 41 41 * loops = r0 * HZ * loops_per_jiffy / 1000000 42 - * 43 - * Oh, if only we had a cycle counter... 44 42 */ 45 43 46 44 @ Delay routine 47 - ENTRY(__delay) 45 + ENTRY(__loop_delay) 48 46 subs r0, r0, #1 49 47 #if 0 50 48 movls pc, lr ··· 60 62 movls pc, lr 61 63 subs r0, r0, #1 62 64 #endif 63 - bhi __delay 65 + bhi __loop_delay 64 66 mov pc, lr 65 - ENDPROC(__udelay) 66 - ENDPROC(__const_udelay) 67 - ENDPROC(__delay) 67 + ENDPROC(__loop_udelay) 68 + ENDPROC(__loop_const_udelay) 69 + ENDPROC(__loop_delay)
+71
arch/arm/lib/delay.c
··· 1 + /* 2 + * Delay loops based on the OpenRISC implementation. 3 + * 4 + * Copyright (C) 2012 ARM Limited 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 + * 19 + * Author: Will Deacon <will.deacon@arm.com> 20 + */ 21 + 22 + #include <linux/delay.h> 23 + #include <linux/init.h> 24 + #include <linux/kernel.h> 25 + #include <linux/module.h> 26 + #include <linux/timex.h> 27 + 28 + /* 29 + * Default to the loop-based delay implementation. 
30 + */ 31 + struct arm_delay_ops arm_delay_ops = { 32 + .delay = __loop_delay, 33 + .const_udelay = __loop_const_udelay, 34 + .udelay = __loop_udelay, 35 + }; 36 + 37 + #ifdef ARCH_HAS_READ_CURRENT_TIMER 38 + static void __timer_delay(unsigned long cycles) 39 + { 40 + cycles_t start = get_cycles(); 41 + 42 + while ((get_cycles() - start) < cycles) 43 + cpu_relax(); 44 + } 45 + 46 + static void __timer_const_udelay(unsigned long xloops) 47 + { 48 + unsigned long long loops = xloops; 49 + loops *= loops_per_jiffy; 50 + __timer_delay(loops >> UDELAY_SHIFT); 51 + } 52 + 53 + static void __timer_udelay(unsigned long usecs) 54 + { 55 + __timer_const_udelay(usecs * UDELAY_MULT); 56 + } 57 + 58 + void __init init_current_timer_delay(unsigned long freq) 59 + { 60 + pr_info("Switching to timer-based delay loop\n"); 61 + lpj_fine = freq / HZ; 62 + arm_delay_ops.delay = __timer_delay; 63 + arm_delay_ops.const_udelay = __timer_const_udelay; 64 + arm_delay_ops.udelay = __timer_udelay; 65 + } 66 + 67 + unsigned long __cpuinit calibrate_delay_is_known(void) 68 + { 69 + return lpj_fine; 70 + } 71 + #endif
-43
arch/arm/lib/strncpy_from_user.S
··· 1 - /* 2 - * linux/arch/arm/lib/strncpy_from_user.S 3 - * 4 - * Copyright (C) 1995-2000 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #include <linux/linkage.h> 11 - #include <asm/assembler.h> 12 - #include <asm/errno.h> 13 - 14 - .text 15 - .align 5 16 - 17 - /* 18 - * Copy a string from user space to kernel space. 19 - * r0 = dst, r1 = src, r2 = byte length 20 - * returns the number of characters copied (strlen of copied string), 21 - * -EFAULT on exception, or "len" if we fill the whole buffer 22 - */ 23 - ENTRY(__strncpy_from_user) 24 - mov ip, r1 25 - 1: subs r2, r2, #1 26 - ldrusr r3, r1, 1, pl 27 - bmi 2f 28 - strb r3, [r0], #1 29 - teq r3, #0 30 - bne 1b 31 - sub r1, r1, #1 @ take NUL character out of count 32 - 2: sub r0, r1, ip 33 - mov pc, lr 34 - ENDPROC(__strncpy_from_user) 35 - 36 - .pushsection .fixup,"ax" 37 - .align 0 38 - 9001: mov r3, #0 39 - strb r3, [r0, #0] @ null terminate 40 - mov r0, #-EFAULT 41 - mov pc, lr 42 - .popsection 43 -
-40
arch/arm/lib/strnlen_user.S
··· 1 - /* 2 - * linux/arch/arm/lib/strnlen_user.S 3 - * 4 - * Copyright (C) 1995-2000 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #include <linux/linkage.h> 11 - #include <asm/assembler.h> 12 - #include <asm/errno.h> 13 - 14 - .text 15 - .align 5 16 - 17 - /* Prototype: unsigned long __strnlen_user(const char *str, long n) 18 - * Purpose : get length of a string in user memory 19 - * Params : str - address of string in user memory 20 - * Returns : length of string *including terminator* 21 - * or zero on exception, or n + 1 if too long 22 - */ 23 - ENTRY(__strnlen_user) 24 - mov r2, r0 25 - 1: 26 - ldrusr r3, r0, 1 27 - teq r3, #0 28 - beq 2f 29 - subs r1, r1, #1 30 - bne 1b 31 - add r0, r0, #1 32 - 2: sub r0, r0, r2 33 - mov pc, lr 34 - ENDPROC(__strnlen_user) 35 - 36 - .pushsection .fixup,"ax" 37 - .align 0 38 - 9001: mov r0, #0 39 - mov pc, lr 40 - .popsection
+1 -1
arch/arm/mach-msm/platsmp.c
··· 127 127 * the boot monitor to read the system wide flags register, 128 128 * and branch to the address found there. 129 129 */ 130 - gic_raise_softirq(cpumask_of(cpu), 1); 130 + gic_raise_softirq(cpumask_of(cpu), 0); 131 131 132 132 timeout = jiffies + (1 * HZ); 133 133 while (time_before(jiffies, timeout)) {
+1 -1
arch/arm/mach-omap2/omap-smp.c
··· 111 111 booted = true; 112 112 } 113 113 114 - gic_raise_softirq(cpumask_of(cpu), 1); 114 + gic_raise_softirq(cpumask_of(cpu), 0); 115 115 116 116 /* 117 117 * Now the secondary core is starting up let it run its
+11 -11
arch/arm/mach-pxa/include/mach/regs-ost.h
··· 7 7 * OS Timer & Match Registers 8 8 */ 9 9 10 - #define OSMR0 __REG(0x40A00000) /* */ 11 - #define OSMR1 __REG(0x40A00004) /* */ 12 - #define OSMR2 __REG(0x40A00008) /* */ 13 - #define OSMR3 __REG(0x40A0000C) /* */ 14 - #define OSMR4 __REG(0x40A00080) /* */ 15 - #define OSCR __REG(0x40A00010) /* OS Timer Counter Register */ 16 - #define OSCR4 __REG(0x40A00040) /* OS Timer Counter Register */ 17 - #define OMCR4 __REG(0x40A000C0) /* */ 18 - #define OSSR __REG(0x40A00014) /* OS Timer Status Register */ 19 - #define OWER __REG(0x40A00018) /* OS Timer Watchdog Enable Register */ 20 - #define OIER __REG(0x40A0001C) /* OS Timer Interrupt Enable Register */ 10 + #define OSMR0 io_p2v(0x40A00000) /* */ 11 + #define OSMR1 io_p2v(0x40A00004) /* */ 12 + #define OSMR2 io_p2v(0x40A00008) /* */ 13 + #define OSMR3 io_p2v(0x40A0000C) /* */ 14 + #define OSMR4 io_p2v(0x40A00080) /* */ 15 + #define OSCR io_p2v(0x40A00010) /* OS Timer Counter Register */ 16 + #define OSCR4 io_p2v(0x40A00040) /* OS Timer Counter Register */ 17 + #define OMCR4 io_p2v(0x40A000C0) /* */ 18 + #define OSSR io_p2v(0x40A00014) /* OS Timer Status Register */ 19 + #define OWER io_p2v(0x40A00018) /* OS Timer Watchdog Enable Register */ 20 + #define OIER io_p2v(0x40A0001C) /* OS Timer Interrupt Enable Register */ 21 21 22 22 #define OSSR_M3 (1 << 3) /* Match status channel 3 */ 23 23 #define OSSR_M2 (1 << 2) /* Match status channel 2 */
+4 -3
arch/arm/mach-pxa/reset.c
··· 77 77 static void do_hw_reset(void) 78 78 { 79 79 /* Initialize the watchdog and let it fire */ 80 - OWER = OWER_WME; 81 - OSSR = OSSR_M3; 82 - OSMR3 = OSCR + 368640; /* ... in 100 ms */ 80 + writel_relaxed(OWER_WME, OWER); 81 + writel_relaxed(OSSR_M3, OSSR); 82 + /* ... in 100 ms */ 83 + writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); 83 84 } 84 85 85 86 void pxa_restart(char mode, const char *cmd)
+26 -26
arch/arm/mach-pxa/time.c
··· 35 35 36 36 static u32 notrace pxa_read_sched_clock(void) 37 37 { 38 - return OSCR; 38 + return readl_relaxed(OSCR); 39 39 } 40 40 41 41 ··· 47 47 struct clock_event_device *c = dev_id; 48 48 49 49 /* Disarm the compare/match, signal the event. */ 50 - OIER &= ~OIER_E0; 51 - OSSR = OSSR_M0; 50 + writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 51 + writel_relaxed(OSSR_M0, OSSR); 52 52 c->event_handler(c); 53 53 54 54 return IRQ_HANDLED; ··· 59 59 { 60 60 unsigned long next, oscr; 61 61 62 - OIER |= OIER_E0; 63 - next = OSCR + delta; 64 - OSMR0 = next; 65 - oscr = OSCR; 62 + writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); 63 + next = readl_relaxed(OSCR) + delta; 64 + writel_relaxed(next, OSMR0); 65 + oscr = readl_relaxed(OSCR); 66 66 67 67 return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; 68 68 } ··· 72 72 { 73 73 switch (mode) { 74 74 case CLOCK_EVT_MODE_ONESHOT: 75 - OIER &= ~OIER_E0; 76 - OSSR = OSSR_M0; 75 + writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 76 + writel_relaxed(OSSR_M0, OSSR); 77 77 break; 78 78 79 79 case CLOCK_EVT_MODE_UNUSED: 80 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 81 /* initializing, released, or preparing for suspend */ 82 - OIER &= ~OIER_E0; 83 - OSSR = OSSR_M0; 82 + writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 83 + writel_relaxed(OSSR_M0, OSSR); 84 84 break; 85 85 86 86 case CLOCK_EVT_MODE_RESUME: ··· 108 108 { 109 109 unsigned long clock_tick_rate = get_clock_tick_rate(); 110 110 111 - OIER = 0; 112 - OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3; 111 + writel_relaxed(0, OIER); 112 + writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 113 113 114 114 setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate); 115 115 ··· 122 122 123 123 setup_irq(IRQ_OST0, &pxa_ost0_irq); 124 124 125 - clocksource_mmio_init(&OSCR, "oscr0", clock_tick_rate, 200, 32, 125 + clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32, 126 126 clocksource_mmio_readl_up); 127 127 
clockevents_register_device(&ckevt_pxa_osmr0); 128 128 } ··· 132 132 133 133 static void pxa_timer_suspend(void) 134 134 { 135 - osmr[0] = OSMR0; 136 - osmr[1] = OSMR1; 137 - osmr[2] = OSMR2; 138 - osmr[3] = OSMR3; 139 - oier = OIER; 140 - oscr = OSCR; 135 + osmr[0] = readl_relaxed(OSMR0); 136 + osmr[1] = readl_relaxed(OSMR1); 137 + osmr[2] = readl_relaxed(OSMR2); 138 + osmr[3] = readl_relaxed(OSMR3); 139 + oier = readl_relaxed(OIER); 140 + oscr = readl_relaxed(OSCR); 141 141 } 142 142 143 143 static void pxa_timer_resume(void) ··· 151 151 if (osmr[0] - oscr < MIN_OSCR_DELTA) 152 152 osmr[0] += MIN_OSCR_DELTA; 153 153 154 - OSMR0 = osmr[0]; 155 - OSMR1 = osmr[1]; 156 - OSMR2 = osmr[2]; 157 - OSMR3 = osmr[3]; 158 - OIER = oier; 159 - OSCR = oscr; 154 + writel_relaxed(osmr[0], OSMR0); 155 + writel_relaxed(osmr[1], OSMR1); 156 + writel_relaxed(osmr[2], OSMR2); 157 + writel_relaxed(osmr[3], OSMR3); 158 + writel_relaxed(oier, OIER); 159 + writel_relaxed(oscr, OSCR); 160 160 } 161 161 #else 162 162 #define pxa_timer_suspend NULL
+1 -1
arch/arm/mach-sa1100/assabet.c
··· 362 362 static void __init map_sa1100_gpio_regs( void ) 363 363 { 364 364 unsigned long phys = __PREG(GPLR) & PMD_MASK; 365 - unsigned long virt = io_p2v(phys); 365 + unsigned long virt = (unsigned long)io_p2v(phys); 366 366 int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO); 367 367 pmd_t *pmd; 368 368
+1
arch/arm/mach-sa1100/cpu-sa1100.c
··· 87 87 #include <linux/types.h> 88 88 #include <linux/init.h> 89 89 #include <linux/cpufreq.h> 90 + #include <linux/io.h> 90 91 91 92 #include <asm/cputype.h> 92 93
+1
arch/arm/mach-sa1100/cpu-sa1110.c
··· 19 19 #include <linux/cpufreq.h> 20 20 #include <linux/delay.h> 21 21 #include <linux/init.h> 22 + #include <linux/io.h> 22 23 #include <linux/kernel.h> 23 24 #include <linux/moduleparam.h> 24 25 #include <linux/types.h>
+8 -8
arch/arm/mach-sa1100/include/mach/SA-1100.h
··· 830 830 * (read/write). 831 831 */ 832 832 833 - #define OSMR0 __REG(0x90000000) /* OS timer Match Reg. 0 */ 834 - #define OSMR1 __REG(0x90000004) /* OS timer Match Reg. 1 */ 835 - #define OSMR2 __REG(0x90000008) /* OS timer Match Reg. 2 */ 836 - #define OSMR3 __REG(0x9000000c) /* OS timer Match Reg. 3 */ 837 - #define OSCR __REG(0x90000010) /* OS timer Counter Reg. */ 838 - #define OSSR __REG(0x90000014 ) /* OS timer Status Reg. */ 839 - #define OWER __REG(0x90000018 ) /* OS timer Watch-dog Enable Reg. */ 840 - #define OIER __REG(0x9000001C ) /* OS timer Interrupt Enable Reg. */ 833 + #define OSMR0 io_p2v(0x90000000) /* OS timer Match Reg. 0 */ 834 + #define OSMR1 io_p2v(0x90000004) /* OS timer Match Reg. 1 */ 835 + #define OSMR2 io_p2v(0x90000008) /* OS timer Match Reg. 2 */ 836 + #define OSMR3 io_p2v(0x9000000c) /* OS timer Match Reg. 3 */ 837 + #define OSCR io_p2v(0x90000010) /* OS timer Counter Reg. */ 838 + #define OSSR io_p2v(0x90000014) /* OS timer Status Reg. */ 839 + #define OWER io_p2v(0x90000018) /* OS timer Watch-dog Enable Reg. */ 840 + #define OIER io_p2v(0x9000001C) /* OS timer Interrupt Enable Reg. */ 841 841 842 842 #define OSSR_M(Nb) /* Match detected [0..3] */ \ 843 843 (0x00000001 << (Nb))
+1
arch/arm/mach-sa1100/include/mach/gpio.h
··· 24 24 #ifndef __ASM_ARCH_SA1100_GPIO_H 25 25 #define __ASM_ARCH_SA1100_GPIO_H 26 26 27 + #include <linux/io.h> 27 28 #include <mach/hardware.h> 28 29 #include <asm/irq.h> 29 30 #include <asm-generic/gpio.h>
+4 -2
arch/arm/mach-sa1100/include/mach/hardware.h
··· 32 32 #define PIO_START 0x80000000 /* physical start of IO space */ 33 33 34 34 #define io_p2v( x ) \ 35 - ( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE ) 35 + IOMEM( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE ) 36 36 #define io_v2p( x ) \ 37 37 ( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START ) 38 38 ··· 47 47 #define CPU_SA1110_ID (0x6901b110) 48 48 #define CPU_SA1110_MASK (0xfffffff0) 49 49 50 + #define __MREG(x) IOMEM(io_p2v(x)) 51 + 50 52 #ifndef __ASSEMBLY__ 51 53 52 54 #include <asm/cputype.h> ··· 58 56 #define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID) 59 57 #define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID) 60 58 61 - # define __REG(x) (*((volatile unsigned long *)io_p2v(x))) 59 + # define __REG(x) (*((volatile unsigned long __iomem *)io_p2v(x))) 62 60 # define __PREG(x) (io_v2p((unsigned long)&(x))) 63 61 64 62 static inline unsigned long get_clock_tick_rate(void)
+2
arch/arm/mach-sa1100/include/mach/uncompress.h
··· 8 8 9 9 #include "hardware.h" 10 10 11 + #define IOMEM(x) (x) 12 + 11 13 /* 12 14 * The following code assumes the serial port has already been 13 15 * initialized by the bootloader. We search for the first enabled
+1
arch/arm/mach-sa1100/irq.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/module.h> 14 14 #include <linux/interrupt.h> 15 + #include <linux/io.h> 15 16 #include <linux/irq.h> 16 17 #include <linux/ioport.h> 17 18 #include <linux/syscore_ops.h>
+1
arch/arm/mach-sa1100/jornada720_ssp.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/sched.h> 21 + #include <linux/io.h> 21 22 22 23 #include <mach/hardware.h> 23 24 #include <mach/jornada720.h>
+1
arch/arm/mach-sa1100/leds-cerf.c
··· 4 4 * Author: ??? 5 5 */ 6 6 #include <linux/init.h> 7 + #include <linux/io.h> 7 8 8 9 #include <mach/hardware.h> 9 10 #include <asm/leds.h>
+1
arch/arm/mach-sa1100/leds-lart.c
··· 10 10 * pace of the LED. 11 11 */ 12 12 #include <linux/init.h> 13 + #include <linux/io.h> 13 14 14 15 #include <mach/hardware.h> 15 16 #include <asm/leds.h>
+1
arch/arm/mach-sa1100/pm.c
··· 23 23 * Storage is local on the stack now. 24 24 */ 25 25 #include <linux/init.h> 26 + #include <linux/io.h> 26 27 #include <linux/suspend.h> 27 28 #include <linux/errno.h> 28 29 #include <linux/time.h>
+4 -4
arch/arm/mach-sa1100/sleep.S
··· 38 38 orr r4, r4, #MDREFR_K1DB2 39 39 ldr r5, =PPCR 40 40 41 - @ Pre-load __udelay into the I-cache 41 + @ Pre-load __loop_udelay into the I-cache 42 42 mov r0, #1 43 - bl __udelay 43 + bl __loop_udelay 44 44 mov r0, r0 45 45 46 46 @ The following must all exist in a single cache line to ··· 53 53 @ delay 90us and set CPU PLL to lowest speed 54 54 @ fixes resume problem on high speed SA1110 55 55 mov r0, #90 56 - bl __udelay 56 + bl __loop_udelay 57 57 mov r1, #0 58 58 str r1, [r5] 59 59 mov r0, #90 60 - bl __udelay 60 + bl __loop_udelay 61 61 62 62 /* 63 63 * SA1110 SDRAM controller workaround. register values:
+24 -24
arch/arm/mach-sa1100/time.c
··· 22 22 23 23 static u32 notrace sa1100_read_sched_clock(void) 24 24 { 25 - return OSCR; 25 + return readl_relaxed(OSCR); 26 26 } 27 27 28 28 #define MIN_OSCR_DELTA 2 ··· 32 32 struct clock_event_device *c = dev_id; 33 33 34 34 /* Disarm the compare/match, signal the event. */ 35 - OIER &= ~OIER_E0; 36 - OSSR = OSSR_M0; 35 + writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 36 + writel_relaxed(OSSR_M0, OSSR); 37 37 c->event_handler(c); 38 38 39 39 return IRQ_HANDLED; ··· 44 44 { 45 45 unsigned long next, oscr; 46 46 47 - OIER |= OIER_E0; 48 - next = OSCR + delta; 49 - OSMR0 = next; 50 - oscr = OSCR; 47 + writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); 48 + next = readl_relaxed(OSCR) + delta; 49 + writel_relaxed(next, OSMR0); 50 + oscr = readl_relaxed(OSCR); 51 51 52 52 return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; 53 53 } ··· 59 59 case CLOCK_EVT_MODE_ONESHOT: 60 60 case CLOCK_EVT_MODE_UNUSED: 61 61 case CLOCK_EVT_MODE_SHUTDOWN: 62 - OIER &= ~OIER_E0; 63 - OSSR = OSSR_M0; 62 + writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); 63 + writel_relaxed(OSSR_M0, OSSR); 64 64 break; 65 65 66 66 case CLOCK_EVT_MODE_RESUME: ··· 86 86 87 87 static void __init sa1100_timer_init(void) 88 88 { 89 - OIER = 0; 90 - OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3; 89 + writel_relaxed(0, OIER); 90 + writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); 91 91 92 92 setup_sched_clock(sa1100_read_sched_clock, 32, 3686400); 93 93 ··· 100 100 101 101 setup_irq(IRQ_OST0, &sa1100_timer_irq); 102 102 103 - clocksource_mmio_init(&OSCR, "oscr", CLOCK_TICK_RATE, 200, 32, 103 + clocksource_mmio_init(OSCR, "oscr", CLOCK_TICK_RATE, 200, 32, 104 104 clocksource_mmio_readl_up); 105 105 clockevents_register_device(&ckevt_sa1100_osmr0); 106 106 } ··· 110 110 111 111 static void sa1100_timer_suspend(void) 112 112 { 113 - osmr[0] = OSMR0; 114 - osmr[1] = OSMR1; 115 - osmr[2] = OSMR2; 116 - osmr[3] = OSMR3; 117 - oier = OIER; 113 + osmr[0] = readl_relaxed(OSMR0); 114 + osmr[1] = readl_relaxed(OSMR1); 115 + osmr[2] = readl_relaxed(OSMR2); 116 + osmr[3] = readl_relaxed(OSMR3); 117 + oier = readl_relaxed(OIER); 118 118 } 119 119 120 120 static void sa1100_timer_resume(void) 121 121 { 122 - OSSR = 0x0f; 123 - OSMR0 = osmr[0]; 124 - OSMR1 = osmr[1]; 125 - OSMR2 = osmr[2]; 126 - OSMR3 = osmr[3]; 127 - OIER = oier; 122 + writel_relaxed(0x0f, OSSR); 123 + writel_relaxed(osmr[0], OSMR0); 124 + writel_relaxed(osmr[1], OSMR1); 125 + writel_relaxed(osmr[2], OSMR2); 126 + writel_relaxed(osmr[3], OSMR3); 127 + writel_relaxed(oier, OIER); 128 128 129 129 /* 130 130 * OSMR0 is the system timer: make sure OSCR is sufficiently behind 131 131 */ 132 - OSCR = OSMR0 - LATCH; 132 + writel_relaxed(OSMR0 - LATCH, OSCR); 133 133 } 134 134 #else 135 135 #define sa1100_timer_suspend NULL
+35
arch/arm/mm/context.c
··· 14 14 #include <linux/percpu.h> 15 15 16 16 #include <asm/mmu_context.h> 17 + #include <asm/thread_notify.h> 17 18 #include <asm/tlbflush.h> 18 19 19 20 static DEFINE_RAW_SPINLOCK(cpu_asid_lock); ··· 47 46 : "=r" (ttb)); 48 47 isb(); 49 48 } 49 + #endif 50 + 51 + #ifdef CONFIG_PID_IN_CONTEXTIDR 52 + static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, 53 + void *t) 54 + { 55 + u32 contextidr; 56 + pid_t pid; 57 + struct thread_info *thread = t; 58 + 59 + if (cmd != THREAD_NOTIFY_SWITCH) 60 + return NOTIFY_DONE; 61 + 62 + pid = task_pid_nr(thread->task) << ASID_BITS; 63 + asm volatile( 64 + " mrc p15, 0, %0, c13, c0, 1\n" 65 + " bfi %1, %0, #0, %2\n" 66 + " mcr p15, 0, %1, c13, c0, 1\n" 67 + : "=r" (contextidr), "+r" (pid) 68 + : "I" (ASID_BITS)); 69 + isb(); 70 + 71 + return NOTIFY_OK; 72 + } 73 + 74 + static struct notifier_block contextidr_notifier_block = { 75 + .notifier_call = contextidr_notifier, 76 + }; 77 + 78 + static int __init contextidr_notifier_init(void) 79 + { 80 + return thread_register_notifier(&contextidr_notifier_block); 81 + } 82 + arch_initcall(contextidr_notifier_init); 50 83 #endif 51 84 52 85 /*
+1 -1
arch/arm/mm/dma-mapping.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/iommu.h> 25 25 #include <linux/vmalloc.h> 26 + #include <linux/sizes.h> 26 27 27 28 #include <asm/memory.h> 28 29 #include <asm/highmem.h> 29 30 #include <asm/cacheflush.h> 30 31 #include <asm/tlbflush.h> 31 - #include <asm/sizes.h> 32 32 #include <asm/mach/arch.h> 33 33 #include <asm/dma-iommu.h> 34 34 #include <asm/mach/map.h>
+1 -1
arch/arm/mm/init.c
··· 21 21 #include <linux/gfp.h> 22 22 #include <linux/memblock.h> 23 23 #include <linux/dma-contiguous.h> 24 + #include <linux/sizes.h> 24 25 25 26 #include <asm/mach-types.h> 26 27 #include <asm/memblock.h> 27 28 #include <asm/prom.h> 28 29 #include <asm/sections.h> 29 30 #include <asm/setup.h> 30 - #include <asm/sizes.h> 31 31 #include <asm/tlb.h> 32 32 #include <asm/fixmap.h> 33 33
+1 -1
arch/arm/mm/ioremap.c
··· 25 25 #include <linux/mm.h> 26 26 #include <linux/vmalloc.h> 27 27 #include <linux/io.h> 28 + #include <linux/sizes.h> 28 29 29 30 #include <asm/cp15.h> 30 31 #include <asm/cputype.h> ··· 33 32 #include <asm/mmu_context.h> 34 33 #include <asm/pgalloc.h> 35 34 #include <asm/tlbflush.h> 36 - #include <asm/sizes.h> 37 35 #include <asm/system_info.h> 38 36 39 37 #include <asm/mach/map.h>
+1 -7
arch/arm/mm/mmu.c
··· 16 16 #include <linux/memblock.h> 17 17 #include <linux/fs.h> 18 18 #include <linux/vmalloc.h> 19 + #include <linux/sizes.h> 19 20 20 21 #include <asm/cp15.h> 21 22 #include <asm/cputype.h> 22 23 #include <asm/sections.h> 23 24 #include <asm/cachetype.h> 24 25 #include <asm/setup.h> 25 - #include <asm/sizes.h> 26 26 #include <asm/smp_plat.h> 27 27 #include <asm/tlb.h> 28 28 #include <asm/highmem.h> ··· 420 420 */ 421 421 cp = &cache_policies[cachepolicy]; 422 422 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 423 - 424 - /* 425 - * Only use write-through for non-SMP systems 426 - */ 427 - if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) 428 - vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; 429 423 430 424 /* 431 425 * Enable CPU-specific coherency if supported.
+6
arch/arm/mm/proc-v6.S
··· 107 107 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 108 108 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer 109 109 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 110 + #ifdef CONFIG_PID_IN_CONTEXTIDR 111 + mrc p15, 0, r2, c13, c0, 1 @ read current context ID 112 + bic r2, r2, #0xff @ extract the PID 113 + and r1, r1, #0xff 114 + orr r1, r1, r2 @ insert into new context ID 115 + #endif 110 116 mcr p15, 0, r1, c13, c0, 1 @ set context ID 111 117 #endif 112 118 mov pc, lr
+5
arch/arm/mm/proc-v7-2level.S
··· 46 46 #ifdef CONFIG_ARM_ERRATA_430973 47 47 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 48 48 #endif 49 + #ifdef CONFIG_PID_IN_CONTEXTIDR 50 + mrc p15, 0, r2, c13, c0, 1 @ read current context ID 51 + lsr r2, r2, #8 @ extract the PID 52 + bfi r1, r2, #8, #24 @ insert into new context ID 53 + #endif 49 54 #ifdef CONFIG_ARM_ERRATA_754322 50 55 dsb 51 56 #endif
+27 -16
arch/arm/oprofile/common.c
··· 23 23 #include <asm/ptrace.h> 24 24 25 25 #ifdef CONFIG_HW_PERF_EVENTS 26 + 27 + /* 28 + * OProfile has a curious naming scheme for the ARM PMUs, but they are 29 + * part of the user ABI so we need to map from the perf PMU name for 30 + * supported PMUs. 31 + */ 32 + static struct op_perf_name { 33 + char *perf_name; 34 + char *op_name; 35 + } op_perf_name_map[] = { 36 + { "xscale1", "arm/xscale1" }, 37 + { "xscale2", "arm/xscale2" }, 38 + { "v6", "arm/armv6" }, 39 + { "v6mpcore", "arm/mpcore" }, 40 + { "ARMv7 Cortex-A8", "arm/armv7" }, 41 + { "ARMv7 Cortex-A9", "arm/armv7-ca9" }, 42 + }; 43 + 26 44 char *op_name_from_perf_id(void) 27 45 { 28 - enum arm_perf_pmu_ids id = armpmu_get_pmu_id(); 46 + int i; 47 + struct op_perf_name names; 48 + const char *perf_name = perf_pmu_name(); 29 49 30 - switch (id) { 31 - case ARM_PERF_PMU_ID_XSCALE1: 32 - return "arm/xscale1"; 33 - case ARM_PERF_PMU_ID_XSCALE2: 34 - return "arm/xscale2"; 35 - case ARM_PERF_PMU_ID_V6: 36 - return "arm/armv6"; 37 - case ARM_PERF_PMU_ID_V6MP: 38 - return "arm/mpcore"; 39 - case ARM_PERF_PMU_ID_CA8: 40 - return "arm/armv7"; 41 - case ARM_PERF_PMU_ID_CA9: 42 - return "arm/armv7-ca9"; 43 - default: 44 - return NULL; 50 + for (i = 0; i < ARRAY_SIZE(op_perf_name_map); ++i) { 51 + names = op_perf_name_map[i]; 52 + if (!strcmp(names.perf_name, perf_name)) 53 + return names.op_name; 45 54 } 55 + 56 + return NULL; 46 57 } 47 58 #endif 48 59
+1 -1
arch/arm/plat-versatile/platsmp.c
··· 85 85 * the boot monitor to read the system wide flags register, 86 86 * and branch to the address found there. 87 87 */ 88 - gic_raise_softirq(cpumask_of(cpu), 1); 88 + gic_raise_softirq(cpumask_of(cpu), 0); 89 89 90 90 timeout = jiffies + (1 * HZ); 91 91 while (time_before(jiffies, timeout)) {
+1 -1
drivers/amba/bus.c
··· 16 16 #include <linux/pm.h> 17 17 #include <linux/pm_runtime.h> 18 18 #include <linux/amba/bus.h> 19 + #include <linux/sizes.h> 19 20 20 21 #include <asm/irq.h> 21 - #include <asm/sizes.h> 22 22 23 23 #define to_amba_driver(d) container_of(d, struct amba_driver, drv) 24 24
+1
drivers/input/touchscreen/jornada720_ts.c
··· 19 19 #include <linux/interrupt.h> 20 20 #include <linux/module.h> 21 21 #include <linux/slab.h> 22 + #include <linux/io.h> 22 23 23 24 #include <mach/hardware.h> 24 25 #include <mach/jornada720.h>
+6 -6
drivers/net/irda/pxaficp_ir.c
··· 289 289 } 290 290 lsr = STLSR; 291 291 } 292 - si->last_oscr = OSCR; 292 + si->last_oscr = readl_relaxed(OSCR); 293 293 break; 294 294 295 295 case 0x04: /* Received Data Available */ ··· 300 300 dev->stats.rx_bytes++; 301 301 async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); 302 302 } while (STLSR & LSR_DR); 303 - si->last_oscr = OSCR; 303 + si->last_oscr = readl_relaxed(OSCR); 304 304 break; 305 305 306 306 case 0x02: /* Transmit FIFO Data Request */ ··· 316 316 /* We need to ensure that the transmitter has finished. */ 317 317 while ((STLSR & LSR_TEMT) == 0) 318 318 cpu_relax(); 319 - si->last_oscr = OSCR; 319 + si->last_oscr = readl_relaxed(OSCR); 320 320 321 321 /* 322 322 * Ok, we've finished transmitting. Now enable ··· 370 370 371 371 while (ICSR1 & ICSR1_TBY) 372 372 cpu_relax(); 373 - si->last_oscr = OSCR; 373 + si->last_oscr = readl_relaxed(OSCR); 374 374 375 375 /* 376 376 * HACK: It looks like the TBY bit is dropped too soon. ··· 470 470 471 471 /* stop RX DMA */ 472 472 DCSR(si->rxdma) &= ~DCSR_RUN; 473 - si->last_oscr = OSCR; 473 + si->last_oscr = readl_relaxed(OSCR); 474 474 icsr0 = ICSR0; 475 475 476 476 if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { ··· 546 546 skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); 547 547 548 548 if (mtt) 549 - while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) 549 + while ((unsigned)(readl_relaxed(OSCR) - si->last_oscr)/4 < mtt) 550 550 cpu_relax(); 551 551 552 552 /* stop RX DMA, disable FICP */
+1
drivers/pcmcia/sa1100_shannon.c
··· 8 8 #include <linux/kernel.h> 9 9 #include <linux/device.h> 10 10 #include <linux/init.h> 11 + #include <linux/io.h> 11 12 12 13 #include <mach/hardware.h> 13 14 #include <asm/mach-types.h>
+1 -1
drivers/tty/serial/amba-pl011.c
··· 53 53 #include <linux/delay.h> 54 54 #include <linux/types.h> 55 55 #include <linux/pinctrl/consumer.h> 56 + #include <linux/sizes.h> 56 57 57 58 #include <asm/io.h> 58 - #include <asm/sizes.h> 59 59 60 60 #define UART_NR 14 61 61
+7 -7
drivers/watchdog/sa1100_wdt.c
··· 54 54 return -EBUSY; 55 55 56 56 /* Activate SA1100 Watchdog timer */ 57 - OSMR3 = OSCR + pre_margin; 58 - OSSR = OSSR_M3; 59 - OWER = OWER_WME; 60 - OIER |= OIER_E3; 57 + writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); 58 + writel_relaxed(OSSR_M3, OSSR); 59 + writel_relaxed(OWER_WME, OWER); 60 + writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER); 61 61 return nonseekable_open(inode, file); 62 62 } 63 63 ··· 80 80 { 81 81 if (len) 82 82 /* Refresh OSMR3 timer. */ 83 - OSMR3 = OSCR + pre_margin; 83 + writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); 84 84 return len; 85 85 } 86 86 ··· 114 114 break; 115 115 116 116 case WDIOC_KEEPALIVE: 117 - OSMR3 = OSCR + pre_margin; 117 + writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); 118 118 ret = 0; 119 119 break; 120 120 ··· 129 129 } 130 130 131 131 pre_margin = oscr_freq * time; 132 - OSMR3 = OSCR + pre_margin; 132 + writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); 133 133 /*fall through*/ 134 134 135 135 case WDIOC_GETTIMEOUT:
+2 -47
include/asm-generic/sizes.h
··· 1 - /* 2 - * linux/include/asm-generic/sizes.h 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - */ 8 - #ifndef __ASM_GENERIC_SIZES_H__ 9 - #define __ASM_GENERIC_SIZES_H__ 10 - 11 - #define SZ_1 0x00000001 12 - #define SZ_2 0x00000002 13 - #define SZ_4 0x00000004 14 - #define SZ_8 0x00000008 15 - #define SZ_16 0x00000010 16 - #define SZ_32 0x00000020 17 - #define SZ_64 0x00000040 18 - #define SZ_128 0x00000080 19 - #define SZ_256 0x00000100 20 - #define SZ_512 0x00000200 21 - 22 - #define SZ_1K 0x00000400 23 - #define SZ_2K 0x00000800 24 - #define SZ_4K 0x00001000 25 - #define SZ_8K 0x00002000 26 - #define SZ_16K 0x00004000 27 - #define SZ_32K 0x00008000 28 - #define SZ_64K 0x00010000 29 - #define SZ_128K 0x00020000 30 - #define SZ_256K 0x00040000 31 - #define SZ_512K 0x00080000 32 - 33 - #define SZ_1M 0x00100000 34 - #define SZ_2M 0x00200000 35 - #define SZ_4M 0x00400000 36 - #define SZ_8M 0x00800000 37 - #define SZ_16M 0x01000000 38 - #define SZ_32M 0x02000000 39 - #define SZ_64M 0x04000000 40 - #define SZ_128M 0x08000000 41 - #define SZ_256M 0x10000000 42 - #define SZ_512M 0x20000000 43 - 44 - #define SZ_1G 0x40000000 45 - #define SZ_2G 0x80000000 46 - 47 - #endif /* __ASM_GENERIC_SIZES_H__ */ 1 + /* This is a placeholder, to be removed over time */ 2 + #include <linux/sizes.h>
+47
include/linux/sizes.h
··· 1 + /* 2 + * include/linux/sizes.h 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #ifndef __LINUX_SIZES_H__ 9 + #define __LINUX_SIZES_H__ 10 + 11 + #define SZ_1 0x00000001 12 + #define SZ_2 0x00000002 13 + #define SZ_4 0x00000004 14 + #define SZ_8 0x00000008 15 + #define SZ_16 0x00000010 16 + #define SZ_32 0x00000020 17 + #define SZ_64 0x00000040 18 + #define SZ_128 0x00000080 19 + #define SZ_256 0x00000100 20 + #define SZ_512 0x00000200 21 + 22 + #define SZ_1K 0x00000400 23 + #define SZ_2K 0x00000800 24 + #define SZ_4K 0x00001000 25 + #define SZ_8K 0x00002000 26 + #define SZ_16K 0x00004000 27 + #define SZ_32K 0x00008000 28 + #define SZ_64K 0x00010000 29 + #define SZ_128K 0x00020000 30 + #define SZ_256K 0x00040000 31 + #define SZ_512K 0x00080000 32 + 33 + #define SZ_1M 0x00100000 34 + #define SZ_2M 0x00200000 35 + #define SZ_4M 0x00400000 36 + #define SZ_8M 0x00800000 37 + #define SZ_16M 0x01000000 38 + #define SZ_32M 0x02000000 39 + #define SZ_64M 0x04000000 40 + #define SZ_128M 0x08000000 41 + #define SZ_256M 0x10000000 42 + #define SZ_512M 0x20000000 43 + 44 + #define SZ_1G 0x40000000 45 + #define SZ_2G 0x80000000 46 + 47 + #endif /* __LINUX_SIZES_H__ */
+1 -1
init/Kconfig
··· 357 357 358 358 config AUDITSYSCALL 359 359 bool "Enable system-call auditing support" 360 - depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM) 360 + depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT)) 361 361 default y if SECURITY_SELINUX 362 362 help 363 363 Enable low-overhead system-call auditing infrastructure that