Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
"The usual collection of random fixes. Also some further fixes to the
last set of security fixes, and some more from Will (which you may
already have in a slightly different form)"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7807/1: kexec: validate CPU hotplug support
ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
ARM: 7811/1: locks: use early clobber in arch_spin_trylock
ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()
ARM: 7809/1: perf: fix event validation for software group leaders
ARM: Fix FIQ code on VIVT CPUs
ARM: Fix !kuser helpers case
ARM: Fix the world famous typo with is_gate_vma()

Changed files: +69 -33
arch/arm/include/asm/smp_plat.h: +3
···
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
arch/arm/include/asm/spinlock.h: +31 -20
···
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
···
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
···
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
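
Note: the two rwlock changes above apply the same fix as the ticket-lock hunk before them. On ARM, strex can fail even though the preceding ldrex saw the lock free (for example when an interrupt arrives between the two), so a trylock must distinguish "store failed, retry" from "lock genuinely held" rather than report contention on the first failed store. As a rough illustration only (plain C11 atomics, not kernel code; the names are made up for the sketch), the same retry-on-spurious-failure shape looks like this:

/*
 * Sketch of the retry pattern using C11 atomics.  Like strex,
 * atomic_compare_exchange_weak() may fail spuriously even though the lock
 * was observed free, so retry in that case and only report failure when
 * another owner is genuinely present.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool sketch_write_trylock(atomic_uint *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* only proceed if the lock looks free */
		if (atomic_compare_exchange_weak_explicit(lock, &expected,
							  0x80000000u,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;		/* write lock taken */
	} while (expected == 0);		/* spurious failure: retry */

	return false;				/* genuinely contended */
}

The read-side fix follows the same shape, with the added check that the reader count has not gone negative (i.e. a writer already holds the lock).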
arch/arm/kernel/entry-armv.S: +2 -1
···
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
arch/arm/kernel/fiq.c: +2 -6
···
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range(base + offset, offset + length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
arch/arm/kernel/machine_kexec.c: +16 -4
···
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
···
 	struct kexec_segment *current_segment;
 	__be32 header;
 	int i, err;
+
+	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
 
 	/*
 	 * No segment at default ATAGs address. try to locate
···
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
arch/arm/kernel/perf_event.c: +4 -1
···
 	int mapping;
 
 	if (config >= PERF_COUNT_HW_MAX)
-		return -ENOENT;
+		return -EINVAL;
 
 	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
···
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
+
+	if (is_software_event(event))
+		return 1;
 
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
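
Note: the validate_event() change concerns mixed groups in which a pure software event leads hardware events; such groups are legal and must not be validated against the ARM PMU. A minimal userspace sketch of that kind of group, using the raw perf_event_open() syscall as in its man page (the program is illustrative only, not part of this series):

#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* perf_event_open() has no glibc wrapper; invoke the syscall directly. */
static int open_event(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0 /* this task */,
		       -1 /* any CPU */, group_fd, 0 /* flags */);
}

int main(void)
{
	struct perf_event_attr sw = { 0 }, hw = { 0 };
	int leader, member;

	sw.type = PERF_TYPE_SOFTWARE;		/* software event ... */
	sw.size = sizeof(sw);
	sw.config = PERF_COUNT_SW_CPU_CLOCK;
	sw.disabled = 1;			/* ... leads the group */

	hw.type = PERF_TYPE_HARDWARE;		/* hardware event joins it */
	hw.size = sizeof(hw);
	hw.config = PERF_COUNT_HW_CPU_CYCLES;

	leader = open_event(&sw, -1);		/* -1: no group, becomes leader */
	member = open_event(&hw, leader);	/* attach to the software leader */
	if (leader < 0 || member < 0)
		perror("perf_event_open");

	return 0;
}

Without the is_software_event() check, a software leader like this was pushed through the ARM PMU's validation path even though it has no arm_pmu behind it, which is what the 7809/1 fix avoids.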
arch/arm/kernel/process.c: +1 -1
···
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
arch/arm/kernel/smp.c: +10
···
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
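
Note: a platform advertises hotplug capability by filling in the hotplug hooks of its struct smp_operations, and platform_can_cpu_hotplug() above only checks that .cpu_kill is present. A hypothetical sketch of what that looks like on the platform side (the "example" names are invented, and the boot-time callbacks are omitted):

#include <linux/init.h>
#include <linux/smp.h>
#include <asm/proc-fns.h>
#include <asm/smp.h>

#ifdef CONFIG_HOTPLUG_CPU
static int example_cpu_kill(unsigned int cpu)
{
	/* Confirm (or force) that the CPU is powered down or held in reset. */
	return 1;			/* non-zero: the CPU really is gone */
}

static void example_cpu_die(unsigned int cpu)
{
	/* Runs on the dying CPU itself; park it until reset or power-off. */
	while (1)
		cpu_do_idle();
}
#endif

struct smp_operations example_smp_ops __initdata = {
	/* .smp_init_cpus, .smp_boot_secondary, ... omitted for brevity */
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill	= example_cpu_kill,
	.cpu_die	= example_cpu_die,
#endif
};

With this in place, machine_kexec_prepare() (see the machine_kexec.c hunk above) will allow kexec on SMP hardware; without a .cpu_kill hook it now refuses with -EINVAL instead of failing later at shutdown.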