[ARM] Handle HWCAP_VFP in VFP support code

Don't set HWCAP_VFP in the processor support file; not only does it
depend on the processor features, but it also depends on the support
code being present. Therefore, only set it if the support code
detects that we have a VFP coprocessor attached.

Also, move the VFP handling of the coprocessor access register into
the VFP support code.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Russell King; committed by Russell King (efe90d27, c9976797)

+56 -23
+0 -3
arch/arm/kernel/setup.c
@@ -354,9 +354,6 @@
 #ifndef CONFIG_ARM_THUMB
 	elf_hwcap &= ~HWCAP_THUMB;
 #endif
-#ifndef CONFIG_VFP
-	elf_hwcap &= ~HWCAP_VFP;
-#endif
 
 	cpu_proc_init();
 }
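With this hunk gone, nothing masks HWCAP_VFP at setup time any more; the bit is set only by vfp_init() below, so userspace sees it exactly when VFP was actually detected. For illustration, a minimal userspace check of the bit via the ELF auxiliary vector; this sketch assumes glibc's getauxval() (added long after this commit) on an ARM system where <asm/hwcap.h> defines HWCAP_VFP:

/*
 * Hedged sketch: userspace probing the HWCAP_VFP bit that vfp_init()
 * now sets only when both the VFP hardware and the support code are
 * present. Assumes glibc >= 2.16 for getauxval() and ARM <asm/hwcap.h>.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("VFP %s\n", (hwcap & HWCAP_VFP) ? "present" : "absent");
	return 0;
}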
+1 -1
arch/arm/mm/proc-arm926.S
@@ -480,7 +480,7 @@
 	b	__arm926_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
 	.long	cpu_arm926_name
 	.long	arm926_processor_functions
 	.long	v4wbi_tlb_fns
+1 -6
arch/arm/mm/proc-v6.S
@@ -207,11 +207,6 @@
 #endif
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
-#ifdef CONFIG_VFP
-	mrc	p15, 0, r0, c1, c0, 2
-	orr	r0, r0, #(0xf << 20)
-	mcr	p15, 0, r0, c1, c0, 2		@ Enable full access to VFP
-#endif
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
@@ -273,7 +268,7 @@
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
 	.long	cpu_v6_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
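The deleted block ORed 0xf << 20 into the coprocessor access register (CP15 c1, c0, 2), i.e. the four bits covering cp10 and cp11. The replacement in vfp_init() builds the same mask as CPACC_FULL(10) | CPACC_FULL(11) = (3 << 20) | (3 << 22) = 0xf << 20. A standalone C11 compile-time check of that equivalence (the macro is copied from the system.h hunk below):

/*
 * Hedged sketch: confirm the CPACC_FULL() encoding reproduces the
 * 0xf << 20 mask that the deleted proc-v6.S assembly wrote to the
 * coprocessor access register.
 */
#define CPACC_FULL(n)	(3 << (n * 2))

_Static_assert((CPACC_FULL(10) | CPACC_FULL(11)) == (0xf << 20),
	       "cp10/cp11 full access equals the old 0xf << 20 mask");

int main(void) { return 0; }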
+25 -1
arch/arm/vfp/vfpmodule.c
@@ -263,13 +263,24 @@
 	if (exceptions)
 		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 }
- 
+
 /*
  * VFP support code initialisation.
  */
 static int __init vfp_init(void)
 {
 	unsigned int vfpsid;
+	unsigned int cpu_arch = cpu_architecture();
+	u32 access = 0;
+
+	if (cpu_arch >= CPU_ARCH_ARMv6) {
+		access = get_copro_access();
+
+		/*
+		 * Enable full access to VFP (cp10 and cp11)
+		 */
+		set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+	}
 
 	/*
 	 * First check that there is a VFP that we can use.
@@ -281,6 +292,12 @@
 	printk(KERN_INFO "VFP support v0.3: ");
 	if (VFP_arch) {
 		printk("not present\n");
+
+		/*
+		 * Restore the copro access register.
+		 */
+		if (cpu_arch >= CPU_ARCH_ARMv6)
+			set_copro_access(access);
 	} else if (vfpsid & FPSID_NODOUBLE) {
 		printk("no double precision support\n");
 	} else {
@@ -291,9 +308,16 @@
 			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
 			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
 			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
 		vfp_vector = vfp_support_entry;
 
 		thread_register_notifier(&vfp_notifier_block);
+
+		/*
+		 * We detected VFP, and the support code is
+		 * in place; report VFP support to userspace.
+		 */
+		elf_hwcap |= HWCAP_VFP;
 	}
 	return 0;
 }
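Note the symmetry here: access to cp10/cp11 is widened before reading FPSID (on ARMv6 the read would otherwise take an undefined-instruction trap), and restored on the failure path so a missing VFP leaves the register untouched. For reference, a sketch of the field extraction behind the printk above; the bit positions (part number in bits 15:8, variant in 7:4, revision in 3:0) are the conventional VFP FPSID layout, stated here as an assumption rather than read from this diff, and the example input value is hypothetical:

/*
 * Hedged sketch: decoding the FPSID fields that vfp_init() prints.
 * Bit positions assume the conventional VFP FPSID layout; the kernel's
 * own FPSID_*_MASK/_BIT constants live in arch/arm/vfp/vfp.h.
 */
#include <stdio.h>

static void print_fpsid(unsigned int fpsid)
{
	printf("part %02x variant %x rev %x\n",
	       (fpsid >> 8) & 0xff,	/* part number */
	       (fpsid >> 4) & 0xf,	/* variant */
	       fpsid & 0xf);		/* revision */
}

int main(void)
{
	print_fpsid(0x410101a0);	/* hypothetical example value */
	return 0;
}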
+29 -12
include/asm-arm/system.h
@@ -139,19 +139,36 @@
 #define cpu_is_xscale()	1
 #endif
 
-#define set_cr(x)					\
-	__asm__ __volatile__(				\
-	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
-	: : "r" (x) : "cc")
+static inline unsigned int get_cr(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
+	return val;
+}
 
-#define get_cr()					\
-	({						\
-	unsigned int __val;				\
-	__asm__ __volatile__(				\
-	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
-	: "=r" (__val) : : "cc");			\
-	__val;						\
-	})
+static inline void set_cr(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
+	  : : "r" (val) : "cc");
+}
+
+#define CPACC_FULL(n)		(3 << (n * 2))
+#define CPACC_SVC(n)		(1 << (n * 2))
+#define CPACC_DISABLE(n)	(0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+	unsigned int val;
+	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+	  : "=r" (val) : : "cc");
+	return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+	  : : "r" (val) : "cc");
+}
 
 extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
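Each coprocessor owns two bits in this register (bits 2n+1:2n for coprocessor n): 0b00 denies access, 0b01 grants privileged-mode access only, and 0b11 grants full access, matching CPACC_DISABLE/CPACC_SVC/CPACC_FULL. A hedged sketch decoding those fields from a raw register value; decode_cpacc() is a hypothetical helper, not part of this patch:

/*
 * Hedged sketch: interpret the per-coprocessor fields of the value
 * returned by get_copro_access(). decode_cpacc() is hypothetical and
 * mirrors the CPACC_DISABLE/CPACC_SVC/CPACC_FULL encodings above.
 */
#include <stdio.h>

static const char *decode_cpacc(unsigned int cpacr, int cp)
{
	switch ((cpacr >> (cp * 2)) & 3) {
	case 0:  return "access denied";
	case 1:  return "privileged only";
	case 3:  return "full access";
	default: return "reserved";
	}
}

int main(void)
{
	unsigned int cpacr = (3 << 20) | (3 << 22);	/* cp10+cp11 full */

	printf("cp10: %s\n", decode_cpacc(cpacr, 10));
	printf("cp11: %s\n", decode_cpacc(cpacr, 11));
	return 0;
}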