[ARM] Handle HWCAP_VFP in VFP support code

Don't set HWCAP_VFP in the processor support file; not only does it
depend on the processor features, but it also depends on the support
code being present. Therefore, only set it if the support code
detects that we have a VFP coprocessor attached.

Also, move the VFP handling of the coprocessor access register into
the VFP support code.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

authored by Russell King and committed by Russell King efe90d27 c9976797

+56 -23
+0 -3
arch/arm/kernel/setup.c
··· 354 #ifndef CONFIG_ARM_THUMB 355 elf_hwcap &= ~HWCAP_THUMB; 356 #endif 357 - #ifndef CONFIG_VFP 358 - elf_hwcap &= ~HWCAP_VFP; 359 - #endif 360 361 cpu_proc_init(); 362 }
··· 354 #ifndef CONFIG_ARM_THUMB 355 elf_hwcap &= ~HWCAP_THUMB; 356 #endif 357 358 cpu_proc_init(); 359 }
+1 -1
arch/arm/mm/proc-arm926.S
··· 480 b __arm926_setup 481 .long cpu_arch_name 482 .long cpu_elf_name 483 - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA 484 .long cpu_arm926_name 485 .long arm926_processor_functions 486 .long v4wbi_tlb_fns
··· 480 b __arm926_setup 481 .long cpu_arch_name 482 .long cpu_elf_name 483 + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA 484 .long cpu_arm926_name 485 .long arm926_processor_functions 486 .long v4wbi_tlb_fns
+1 -6
arch/arm/mm/proc-v6.S
··· 207 #endif 208 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 209 #endif /* CONFIG_MMU */ 210 - #ifdef CONFIG_VFP 211 - mrc p15, 0, r0, c1, c0, 2 212 - orr r0, r0, #(0xf << 20) 213 - mcr p15, 0, r0, c1, c0, 2 @ Enable full access to VFP 214 - #endif 215 adr r5, v6_crval 216 ldmia r5, {r5, r6} 217 mrc p15, 0, r0, c1, c0, 0 @ read control register ··· 268 b __v6_setup 269 .long cpu_arch_name 270 .long cpu_elf_name 271 - .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA 272 .long cpu_v6_name 273 .long v6_processor_functions 274 .long v6wbi_tlb_fns
··· 207 #endif 208 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 209 #endif /* CONFIG_MMU */ 210 adr r5, v6_crval 211 ldmia r5, {r5, r6} 212 mrc p15, 0, r0, c1, c0, 0 @ read control register ··· 273 b __v6_setup 274 .long cpu_arch_name 275 .long cpu_elf_name 276 + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA 277 .long cpu_v6_name 278 .long v6_processor_functions 279 .long v6wbi_tlb_fns
+25 -1
arch/arm/vfp/vfpmodule.c
··· 263 if (exceptions) 264 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); 265 } 266 - 267 /* 268 * VFP support code initialisation. 269 */ 270 static int __init vfp_init(void) 271 { 272 unsigned int vfpsid; 273 274 /* 275 * First check that there is a VFP that we can use. ··· 292 printk(KERN_INFO "VFP support v0.3: "); 293 if (VFP_arch) { 294 printk("not present\n"); 295 } else if (vfpsid & FPSID_NODOUBLE) { 296 printk("no double precision support\n"); 297 } else { ··· 308 (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, 309 (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, 310 (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); 311 vfp_vector = vfp_support_entry; 312 313 thread_register_notifier(&vfp_notifier_block); 314 } 315 return 0; 316 }
··· 263 if (exceptions) 264 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); 265 } 266 + 267 /* 268 * VFP support code initialisation. 269 */ 270 static int __init vfp_init(void) 271 { 272 unsigned int vfpsid; 273 + unsigned int cpu_arch = cpu_architecture(); 274 + u32 access = 0; 275 + 276 + if (cpu_arch >= CPU_ARCH_ARMv6) { 277 + access = get_copro_access(); 278 + 279 + /* 280 + * Enable full access to VFP (cp10 and cp11) 281 + */ 282 + set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); 283 + } 284 285 /* 286 * First check that there is a VFP that we can use. ··· 281 printk(KERN_INFO "VFP support v0.3: "); 282 if (VFP_arch) { 283 printk("not present\n"); 284 + 285 + /* 286 + * Restore the copro access register. 287 + */ 288 + if (cpu_arch >= CPU_ARCH_ARMv6) 289 + set_copro_access(access); 290 } else if (vfpsid & FPSID_NODOUBLE) { 291 printk("no double precision support\n"); 292 } else { ··· 291 (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, 292 (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, 293 (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); 294 + 295 vfp_vector = vfp_support_entry; 296 297 thread_register_notifier(&vfp_notifier_block); 298 + 299 + /* 300 + * We detected VFP, and the support code is 301 + * in place; report VFP support to userspace. 302 + */ 303 + elf_hwcap |= HWCAP_VFP; 304 } 305 return 0; 306 }
+29 -12
include/asm-arm/system.h
··· 139 #define cpu_is_xscale() 1 140 #endif 141 142 - #define set_cr(x) \ 143 - __asm__ __volatile__( \ 144 - "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ 145 - : : "r" (x) : "cc") 146 147 - #define get_cr() \ 148 - ({ \ 149 - unsigned int __val; \ 150 - __asm__ __volatile__( \ 151 - "mrc p15, 0, %0, c1, c0, 0 @ get CR" \ 152 - : "=r" (__val) : : "cc"); \ 153 - __val; \ 154 - }) 155 156 extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ 157 extern unsigned long cr_alignment; /* defined in entry-armv.S */
··· 139 #define cpu_is_xscale() 1 140 #endif 141 142 + static inline unsigned int get_cr(void) 143 + { 144 + unsigned int val; 145 + asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); 146 + return val; 147 + } 148 149 + static inline void set_cr(unsigned int val) 150 + { 151 + asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" 152 + : : "r" (val) : "cc"); 153 + } 154 + 155 + #define CPACC_FULL(n) (3 << (n * 2)) 156 + #define CPACC_SVC(n) (1 << (n * 2)) 157 + #define CPACC_DISABLE(n) (0 << (n * 2)) 158 + 159 + static inline unsigned int get_copro_access(void) 160 + { 161 + unsigned int val; 162 + asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" 163 + : "=r" (val) : : "cc"); 164 + return val; 165 + } 166 + 167 + static inline void set_copro_access(unsigned int val) 168 + { 169 + asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" 170 + : : "r" (val) : "cc"); 171 + } 172 173 extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ 174 extern unsigned long cr_alignment; /* defined in entry-armv.S */