Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Thumb-2: Implement the unified VFP support

This patch modifies the VFP files for the ARM/Thumb-2 unified
assembly syntax.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

+32 -16
arch/arm/vfp/vfphw.S
··· 209 209 last_VFP_context_address: 210 210 .word last_VFP_context 211 211 212 - ENTRY(vfp_get_float) 213 - add pc, pc, r0, lsl #3 212 + .macro tbl_branch, base, tmp, shift 213 + #ifdef CONFIG_THUMB2_KERNEL 214 + adr \tmp, 1f 215 + add \tmp, \tmp, \base, lsl \shift 216 + mov pc, \tmp 217 + #else 218 + add pc, pc, \base, lsl \shift 214 219 mov r0, r0 220 + #endif 221 + 1: 222 + .endm 223 + 224 + ENTRY(vfp_get_float) 225 + tbl_branch r0, r3, #3 215 226 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 216 - mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 227 + 1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 217 228 mov pc, lr 218 - mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 229 + .org 1b + 8 230 + 1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 219 231 mov pc, lr 232 + .org 1b + 8 220 233 .endr 221 234 ENDPROC(vfp_get_float) 222 235 223 236 ENTRY(vfp_put_float) 224 - add pc, pc, r1, lsl #3 225 - mov r0, r0 237 + tbl_branch r1, r3, #3 226 238 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 227 - mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 239 + 1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 228 240 mov pc, lr 229 - mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 241 + .org 1b + 8 242 + 1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 230 243 mov pc, lr 244 + .org 1b + 8 231 245 .endr 232 246 ENDPROC(vfp_put_float) 233 247 234 248 ENTRY(vfp_get_double) 235 - add pc, pc, r0, lsl #3 236 - mov r0, r0 249 + tbl_branch r0, r3, #3 237 250 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 238 - fmrrd r0, r1, d\dr 251 + 1: fmrrd r0, r1, d\dr 239 252 mov pc, lr 253 + .org 1b + 8 240 254 .endr 241 255 #ifdef CONFIG_VFPv3 242 256 @ d16 - d31 registers 243 257 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 244 - mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr 258 + 1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr 245 259 mov pc, lr 260 + .org 1b + 8 246 261 .endr 247 262 #endif 248 263 ··· 268 253 ENDPROC(vfp_get_double) 269 254 270 255 ENTRY(vfp_put_double) 271 - add pc, pc, r2, lsl #3 272 - mov r0, r0 256 + tbl_branch r2, r3, #3 273 257 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 274 - fmdrr d\dr, r0, r1 258 + 1: fmdrr d\dr, r0, r1 275 259 mov pc, lr 260 + .org 1b + 8 276 261 .endr 277 262 #ifdef CONFIG_VFPv3 278 263 @ d16 - d31 registers 279 264 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 280 - mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr 265 + 1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr 281 266 mov pc, lr 267 + .org 1b + 8 282 268 .endr 283 269 #endif 284 270 ENDPROC(vfp_put_double)