Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Hexagon: add support for new v4+ registers

Add support for a couple of new v4+ registers, along with
updated save/restore of pt_regs.

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>

+206 -48
+9 -2
arch/hexagon/include/uapi/asm/registers.h
··· 57 57 }; 58 58 union { 59 59 struct { 60 - unsigned long gp; 61 60 unsigned long ugp; 61 + unsigned long gp; 62 62 }; 63 - long long int ugpgp; 63 + long long int gpugp; 64 + }; 65 + union { 66 + struct { 67 + unsigned long cs0; 68 + unsigned long cs1; 69 + }; 70 + long long int cs1cs0; 64 71 }; 65 72 /* 66 73 * Be extremely careful with rearranging these, if at all. Some code
+6
arch/hexagon/include/uapi/asm/user.h
··· 55 55 unsigned long pc; 56 56 unsigned long cause; 57 57 unsigned long badva; 58 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 58 59 unsigned long pad1; /* pad out to 48 words total */ 59 60 unsigned long pad2; /* pad out to 48 words total */ 60 61 unsigned long pad3; /* pad out to 48 words total */ 62 + #else 63 + unsigned long cs0; 64 + unsigned long cs1; 65 + unsigned long pad1; /* pad out to 48 words total */ 66 + #endif 61 67 }; 62 68 63 69 #endif
+2 -1
arch/hexagon/kernel/asm-offsets.c
··· 44 44 45 45 COMMENT("Hexagon pt_regs definitions"); 46 46 OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr); 47 - OFFSET(_PT_UGPGP, pt_regs, ugpgp); 47 + OFFSET(_PT_GPUGP, pt_regs, gpugp); 48 + OFFSET(_PT_CS1CS0, pt_regs, cs1cs0); 48 49 OFFSET(_PT_R3130, pt_regs, r3130); 49 50 OFFSET(_PT_R2928, pt_regs, r2928); 50 51 OFFSET(_PT_R2726, pt_regs, r2726);
+2
arch/hexagon/kernel/kgdb.c
··· 70 70 { "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)}, 71 71 { " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)}, 72 72 { "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)}, 73 + { "cs0", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs0)}, 74 + { "cs1", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs1)}, 73 75 { "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)}, 74 76 { "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)}, 75 77 { "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
+9
arch/hexagon/kernel/ptrace.c
··· 76 76 dummy = pt_cause(regs); 77 77 ONEXT(&dummy, cause); 78 78 ONEXT(&pt_badva(regs), badva); 79 + #if CONFIG_HEXAGON_ARCH_VERSION >=4 80 + ONEXT(&regs->cs0, cs0); 81 + ONEXT(&regs->cs1, cs1); 82 + #endif 79 83 80 84 /* Pad the rest with zeros, if needed */ 81 85 if (!ret) ··· 126 122 /* CAUSE and BADVA aren't writeable. */ 127 123 INEXT(&bucket, cause); 128 124 INEXT(&bucket, badva); 125 + 126 + #if CONFIG_HEXAGON_ARCH_VERSION >=4 127 + INEXT(&regs->cs0, cs0); 128 + INEXT(&regs->cs1, cs1); 129 + #endif 129 130 130 131 /* Ignore the rest, if needed */ 131 132 if (!ret)
+8 -2
arch/hexagon/kernel/signal.c
··· 66 66 err |= __put_user(regs->preds, &sc->sc_regs.p3_0); 67 67 err |= __put_user(regs->gp, &sc->sc_regs.gp); 68 68 err |= __put_user(regs->ugp, &sc->sc_regs.ugp); 69 - 69 + #if CONFIG_HEXAGON_ARCH_VERSION >= 4 70 + err |= __put_user(regs->cs0, &sc->sc_regs.cs0); 71 + err |= __put_user(regs->cs1, &sc->sc_regs.cs1); 72 + #endif 70 73 tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc); 71 74 tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause); 72 75 tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva); ··· 96 93 err |= __get_user(regs->preds, &sc->sc_regs.p3_0); 97 94 err |= __get_user(regs->gp, &sc->sc_regs.gp); 98 95 err |= __get_user(regs->ugp, &sc->sc_regs.ugp); 99 - 96 + #if CONFIG_HEXAGON_ARCH_VERSION >= 4 97 + err |= __get_user(regs->cs0, &sc->sc_regs.cs0); 98 + err |= __get_user(regs->cs1, &sc->sc_regs.cs1); 99 + #endif 100 100 err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp); 101 101 102 102 return err;
+168 -43
arch/hexagon/kernel/vm_entry.S
··· 45 45 * number in the case where we decode a system call (trap0(#1)). 46 46 */ 47 47 48 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 48 49 #define save_pt_regs()\ 49 - memd(R0 + #_PT_R3130) = R31:30; \ 50 + memd(R0 + #_PT_R3130) = R31:30; \ 51 + { memw(R0 + #_PT_R2928) = R28; \ 52 + R31 = memw(R0 + #_PT_ER_VMPSP); }\ 53 + { memw(R0 + #(_PT_R2928 + 4)) = R31; \ 54 + R31 = ugp; } \ 55 + { memd(R0 + #_PT_R2726) = R27:26; \ 56 + R30 = gp ; } \ 57 + memd(R0 + #_PT_R2524) = R25:24; \ 58 + memd(R0 + #_PT_R2322) = R23:22; \ 59 + memd(R0 + #_PT_R2120) = R21:20; \ 60 + memd(R0 + #_PT_R1918) = R19:18; \ 61 + memd(R0 + #_PT_R1716) = R17:16; \ 62 + memd(R0 + #_PT_R1514) = R15:14; \ 63 + memd(R0 + #_PT_R1312) = R13:12; \ 64 + { memd(R0 + #_PT_R1110) = R11:10; \ 65 + R15 = lc0; } \ 66 + { memd(R0 + #_PT_R0908) = R9:8; \ 67 + R14 = sa0; } \ 68 + { memd(R0 + #_PT_R0706) = R7:6; \ 69 + R13 = lc1; } \ 70 + { memd(R0 + #_PT_R0504) = R5:4; \ 71 + R12 = sa1; } \ 72 + { memd(R0 + #_PT_GPUGP) = R31:30; \ 73 + R11 = m1; \ 74 + R2.H = #HI(_THREAD_SIZE); } \ 75 + { memd(R0 + #_PT_LC0SA0) = R15:14; \ 76 + R10 = m0; \ 77 + R2.L = #LO(_THREAD_SIZE); } \ 78 + { memd(R0 + #_PT_LC1SA1) = R13:12; \ 79 + R15 = p3:0; \ 80 + R2 = neg(R2); } \ 81 + { memd(R0 + #_PT_M1M0) = R11:10; \ 82 + R14 = usr; \ 83 + R2 = and(R0,R2); } \ 84 + { memd(R0 + #_PT_PREDSUSR) = R15:14; \ 85 + THREADINFO_REG = R2; } \ 86 + { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \ 87 + memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \ 88 + R2 = #-1; } \ 89 + { memw(R0 + #_PT_SYSCALL_NR) = R2; \ 90 + R30 = #0; } 91 + #else 92 + /* V4+ */ 93 + /* the # ## # syntax inserts a literal ## */ 94 + #define save_pt_regs()\ 95 + { memd(R0 + #_PT_R3130) = R31:30; \ 96 + R30 = memw(R0 + #_PT_ER_VMPSP); }\ 50 97 { memw(R0 + #_PT_R2928) = R28; \ 51 - R31 = memw(R0 + #_PT_ER_VMPSP); }\ 52 - { memw(R0 + #(_PT_R2928 + 4)) = R31; \ 53 - R31 = ugp; } \ 54 - { memd(R0 + #_PT_R2726) = R27:26; \ 55 - R30 = gp ; } \ 56 - memd(R0 + #_PT_R2524) = 
R25:24; \ 57 - memd(R0 + #_PT_R2322) = R23:22; \ 58 - memd(R0 + #_PT_R2120) = R21:20; \ 59 - memd(R0 + #_PT_R1918) = R19:18; \ 60 - memd(R0 + #_PT_R1716) = R17:16; \ 61 - memd(R0 + #_PT_R1514) = R15:14; \ 62 - memd(R0 + #_PT_R1312) = R13:12; \ 98 + memw(R0 + #(_PT_R2928 + 4)) = R30; }\ 99 + { R31:30 = C11:10; \ 100 + memd(R0 + #_PT_R2726) = R27:26; \ 101 + memd(R0 + #_PT_R2524) = R25:24; }\ 102 + { memd(R0 + #_PT_R2322) = R23:22; \ 103 + memd(R0 + #_PT_R2120) = R21:20; }\ 104 + { memd(R0 + #_PT_R1918) = R19:18; \ 105 + memd(R0 + #_PT_R1716) = R17:16; }\ 106 + { memd(R0 + #_PT_R1514) = R15:14; \ 107 + memd(R0 + #_PT_R1312) = R13:12; \ 108 + R17:16 = C13:12; }\ 63 109 { memd(R0 + #_PT_R1110) = R11:10; \ 64 - R15 = lc0; } \ 65 - { memd(R0 + #_PT_R0908) = R9:8; \ 66 - R14 = sa0; } \ 110 + memd(R0 + #_PT_R0908) = R9:8; \ 111 + R15:14 = C1:0; } \ 67 112 { memd(R0 + #_PT_R0706) = R7:6; \ 68 - R13 = lc1; } \ 69 - { memd(R0 + #_PT_R0504) = R5:4; \ 70 - R12 = sa1; } \ 71 - { memd(R0 + #_PT_UGPGP) = R31:30; \ 72 - R11 = m1; \ 73 - R2.H = #HI(_THREAD_SIZE); } \ 74 - { memd(R0 + #_PT_LC0SA0) = R15:14; \ 75 - R10 = m0; \ 76 - R2.L = #LO(_THREAD_SIZE); } \ 77 - { memd(R0 + #_PT_LC1SA1) = R13:12; \ 78 - R15 = p3:0; \ 79 - R2 = neg(R2); } \ 113 + memd(R0 + #_PT_R0504) = R5:4; \ 114 + R13:12 = C3:2; } \ 115 + { memd(R0 + #_PT_GPUGP) = R31:30; \ 116 + memd(R0 + #_PT_LC0SA0) = R15:14; \ 117 + R11:10 = C7:6; }\ 118 + { THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \ 119 + memd(R0 + #_PT_LC1SA1) = R13:12; \ 120 + R15 = p3:0; }\ 80 121 { memd(R0 + #_PT_M1M0) = R11:10; \ 81 - R14 = usr; \ 82 - R2 = and(R0,R2); } \ 83 - { memd(R0 + #_PT_PREDSUSR) = R15:14; \ 84 - THREADINFO_REG = R2; } \ 122 + memw(R0 + #_PT_PREDSUSR + 4) = R15; }\ 85 123 { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \ 86 124 memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \ 87 125 R2 = #-1; } \ 88 126 { memw(R0 + #_PT_SYSCALL_NR) = R2; \ 127 + memd(R0 + #_PT_CS1CS0) = R17:16; \ 89 128 R30 = #0; } 129 + #endif 
90 130 91 131 /* 92 132 * Restore registers and thread_info.regs state. THREADINFO_REG ··· 134 94 * preserved. Don't restore R29 (SP) until later. 135 95 */ 136 96 97 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 137 98 #define restore_pt_regs() \ 138 99 { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \ 139 100 R15:14 = memd(R0 + #_PT_PREDSUSR); } \ ··· 162 121 R23:22 = memd(R0 + #_PT_R2322); } \ 163 122 { R25:24 = memd(R0 + #_PT_R2524); \ 164 123 R27:26 = memd(R0 + #_PT_R2726); } \ 165 - R31:30 = memd(R0 + #_PT_UGPGP); \ 124 + R31:30 = memd(R0 + #_PT_GPUGP); \ 166 125 { R28 = memw(R0 + #_PT_R2928); \ 167 126 ugp = R31; } \ 168 127 { R31:30 = memd(R0 + #_PT_R3130); \ 169 128 gp = R30; } 129 + #else 130 + /* V4+ */ 131 + #define restore_pt_regs() \ 132 + { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \ 133 + R15:14 = memd(R0 + #_PT_PREDSUSR); } \ 134 + { R11:10 = memd(R0 + #_PT_M1M0); \ 135 + R13:12 = memd(R0 + #_PT_LC1SA1); \ 136 + p3:0 = R15; } \ 137 + { R15:14 = memd(R0 + #_PT_LC0SA0); \ 138 + R3:2 = memd(R0 + #_PT_R0302); \ 139 + usr = R14; } \ 140 + { R5:4 = memd(R0 + #_PT_R0504); \ 141 + R7:6 = memd(R0 + #_PT_R0706); \ 142 + C7:6 = R11:10; }\ 143 + { R9:8 = memd(R0 + #_PT_R0908); \ 144 + R11:10 = memd(R0 + #_PT_R1110); \ 145 + C3:2 = R13:12; }\ 146 + { R13:12 = memd(R0 + #_PT_R1312); \ 147 + R15:14 = memd(R0 + #_PT_R1514); \ 148 + C1:0 = R15:14; }\ 149 + { R17:16 = memd(R0 + #_PT_R1716); \ 150 + R19:18 = memd(R0 + #_PT_R1918); } \ 151 + { R21:20 = memd(R0 + #_PT_R2120); \ 152 + R23:22 = memd(R0 + #_PT_R2322); } \ 153 + { R25:24 = memd(R0 + #_PT_R2524); \ 154 + R27:26 = memd(R0 + #_PT_R2726); } \ 155 + R31:30 = memd(R0 + #_PT_CS1CS0); \ 156 + { C13:12 = R31:30; \ 157 + R31:30 = memd(R0 + #_PT_GPUGP) ; \ 158 + R28 = memw(R0 + #_PT_R2928); }\ 159 + { C11:10 = R31:30; \ 160 + R31:30 = memd(R0 + #_PT_R3130); } 161 + #endif 170 162 171 163 /* 172 164 * Clears off enough space for the rest of pt_regs; evrec is a part ··· 213 139 * Need to save off R0, R1, R2, R3 
immediately. 214 140 */ 215 141 142 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 216 143 #define vm_event_entry(CHandler) \ 217 144 { \ 218 145 R29 = add(R29, #-(_PT_REGS_SIZE)); \ ··· 233 158 R1.H = #HI(CHandler); \ 234 159 jump event_dispatch; \ 235 160 } 161 + #else 162 + /* V4+ */ 163 + /* turn on I$ prefetch early */ 164 + /* the # ## # syntax inserts a literal ## */ 165 + #define vm_event_entry(CHandler) \ 166 + { \ 167 + R29 = add(R29, #-(_PT_REGS_SIZE)); \ 168 + memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \ 169 + memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \ 170 + R0 = usr; \ 171 + } \ 172 + { \ 173 + memw(R29 + #_PT_PREDSUSR) = R0; \ 174 + R0 = setbit(R0, #16); \ 175 + } \ 176 + usr = R0; \ 177 + R1:0 = G1:0; \ 178 + { \ 179 + memd(R29 + #_PT_ER_VMEL) = R1:0; \ 180 + R1 = # ## #(CHandler); \ 181 + R3:2 = G3:2; \ 182 + } \ 183 + { \ 184 + R0 = R29; \ 185 + memd(R29 + #_PT_ER_VMPSP) = R3:2; \ 186 + jump event_dispatch; \ 187 + } 188 + #endif 236 189 237 190 .text 238 191 /* ··· 287 184 288 185 /* "Nested control path" -- if the previous mode was kernel */ 289 186 R0 = memw(R29 + #_PT_ER_VMEST); 290 - P0 = tstbit(R0, #HVM_VMEST_UM_SFT); 291 - if !P0 jump restore_all; 187 + { 188 + P0 = tstbit(R0, #HVM_VMEST_UM_SFT); 189 + if (!P0.new) jump:nt restore_all; 190 + } 292 191 /* 293 192 * Returning from system call, normally coming back from user mode 294 193 */ ··· 303 198 * Coming back from the C-world, our thread info pointer 304 199 * should be in the designated register (usually R19) 305 200 */ 201 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 306 202 R1.L = #LO(_TIF_ALLWORK_MASK) 307 203 { 308 204 R1.H = #HI(_TIF_ALLWORK_MASK); 309 205 R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); 310 206 } 207 + #else 208 + { 209 + R1 = ##_TIF_ALLWORK_MASK; 210 + R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); 211 + } 212 + #endif 311 213 312 214 /* 313 215 * Compare against the "return to userspace" _TIF_WORK_MASK ··· 334 222 work_notifysig: 335 223 /* this is the part 
that's kind of fuzzy. */ 336 224 R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)); 337 - P0 = cmp.eq(R1, #0); 338 - if P0 jump restore_all 339 - R1 = R0; /* unsigned long thread_info_flags */ 340 - R0 = R29; /* regs should still be at top of stack */ 225 + { 226 + P0 = cmp.eq(R1, #0); 227 + if P0.new jump:t restore_all; 228 + } 229 + { 230 + R1 = R0; /* unsigned long thread_info_flags */ 231 + R0 = R29; /* regs should still be at top of stack */ 232 + } 341 233 call do_notify_resume 342 234 343 235 restore_all: ··· 351 235 352 236 /* do the setregs here for VM 0.5 */ 353 237 /* R29 here should already be pointing at pt_regs */ 354 - R1:0 = memd(R29 + #_PT_ER_VMEL); 355 - R3:2 = memd(R29 + #_PT_ER_VMPSP); 238 + { 239 + R1:0 = memd(R29 + #_PT_ER_VMEL); 240 + R3:2 = memd(R29 + #_PT_ER_VMPSP); 241 + } 242 + #if CONFIG_HEXAGON_ARCH_VERSION < 4 356 243 trap1(#HVM_TRAP1_VMSETREGS); 244 + #else 245 + G1:0 = R1:0; 246 + G3:2 = R3:2; 247 + #endif 357 248 358 249 R0 = R29 359 250 restore_pt_regs() 360 - R1:0 = memd(R29 + #_PT_R0100); 361 - R29 = add(R29, #_PT_REGS_SIZE); 251 + { 252 + R1:0 = memd(R29 + #_PT_R0100); 253 + R29 = add(R29, #_PT_REGS_SIZE); 254 + } 362 255 trap1(#HVM_TRAP1_VMRTE) 363 256 /* Notreached */ 364 257
+2
arch/hexagon/kernel/vm_events.c
··· 42 42 regs->lc1, regs->sa1, regs->m1); 43 43 printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n", 44 44 regs->gp, regs->ugp, regs->usr); 45 + printk(KERN_EMERG "cs0: \t0x%08lx cs1: 0x%08lx\n", 46 + regs->cs0, regs->cs1); 45 47 printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00, 46 48 regs->r01, 47 49 regs->r02,