Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: reorganize SR referencing

- reference SRs by names where possible, not by numbers;
- get rid of __stringify around SR names where possible;
- remove unneeded SR names from asm/regs.h;
- add SREG_ prefix to remaining SR names;

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

Authored by Max Filippov; committed by Chris Zankel.
bc5378fc f4349b6e

+254 -292
+4 -4
arch/xtensa/boot/boot-redboot/bootstrap.S
··· 51 51 /* 'reset' window registers */ 52 52 53 53 movi a4, 1 54 - wsr a4, PS 54 + wsr a4, ps 55 55 rsync 56 56 57 - rsr a5, WINDOWBASE 57 + rsr a5, windowbase 58 58 ssl a5 59 59 sll a4, a4 60 - wsr a4, WINDOWSTART 60 + wsr a4, windowstart 61 61 rsync 62 62 63 63 movi a4, 0x00040000 64 - wsr a4, PS 64 + wsr a4, ps 65 65 rsync 66 66 67 67 /* copy the loader to its address
+6 -6
arch/xtensa/include/asm/atomic.h
··· 73 73 "l32i %0, %2, 0 \n\t" 74 74 "add %0, %0, %1 \n\t" 75 75 "s32i %0, %2, 0 \n\t" 76 - "wsr a15, "__stringify(PS)" \n\t" 76 + "wsr a15, ps \n\t" 77 77 "rsync \n" 78 78 : "=&a" (vval) 79 79 : "a" (i), "a" (v) ··· 97 97 "l32i %0, %2, 0 \n\t" 98 98 "sub %0, %0, %1 \n\t" 99 99 "s32i %0, %2, 0 \n\t" 100 - "wsr a15, "__stringify(PS)" \n\t" 100 + "wsr a15, ps \n\t" 101 101 "rsync \n" 102 102 : "=&a" (vval) 103 103 : "a" (i), "a" (v) ··· 118 118 "l32i %0, %2, 0 \n\t" 119 119 "add %0, %0, %1 \n\t" 120 120 "s32i %0, %2, 0 \n\t" 121 - "wsr a15, "__stringify(PS)" \n\t" 121 + "wsr a15, ps \n\t" 122 122 "rsync \n" 123 123 : "=&a" (vval) 124 124 : "a" (i), "a" (v) ··· 137 137 "l32i %0, %2, 0 \n\t" 138 138 "sub %0, %0, %1 \n\t" 139 139 "s32i %0, %2, 0 \n\t" 140 - "wsr a15, "__stringify(PS)" \n\t" 140 + "wsr a15, ps \n\t" 141 141 "rsync \n" 142 142 : "=&a" (vval) 143 143 : "a" (i), "a" (v) ··· 260 260 "xor %1, %4, %3 \n\t" 261 261 "and %0, %0, %4 \n\t" 262 262 "s32i %0, %2, 0 \n\t" 263 - "wsr a15, "__stringify(PS)" \n\t" 263 + "wsr a15, ps \n\t" 264 264 "rsync \n" 265 265 : "=&a" (vval), "=a" (mask) 266 266 : "a" (v), "a" (all_f), "1" (mask) ··· 277 277 "l32i %0, %2, 0 \n\t" 278 278 "or %0, %0, %1 \n\t" 279 279 "s32i %0, %2, 0 \n\t" 280 - "wsr a15, "__stringify(PS)" \n\t" 280 + "wsr a15, ps \n\t" 281 281 "rsync \n" 282 282 : "=&a" (vval) 283 283 : "a" (mask), "a" (v)
+1 -1
arch/xtensa/include/asm/cacheflush.h
··· 165 165 static inline u32 xtensa_get_cacheattr(void) 166 166 { 167 167 u32 r; 168 - asm volatile(" rsr %0, CACHEATTR" : "=a"(r)); 168 + asm volatile(" rsr %0, cacheattr" : "=a"(r)); 169 169 return r; 170 170 } 171 171
+2 -2
arch/xtensa/include/asm/cmpxchg.h
··· 27 27 "bne %0, %2, 1f \n\t" 28 28 "s32i %3, %1, 0 \n\t" 29 29 "1: \n\t" 30 - "wsr a15, "__stringify(PS)" \n\t" 30 + "wsr a15, ps \n\t" 31 31 "rsync \n\t" 32 32 : "=&a" (old) 33 33 : "a" (p), "a" (old), "r" (new) ··· 97 97 __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" 98 98 "l32i %0, %1, 0 \n\t" 99 99 "s32i %2, %1, 0 \n\t" 100 - "wsr a15, "__stringify(PS)" \n\t" 100 + "wsr a15, ps \n\t" 101 101 "rsync \n\t" 102 102 : "=&a" (tmp) 103 103 : "a" (m), "a" (val)
+2 -3
arch/xtensa/include/asm/coprocessor.h
··· 94 94 #if XCHAL_HAVE_CP 95 95 96 96 #define RSR_CPENABLE(x) do { \ 97 - __asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \ 97 + __asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \ 98 98 } while(0); 99 99 #define WSR_CPENABLE(x) do { \ 100 - __asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \ 101 - :: "a" (x)); \ 100 + __asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \ 102 101 } while(0); 103 102 104 103 #endif /* XCHAL_HAVE_CP */
+1 -1
arch/xtensa/include/asm/delay.h
··· 27 27 static __inline__ u32 xtensa_get_ccount(void) 28 28 { 29 29 u32 ccount; 30 - asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount)); 30 + asm volatile ("rsr %0, ccount\n" : "=r" (ccount)); 31 31 return ccount; 32 32 } 33 33
+2 -2
arch/xtensa/include/asm/irqflags.h
··· 16 16 static inline unsigned long arch_local_save_flags(void) 17 17 { 18 18 unsigned long flags; 19 - asm volatile("rsr %0,"__stringify(PS) : "=a" (flags)); 19 + asm volatile("rsr %0, ps" : "=a" (flags)); 20 20 return flags; 21 21 } 22 22 ··· 41 41 42 42 static inline void arch_local_irq_restore(unsigned long flags) 43 43 { 44 - asm volatile("wsr %0, "__stringify(PS)" ; rsync" 44 + asm volatile("wsr %0, ps; rsync" 45 45 :: "a" (flags) : "memory"); 46 46 } 47 47
+2 -2
arch/xtensa/include/asm/mmu_context.h
··· 51 51 52 52 static inline void set_rasid_register (unsigned long val) 53 53 { 54 - __asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t" 54 + __asm__ __volatile__ (" wsr %0, rasid\n\t" 55 55 " isync\n" : : "a" (val)); 56 56 } 57 57 58 58 static inline unsigned long get_rasid_register (void) 59 59 { 60 60 unsigned long tmp; 61 - __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp)); 61 + __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp)); 62 62 return tmp; 63 63 } 64 64
+9 -46
arch/xtensa/include/asm/regs.h
··· 27 27 28 28 /* Special registers. */ 29 29 30 - #define LBEG 0 31 - #define LEND 1 32 - #define LCOUNT 2 33 - #define SAR 3 34 - #define BR 4 35 - #define SCOMPARE1 12 36 - #define ACCHI 16 37 - #define ACCLO 17 38 - #define MR 32 39 - #define WINDOWBASE 72 40 - #define WINDOWSTART 73 41 - #define PTEVADDR 83 42 - #define RASID 90 43 - #define ITLBCFG 91 44 - #define DTLBCFG 92 45 - #define IBREAKENABLE 96 46 - #define DDR 104 47 - #define IBREAKA 128 48 - #define DBREAKA 144 49 - #define DBREAKC 160 50 - #define EPC 176 51 - #define EPC_1 177 52 - #define DEPC 192 53 - #define EPS 192 54 - #define EPS_1 193 55 - #define EXCSAVE 208 56 - #define EXCSAVE_1 209 57 - #define INTERRUPT 226 58 - #define INTENABLE 228 59 - #define PS 230 60 - #define THREADPTR 231 61 - #define EXCCAUSE 232 62 - #define DEBUGCAUSE 233 63 - #define CCOUNT 234 64 - #define PRID 235 65 - #define ICOUNT 236 66 - #define ICOUNTLEVEL 237 67 - #define EXCVADDR 238 68 - #define CCOMPARE 240 69 - #define MISC_SR 244 70 - 71 - /* Special names for read-only and write-only interrupt registers. */ 72 - 73 - #define INTREAD 226 74 - #define INTSET 226 75 - #define INTCLEAR 227 30 + #define SREG_MR 32 31 + #define SREG_IBREAKA 128 32 + #define SREG_DBREAKA 144 33 + #define SREG_DBREAKC 160 34 + #define SREG_EPC 176 35 + #define SREG_EPS 192 36 + #define SREG_EXCSAVE 208 37 + #define SREG_CCOMPARE 240 38 + #define SREG_MISC 244 76 39 77 40 /* EXCCAUSE register fields */ 78 41
+4 -4
arch/xtensa/include/asm/timex.h
··· 63 63 * Register access. 64 64 */ 65 65 66 - #define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r)) 67 - #define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r)) 68 - #define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r)) 69 - #define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r)) 66 + #define WSR_CCOUNT(r) asm volatile ("wsr %0, ccount" :: "a" (r)) 67 + #define RSR_CCOUNT(r) asm volatile ("rsr %0, ccount" : "=a" (r)) 68 + #define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r)) 69 + #define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r)) 70 70 71 71 static inline unsigned long get_ccount (void) 72 72 {
+4 -4
arch/xtensa/include/asm/tlbflush.h
··· 86 86 87 87 static inline void set_itlbcfg_register (unsigned long val) 88 88 { 89 - __asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t" 89 + __asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t" 90 90 : : "a" (val)); 91 91 } 92 92 93 93 static inline void set_dtlbcfg_register (unsigned long val) 94 94 { 95 - __asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t" 95 + __asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t" 96 96 : : "a" (val)); 97 97 } 98 98 99 99 static inline void set_ptevaddr_register (unsigned long val) 100 100 { 101 - __asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n" 101 + __asm__ __volatile__(" wsr %0, ptevaddr; isync\n" 102 102 : : "a" (val)); 103 103 } 104 104 105 105 static inline unsigned long read_ptevaddr_register (void) 106 106 { 107 107 unsigned long tmp; 108 - __asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp)); 108 + __asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp)); 109 109 return tmp; 110 110 } 111 111
+19 -19
arch/xtensa/kernel/align.S
··· 170 170 s32i a7, a2, PT_AREG7 171 171 s32i a8, a2, PT_AREG8 172 172 173 - rsr a0, DEPC 174 - xsr a3, EXCSAVE_1 173 + rsr a0, depc 174 + xsr a3, excsave1 175 175 s32i a0, a2, PT_AREG2 176 176 s32i a3, a2, PT_AREG3 177 177 178 178 /* Keep value of SAR in a0 */ 179 179 180 - rsr a0, SAR 181 - rsr a8, EXCVADDR # load unaligned memory address 180 + rsr a0, sar 181 + rsr a8, excvaddr # load unaligned memory address 182 182 183 183 /* Now, identify one of the following load/store instructions. 184 184 * ··· 197 197 198 198 /* Extract the instruction that caused the unaligned access. */ 199 199 200 - rsr a7, EPC_1 # load exception address 200 + rsr a7, epc1 # load exception address 201 201 movi a3, ~3 202 202 and a3, a3, a7 # mask lower bits 203 203 ··· 275 275 1: 276 276 277 277 #if XCHAL_HAVE_LOOPS 278 - rsr a5, LEND # check if we reached LEND 278 + rsr a5, lend # check if we reached LEND 279 279 bne a7, a5, 1f 280 - rsr a5, LCOUNT # and LCOUNT != 0 280 + rsr a5, lcount # and LCOUNT != 0 281 281 beqz a5, 1f 282 282 addi a5, a5, -1 # decrement LCOUNT and set 283 - rsr a7, LBEG # set PC to LBEGIN 284 - wsr a5, LCOUNT 283 + rsr a7, lbeg # set PC to LBEGIN 284 + wsr a5, lcount 285 285 #endif 286 286 287 - 1: wsr a7, EPC_1 # skip load instruction 287 + 1: wsr a7, epc1 # skip load instruction 288 288 extui a4, a4, INSN_T, 4 # extract target register 289 289 movi a5, .Lload_table 290 290 addx8 a4, a4, a5 ··· 355 355 356 356 1: 357 357 #if XCHAL_HAVE_LOOPS 358 - rsr a4, LEND # check if we reached LEND 358 + rsr a4, lend # check if we reached LEND 359 359 bne a7, a4, 1f 360 - rsr a4, LCOUNT # and LCOUNT != 0 360 + rsr a4, lcount # and LCOUNT != 0 361 361 beqz a4, 1f 362 362 addi a4, a4, -1 # decrement LCOUNT and set 363 - rsr a7, LBEG # set PC to LBEGIN 364 - wsr a4, LCOUNT 363 + rsr a7, lbeg # set PC to LBEGIN 364 + wsr a4, lcount 365 365 #endif 366 366 367 - 1: wsr a7, EPC_1 # skip store instruction 367 + 1: wsr a7, epc1 # skip store instruction 368 368 movi a4, ~3 369 369 
and a4, a4, a8 # align memory address 370 370 ··· 406 406 407 407 .Lexit: 408 408 movi a4, 0 409 - rsr a3, EXCSAVE_1 409 + rsr a3, excsave1 410 410 s32i a4, a3, EXC_TABLE_FIXUP 411 411 412 412 /* Restore working register */ ··· 420 420 421 421 /* restore SAR and return */ 422 422 423 - wsr a0, SAR 423 + wsr a0, sar 424 424 l32i a0, a2, PT_AREG0 425 425 l32i a2, a2, PT_AREG2 426 426 rfe ··· 438 438 l32i a6, a2, PT_AREG6 439 439 l32i a5, a2, PT_AREG5 440 440 l32i a4, a2, PT_AREG4 441 - wsr a0, SAR 441 + wsr a0, sar 442 442 mov a1, a2 443 443 444 - rsr a0, PS 444 + rsr a0, ps 445 445 bbsi.l a2, PS_UM_BIT, 1f # jump if user mode 446 446 447 447 movi a0, _kernel_exception
+10 -10
arch/xtensa/kernel/coprocessor.S
··· 43 43 /* IO protection is currently unsupported. */ 44 44 45 45 ENTRY(fast_io_protect) 46 - wsr a0, EXCSAVE_1 46 + wsr a0, excsave1 47 47 movi a0, unrecoverable_exception 48 48 callx0 a0 49 49 ··· 220 220 */ 221 221 222 222 ENTRY(fast_coprocessor_double) 223 - wsr a0, EXCSAVE_1 223 + wsr a0, excsave1 224 224 movi a0, unrecoverable_exception 225 225 callx0 a0 226 226 ··· 229 229 230 230 /* Save remaining registers a1-a3 and SAR */ 231 231 232 - xsr a3, EXCSAVE_1 232 + xsr a3, excsave1 233 233 s32i a3, a2, PT_AREG3 234 - rsr a3, SAR 234 + rsr a3, sar 235 235 s32i a1, a2, PT_AREG1 236 236 s32i a3, a2, PT_SAR 237 237 mov a1, a2 238 - rsr a2, DEPC 238 + rsr a2, depc 239 239 s32i a2, a1, PT_AREG2 240 240 241 241 /* ··· 248 248 249 249 /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */ 250 250 251 - rsr a3, EXCCAUSE 251 + rsr a3, exccause 252 252 addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED 253 253 254 254 /* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/ 255 255 256 256 ssl a3 # SAR: 32 - coprocessor_number 257 257 movi a2, 1 258 - rsr a0, CPENABLE 258 + rsr a0, cpenable 259 259 sll a2, a2 260 260 or a0, a0, a2 261 - wsr a0, CPENABLE 261 + wsr a0, cpenable 262 262 rsync 263 263 264 264 /* Retrieve previous owner. (a3 still holds CP number) */ ··· 291 291 292 292 /* Note that only a0 and a1 were preserved. */ 293 293 294 - 2: rsr a3, EXCCAUSE 294 + 2: rsr a3, exccause 295 295 addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED 296 296 movi a0, coprocessor_owner 297 297 addx4 a0, a3, a0 ··· 321 321 l32i a0, a1, PT_SAR 322 322 l32i a3, a1, PT_AREG3 323 323 l32i a2, a1, PT_AREG2 324 - wsr a0, SAR 324 + wsr a0, sar 325 325 l32i a0, a1, PT_AREG0 326 326 l32i a1, a1, PT_AREG1 327 327
+126 -126
arch/xtensa/kernel/entry.S
··· 112 112 113 113 /* Save a2, a3, and depc, restore excsave_1 and set SP. */ 114 114 115 - xsr a3, EXCSAVE_1 116 - rsr a0, DEPC 115 + xsr a3, excsave1 116 + rsr a0, depc 117 117 s32i a1, a2, PT_AREG1 118 118 s32i a0, a2, PT_AREG2 119 119 s32i a3, a2, PT_AREG3 ··· 125 125 /* Save SAR and turn off single stepping */ 126 126 127 127 movi a2, 0 128 - rsr a3, SAR 129 - xsr a2, ICOUNTLEVEL 128 + rsr a3, sar 129 + xsr a2, icountlevel 130 130 s32i a3, a1, PT_SAR 131 131 s32i a2, a1, PT_ICOUNTLEVEL 132 132 133 133 /* Rotate ws so that the current windowbase is at bit0. */ 134 134 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 135 135 136 - rsr a2, WINDOWBASE 137 - rsr a3, WINDOWSTART 136 + rsr a2, windowbase 137 + rsr a3, windowstart 138 138 ssr a2 139 139 s32i a2, a1, PT_WINDOWBASE 140 140 s32i a3, a1, PT_WINDOWSTART ··· 205 205 206 206 /* WINDOWBASE still in SAR! */ 207 207 208 - rsr a2, SAR # original WINDOWBASE 208 + rsr a2, sar # original WINDOWBASE 209 209 movi a3, 1 210 210 ssl a2 211 211 sll a3, a3 212 - wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit 213 - wsr a2, WINDOWBASE # and WINDOWSTART 212 + wsr a3, windowstart # set corresponding WINDOWSTART bit 213 + wsr a2, windowbase # and WINDOWSTART 214 214 rsync 215 215 216 216 /* We are back to the original stack pointer (a1) */ ··· 252 252 253 253 /* Save a0, a2, a3, DEPC and set SP. */ 254 254 255 - xsr a3, EXCSAVE_1 # restore a3, excsave_1 256 - rsr a0, DEPC # get a2 255 + xsr a3, excsave1 # restore a3, excsave_1 256 + rsr a0, depc # get a2 257 257 s32i a1, a2, PT_AREG1 258 258 s32i a0, a2, PT_AREG2 259 259 s32i a3, a2, PT_AREG3 ··· 265 265 /* Save SAR and turn off single stepping */ 266 266 267 267 movi a2, 0 268 - rsr a3, SAR 269 - xsr a2, ICOUNTLEVEL 268 + rsr a3, sar 269 + xsr a2, icountlevel 270 270 s32i a3, a1, PT_SAR 271 271 s32i a2, a1, PT_ICOUNTLEVEL 272 272 273 273 /* Rotate ws so that the current windowbase is at bit0. */ 274 274 /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ 275 275 276 - rsr a2, WINDOWBASE # don't need to save these, we only 277 - rsr a3, WINDOWSTART # need shifted windowstart: windowmask 276 + rsr a2, windowbase # don't need to save these, we only 277 + rsr a3, windowstart # need shifted windowstart: windowmask 278 278 ssr a2 279 279 slli a2, a3, 32-WSBITS 280 280 src a2, a3, a2 ··· 323 323 324 324 /* Save some registers, disable loops and clear the syscall flag. */ 325 325 326 - rsr a2, DEBUGCAUSE 327 - rsr a3, EPC_1 326 + rsr a2, debugcause 327 + rsr a3, epc1 328 328 s32i a2, a1, PT_DEBUGCAUSE 329 329 s32i a3, a1, PT_PC 330 330 331 331 movi a2, -1 332 - rsr a3, EXCVADDR 332 + rsr a3, excvaddr 333 333 s32i a2, a1, PT_SYSCALL 334 334 movi a2, 0 335 335 s32i a3, a1, PT_EXCVADDR 336 - xsr a2, LCOUNT 336 + xsr a2, lcount 337 337 s32i a2, a1, PT_LCOUNT 338 338 339 339 /* It is now save to restore the EXC_TABLE_FIXUP variable. */ 340 340 341 - rsr a0, EXCCAUSE 341 + rsr a0, exccause 342 342 movi a3, 0 343 - rsr a2, EXCSAVE_1 343 + rsr a2, excsave1 344 344 s32i a0, a1, PT_EXCCAUSE 345 345 s32i a3, a2, EXC_TABLE_FIXUP 346 346 ··· 352 352 * (interrupts disabled) and if this exception is not an interrupt. 
353 353 */ 354 354 355 - rsr a3, PS 355 + rsr a3, ps 356 356 addi a0, a0, -4 357 357 movi a2, 1 358 358 extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0] 359 359 moveqz a3, a2, a0 # a3 = 1 iff interrupt exception 360 360 movi a2, 1 << PS_WOE_BIT 361 361 or a3, a3, a2 362 - rsr a0, EXCCAUSE 363 - xsr a3, PS 362 + rsr a0, exccause 363 + xsr a3, ps 364 364 365 365 s32i a3, a1, PT_PS # save ps 366 366 367 - /* Save LBEG, LEND */ 367 + /* Save lbeg, lend */ 368 368 369 - rsr a2, LBEG 370 - rsr a3, LEND 369 + rsr a2, lbeg 370 + rsr a3, lend 371 371 s32i a2, a1, PT_LBEG 372 372 s32i a3, a1, PT_LEND 373 373 ··· 432 432 433 433 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 434 434 435 - wsr a3, PS /* disable interrupts */ 435 + wsr a3, ps /* disable interrupts */ 436 436 437 437 _bbci.l a3, PS_UM_BIT, kernel_exception_exit 438 438 ··· 444 444 445 445 l32i a2, a1, PT_WINDOWBASE 446 446 l32i a3, a1, PT_WINDOWSTART 447 - wsr a1, DEPC # use DEPC as temp storage 448 - wsr a3, WINDOWSTART # restore WINDOWSTART 447 + wsr a1, depc # use DEPC as temp storage 448 + wsr a3, windowstart # restore WINDOWSTART 449 449 ssr a2 # preserve user's WB in the SAR 450 - wsr a2, WINDOWBASE # switch to user's saved WB 450 + wsr a2, windowbase # switch to user's saved WB 451 451 rsync 452 - rsr a1, DEPC # restore stack pointer 452 + rsr a1, depc # restore stack pointer 453 453 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) 454 454 rotw -1 # we restore a4..a7 455 455 _bltui a6, 16, 1f # only have to restore current window? ··· 475 475 476 476 /* Clear unrestored registers (don't leak anything to user-land */ 477 477 478 - 1: rsr a0, WINDOWBASE 479 - rsr a3, SAR 478 + 1: rsr a0, windowbase 479 + rsr a3, sar 480 480 sub a3, a0, a3 481 481 beqz a3, 2f 482 482 extui a3, a3, 0, WBBITS ··· 556 556 557 557 /* Test WINDOWSTART now. 
If spilled, do the movsp */ 558 558 559 - rsr a3, WINDOWSTART 559 + rsr a3, windowstart 560 560 addi a0, a3, -1 561 561 and a3, a3, a0 562 562 _bnez a3, common_exception_exit ··· 604 604 605 605 1: l32i a2, a1, PT_PC 606 606 l32i a3, a1, PT_SAR 607 - wsr a2, EPC_1 608 - wsr a3, SAR 607 + wsr a2, epc1 608 + wsr a3, sar 609 609 610 610 /* Restore LBEG, LEND, LCOUNT */ 611 611 612 612 l32i a2, a1, PT_LBEG 613 613 l32i a3, a1, PT_LEND 614 - wsr a2, LBEG 614 + wsr a2, lbeg 615 615 l32i a2, a1, PT_LCOUNT 616 - wsr a3, LEND 617 - wsr a2, LCOUNT 616 + wsr a3, lend 617 + wsr a2, lcount 618 618 619 619 /* We control single stepping through the ICOUNTLEVEL register. */ 620 620 621 621 l32i a2, a1, PT_ICOUNTLEVEL 622 622 movi a3, -2 623 - wsr a2, ICOUNTLEVEL 624 - wsr a3, ICOUNT 623 + wsr a2, icountlevel 624 + wsr a3, icount 625 625 626 626 /* Check if it was double exception. */ 627 627 ··· 636 636 l32i a1, a1, PT_AREG1 637 637 rfe 638 638 639 - 1: wsr a0, DEPC 639 + 1: wsr a0, depc 640 640 l32i a0, a1, PT_AREG0 641 641 l32i a1, a1, PT_AREG1 642 642 rfde ··· 651 651 652 652 ENTRY(debug_exception) 653 653 654 - rsr a0, EPS + XCHAL_DEBUGLEVEL 654 + rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL 655 655 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode 656 656 657 - /* Set EPC_1 and EXCCAUSE */ 657 + /* Set EPC1 and EXCCAUSE */ 658 658 659 - wsr a2, DEPC # save a2 temporarily 660 - rsr a2, EPC + XCHAL_DEBUGLEVEL 661 - wsr a2, EPC_1 659 + wsr a2, depc # save a2 temporarily 660 + rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL 661 + wsr a2, epc1 662 662 663 663 movi a2, EXCCAUSE_MAPPED_DEBUG 664 - wsr a2, EXCCAUSE 664 + wsr a2, exccause 665 665 666 666 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ 667 667 668 668 movi a2, 1 << PS_EXCM_BIT 669 669 or a2, a0, a2 670 670 movi a0, debug_exception # restore a3, debug jump vector 671 - wsr a2, PS 672 - xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL 671 + wsr a2, ps 672 + xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 673 673 674 674 /* Switch to kernel/user 
stack, restore jump vector, and save a0 */ 675 675 ··· 680 680 movi a0, 0 681 681 s32i a1, a2, PT_AREG1 682 682 s32i a0, a2, PT_DEPC # mark it as a regular exception 683 - xsr a0, DEPC 683 + xsr a0, depc 684 684 s32i a3, a2, PT_AREG3 685 685 s32i a0, a2, PT_AREG2 686 686 mov a1, a2 687 687 j _kernel_exception 688 688 689 - 2: rsr a2, EXCSAVE_1 689 + 2: rsr a2, excsave1 690 690 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer 691 691 s32i a0, a2, PT_AREG0 692 692 movi a0, 0 693 693 s32i a1, a2, PT_AREG1 694 694 s32i a0, a2, PT_DEPC 695 - xsr a0, DEPC 695 + xsr a0, depc 696 696 s32i a3, a2, PT_AREG3 697 697 s32i a0, a2, PT_AREG2 698 698 mov a1, a2 ··· 732 732 movi a0, 1 733 733 movi a1, 0 734 734 735 - wsr a0, WINDOWSTART 736 - wsr a1, WINDOWBASE 735 + wsr a0, windowstart 736 + wsr a1, windowbase 737 737 rsync 738 738 739 739 movi a1, (1 << PS_WOE_BIT) | 1 740 - wsr a1, PS 740 + wsr a1, ps 741 741 rsync 742 742 743 743 movi a1, init_task ··· 793 793 l32i a0, a2, PT_DEPC 794 794 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double 795 795 796 - rsr a0, DEPC # get a2 796 + rsr a0, depc # get a2 797 797 s32i a4, a2, PT_AREG4 # save a4 and 798 798 s32i a0, a2, PT_AREG2 # a2 to stack 799 799 ··· 804 804 805 805 /* Restore a3, excsave_1 */ 806 806 807 - xsr a3, EXCSAVE_1 # make sure excsave_1 is valid for dbl. 808 - rsr a4, EPC_1 # get exception address 807 + xsr a3, excsave1 # make sure excsave_1 is valid for dbl. 
808 + rsr a4, epc1 # get exception address 809 809 s32i a3, a2, PT_AREG3 # save a3 to stack 810 810 811 811 #ifdef ALLOCA_EXCEPTION_IN_IRAM ··· 820 820 jx a3 821 821 822 822 .Lunhandled_double: 823 - wsr a0, EXCSAVE_1 823 + wsr a0, excsave1 824 824 movi a0, unrecoverable_exception 825 825 callx0 a0 826 826 ··· 852 852 #endif 853 853 addi a4, a4, 3 # step over movsp 854 854 _EXTUI_MOVSP_DST(a0) # extract destination register 855 - wsr a4, EPC_1 # save new epc_1 855 + wsr a4, epc1 # save new epc_1 856 856 857 857 _bnei a0, 1, 1f # no 'movsp a1, ax': jump 858 858 ··· 953 953 954 954 /* Skip syscall. */ 955 955 956 - rsr a0, EPC_1 956 + rsr a0, epc1 957 957 addi a0, a0, 3 958 - wsr a0, EPC_1 958 + wsr a0, epc1 959 959 960 960 l32i a0, a2, PT_DEPC 961 961 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable 962 962 963 - rsr a0, DEPC # get syscall-nr 963 + rsr a0, depc # get syscall-nr 964 964 _beqz a0, fast_syscall_spill_registers 965 965 _beqi a0, __NR_xtensa, fast_syscall_xtensa 966 966 ··· 970 970 971 971 /* Skip syscall. */ 972 972 973 - rsr a0, EPC_1 973 + rsr a0, epc1 974 974 addi a0, a0, 3 975 - wsr a0, EPC_1 975 + wsr a0, epc1 976 976 977 977 l32i a0, a2, PT_DEPC 978 978 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable 979 979 980 - rsr a0, DEPC # get syscall-nr 980 + rsr a0, depc # get syscall-nr 981 981 _beqz a0, fast_syscall_spill_registers 982 982 _beqi a0, __NR_xtensa, fast_syscall_xtensa 983 983 ··· 988 988 /* Restore all states. 
*/ 989 989 990 990 l32i a0, a2, PT_AREG0 # restore a0 991 - xsr a2, DEPC # restore a2, depc 992 - rsr a3, EXCSAVE_1 991 + xsr a2, depc # restore a2, depc 992 + rsr a3, excsave1 993 993 994 - wsr a0, EXCSAVE_1 994 + wsr a0, excsave1 995 995 movi a0, unrecoverable_exception 996 996 callx0 a0 997 997 ··· 1047 1047 1048 1048 ENTRY(fast_syscall_xtensa) 1049 1049 1050 - xsr a3, EXCSAVE_1 # restore a3, excsave1 1050 + xsr a3, excsave1 # restore a3, excsave1 1051 1051 1052 1052 s32i a7, a2, PT_AREG7 # we need an additional register 1053 1053 movi a7, 4 # sizeof(unsigned int) ··· 1124 1124 1125 1125 movi a0, fast_syscall_spill_registers_fixup 1126 1126 s32i a0, a3, EXC_TABLE_FIXUP 1127 - rsr a0, WINDOWBASE 1127 + rsr a0, windowbase 1128 1128 s32i a0, a3, EXC_TABLE_PARAM 1129 1129 1130 1130 /* Save a3 and SAR on stack. */ 1131 1131 1132 - rsr a0, SAR 1133 - xsr a3, EXCSAVE_1 # restore a3 and excsave_1 1132 + rsr a0, sar 1133 + xsr a3, excsave1 # restore a3 and excsave_1 1134 1134 s32i a3, a2, PT_AREG3 1135 1135 s32i a4, a2, PT_AREG4 1136 1136 s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 ··· 1148 1148 l32i a3, a2, PT_AREG5 1149 1149 l32i a4, a2, PT_AREG4 1150 1150 l32i a0, a2, PT_AREG0 1151 - wsr a3, SAR 1151 + wsr a3, sar 1152 1152 l32i a3, a2, PT_AREG3 1153 1153 1154 1154 /* Restore clobbered registers. */ ··· 1173 1173 1174 1174 fast_syscall_spill_registers_fixup: 1175 1175 1176 - rsr a2, WINDOWBASE # get current windowbase (a2 is saved) 1177 - xsr a0, DEPC # restore depc and a0 1176 + rsr a2, windowbase # get current windowbase (a2 is saved) 1177 + xsr a0, depc # restore depc and a0 1178 1178 ssl a2 # set shift (32 - WB) 1179 1179 1180 1180 /* We need to make sure the current registers (a0-a3) are preserved. ··· 1182 1182 * in WS, so that the exception handlers save them to the task stack. 
1183 1183 */ 1184 1184 1185 - rsr a3, EXCSAVE_1 # get spill-mask 1185 + rsr a3, excsave1 # get spill-mask 1186 1186 slli a2, a3, 1 # shift left by one 1187 1187 1188 1188 slli a3, a2, 32-WSBITS 1189 1189 src a2, a2, a3 # a1 = xxwww1yyxxxwww1yy...... 1190 - wsr a2, WINDOWSTART # set corrected windowstart 1190 + wsr a2, windowstart # set corrected windowstart 1191 1191 1192 1192 movi a3, exc_table 1193 1193 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2 ··· 1201 1201 * excsave_1: a3 1202 1202 */ 1203 1203 1204 - wsr a3, WINDOWBASE 1204 + wsr a3, windowbase 1205 1205 rsync 1206 1206 1207 1207 /* We are now in the original frame when we entered _spill_registers: ··· 1227 1227 /* Jump to the exception handler. */ 1228 1228 1229 1229 movi a3, exc_table 1230 - rsr a0, EXCCAUSE 1230 + rsr a0, exccause 1231 1231 addx4 a0, a0, a3 # find entry in table 1232 1232 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1233 1233 jx a0 ··· 1236 1236 1237 1237 /* When we return here, all registers have been restored (a2: DEPC) */ 1238 1238 1239 - wsr a2, DEPC # exception address 1239 + wsr a2, depc # exception address 1240 1240 1241 1241 /* Restore fixup handler. */ 1242 1242 1243 - xsr a3, EXCSAVE_1 1243 + xsr a3, excsave1 1244 1244 movi a2, fast_syscall_spill_registers_fixup 1245 1245 s32i a2, a3, EXC_TABLE_FIXUP 1246 - rsr a2, WINDOWBASE 1246 + rsr a2, windowbase 1247 1247 s32i a2, a3, EXC_TABLE_PARAM 1248 1248 l32i a2, a3, EXC_TABLE_KSTK 1249 1249 1250 1250 /* Load WB at the time the exception occurred. */ 1251 1251 1252 - rsr a3, SAR # WB is still in SAR 1252 + rsr a3, sar # WB is still in SAR 1253 1253 neg a3, a3 1254 - wsr a3, WINDOWBASE 1254 + wsr a3, windowbase 1255 1255 rsync 1256 1256 1257 1257 /* Restore a3 and return. */ 1258 1258 1259 1259 movi a3, exc_table 1260 - xsr a3, EXCSAVE_1 1260 + xsr a3, excsave1 1261 1261 1262 1262 rfde 1263 1263 ··· 1283 1283 * Rotate ws right so that a4 = yyxxxwww1. 
1284 1284 */ 1285 1285 1286 - rsr a4, WINDOWBASE 1287 - rsr a3, WINDOWSTART # a3 = xxxwww1yy 1286 + rsr a4, windowbase 1287 + rsr a3, windowstart # a3 = xxxwww1yy 1288 1288 ssr a4 # holds WB 1289 1289 slli a4, a3, WSBITS 1290 1290 or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy ··· 1302 1302 1303 1303 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1304 1304 1305 - wsr a3, WINDOWSTART # save shifted windowstart 1305 + wsr a3, windowstart # save shifted windowstart 1306 1306 neg a4, a3 1307 1307 and a3, a4, a3 # first bit set from right: 000010000 1308 1308 ··· 1311 1311 sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right 1312 1312 ssr a4 # save in SAR for later. 1313 1313 1314 - rsr a3, WINDOWBASE 1314 + rsr a3, windowbase 1315 1315 add a3, a3, a4 1316 - wsr a3, WINDOWBASE 1316 + wsr a3, windowbase 1317 1317 rsync 1318 1318 1319 - rsr a3, WINDOWSTART 1319 + rsr a3, windowstart 1320 1320 srl a3, a3 # shift windowstart 1321 1321 1322 1322 /* WB is now just one frame below the oldest frame in the register ··· 1364 1364 .Lexit: /* Done. Do the final rotation, set WS, and return. */ 1365 1365 1366 1366 rotw 1 1367 - rsr a3, WINDOWBASE 1367 + rsr a3, windowbase 1368 1368 ssl a3 1369 1369 movi a3, 1 1370 1370 sll a3, a3 1371 - wsr a3, WINDOWSTART 1371 + wsr a3, windowstart 1372 1372 ret 1373 1373 1374 1374 .Lc4: s32e a4, a9, -16 ··· 1429 1429 * however, this condition is unrecoverable in kernel space. 1430 1430 */ 1431 1431 1432 - rsr a0, PS 1432 + rsr a0, ps 1433 1433 _bbci.l a0, PS_UM_BIT, 1f 1434 1434 1435 1435 /* User space: Setup a dummy frame and kill application. 
··· 1439 1439 movi a0, 1 1440 1440 movi a1, 0 1441 1441 1442 - wsr a0, WINDOWSTART 1443 - wsr a1, WINDOWBASE 1442 + wsr a0, windowstart 1443 + wsr a1, windowbase 1444 1444 rsync 1445 1445 1446 1446 movi a0, 0 1447 1447 1448 1448 movi a3, exc_table 1449 1449 l32i a1, a3, EXC_TABLE_KSTK 1450 - wsr a3, EXCSAVE_1 1450 + wsr a3, excsave1 1451 1451 1452 1452 movi a4, (1 << PS_WOE_BIT) | 1 1453 - wsr a4, PS 1453 + wsr a4, ps 1454 1454 rsync 1455 1455 1456 1456 movi a6, SIGSEGV ··· 1459 1459 1460 1460 1: /* Kernel space: PANIC! */ 1461 1461 1462 - wsr a0, EXCSAVE_1 1462 + wsr a0, excsave1 1463 1463 movi a0, unrecoverable_exception 1464 1464 callx0 a0 # should not return 1465 1465 1: j 1b ··· 1524 1524 1525 1525 /* We deliberately destroy a3 that holds the exception table. */ 1526 1526 1527 - 8: rsr a3, EXCVADDR # fault address 1527 + 8: rsr a3, excvaddr # fault address 1528 1528 _PGD_OFFSET(a0, a3, a1) 1529 1529 l32i a0, a0, 0 # read pmdval 1530 1530 beqz a0, 2f ··· 1561 1561 */ 1562 1562 1563 1563 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 1564 - rsr a1, PTEVADDR 1564 + rsr a1, ptevaddr 1565 1565 addx2 a3, a3, a3 # -> 0,3,6,9 1566 1566 srli a1, a1, PAGE_SHIFT 1567 1567 extui a3, a3, 2, 2 # -> 0,0,1,2 ··· 1583 1583 l32i a0, a2, PT_AREG0 1584 1584 l32i a1, a2, PT_AREG1 1585 1585 l32i a2, a2, PT_DEPC 1586 - xsr a3, EXCSAVE_1 1586 + xsr a3, excsave1 1587 1587 1588 1588 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1589 1589 1590 1590 /* Restore excsave1 and return. */ 1591 1591 1592 - rsr a2, DEPC 1592 + rsr a2, depc 1593 1593 rfe 1594 1594 1595 1595 /* Return from double exception. 
*/ 1596 1596 1597 - 1: xsr a2, DEPC 1597 + 1: xsr a2, depc 1598 1598 esync 1599 1599 rfde 1600 1600 ··· 1618 1618 /* Make sure the exception originated in the special functions */ 1619 1619 1620 1620 movi a0, __tlbtemp_mapping_start 1621 - rsr a3, EPC_1 1621 + rsr a3, epc1 1622 1622 bltu a3, a0, 2f 1623 1623 movi a0, __tlbtemp_mapping_end 1624 1624 bgeu a3, a0, 2f ··· 1626 1626 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ 1627 1627 1628 1628 movi a3, TLBTEMP_BASE_1 1629 - rsr a0, EXCVADDR 1629 + rsr a0, excvaddr 1630 1630 bltu a0, a3, 2f 1631 1631 1632 1632 addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) ··· 1635 1635 /* Check if we have to restore an ITLB mapping. */ 1636 1636 1637 1637 movi a1, __tlbtemp_mapping_itlb 1638 - rsr a3, EPC_1 1638 + rsr a3, epc1 1639 1639 sub a3, a3, a1 1640 1640 1641 1641 /* Calculate VPN */ ··· 1671 1671 2: /* Invalid PGD, default exception handling */ 1672 1672 1673 1673 movi a3, exc_table 1674 - rsr a1, DEPC 1675 - xsr a3, EXCSAVE_1 1674 + rsr a1, depc 1675 + xsr a3, excsave1 1676 1676 s32i a1, a2, PT_AREG2 1677 1677 s32i a3, a2, PT_AREG3 1678 1678 mov a1, a2 1679 1679 1680 - rsr a2, PS 1680 + rsr a2, ps 1681 1681 bbsi.l a2, PS_UM_BIT, 1f 1682 1682 j _kernel_exception 1683 1683 1: j _user_exception ··· 1712 1712 l32i a0, a1, TASK_MM # tsk->mm 1713 1713 beqz a0, 9f 1714 1714 1715 - 8: rsr a1, EXCVADDR # fault address 1715 + 8: rsr a1, excvaddr # fault address 1716 1716 _PGD_OFFSET(a0, a1, a4) 1717 1717 l32i a0, a0, 0 1718 1718 beqz a0, 2f ··· 1725 1725 1726 1726 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE 1727 1727 or a4, a4, a1 1728 - rsr a1, EXCVADDR 1728 + rsr a1, excvaddr 1729 1729 s32i a4, a0, 0 1730 1730 1731 1731 /* We need to flush the cache if we have page coloring. */ ··· 1749 1749 1750 1750 /* Restore excsave1 and a3. 
*/ 1751 1751 1752 - xsr a3, EXCSAVE_1 1752 + xsr a3, excsave1 1753 1753 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1754 1754 1755 - rsr a2, DEPC 1755 + rsr a2, depc 1756 1756 rfe 1757 1757 1758 1758 /* Double exception. Restore FIXUP handler and return. */ 1759 1759 1760 - 1: xsr a2, DEPC 1760 + 1: xsr a2, depc 1761 1761 esync 1762 1762 rfde 1763 1763 ··· 1766 1766 1767 1767 2: /* If there was a problem, handle fault in C */ 1768 1768 1769 - rsr a4, DEPC # still holds a2 1770 - xsr a3, EXCSAVE_1 1769 + rsr a4, depc # still holds a2 1770 + xsr a3, excsave1 1771 1771 s32i a4, a2, PT_AREG2 1772 1772 s32i a3, a2, PT_AREG3 1773 1773 l32i a4, a2, PT_AREG4 1774 1774 mov a1, a2 1775 1775 1776 - rsr a2, PS 1776 + rsr a2, ps 1777 1777 bbsi.l a2, PS_UM_BIT, 1f 1778 1778 j _kernel_exception 1779 1779 1: j _user_exception ··· 1901 1901 /* Disable ints while we manipulate the stack pointer. */ 1902 1902 1903 1903 movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL 1904 - xsr a14, PS 1905 - rsr a3, EXCSAVE_1 1904 + xsr a14, ps 1905 + rsr a3, excsave1 1906 1906 rsync 1907 1907 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ 1908 1908 ··· 1910 1910 1911 1911 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) 1912 1912 l32i a3, a5, THREAD_CPENABLE 1913 - xsr a3, CPENABLE 1913 + xsr a3, cpenable 1914 1914 s32i a3, a4, THREAD_CPENABLE 1915 1915 #endif 1916 1916 ··· 1924 1924 * we return from kernel space. 1925 1925 */ 1926 1926 1927 - rsr a3, EXCSAVE_1 # exc_table 1927 + rsr a3, excsave1 # exc_table 1928 1928 movi a6, 0 1929 1929 addi a7, a5, PT_REGS_OFFSET 1930 1930 s32i a6, a3, EXC_TABLE_FIXUP ··· 1937 1937 1938 1938 load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1939 1939 1940 - wsr a14, PS 1940 + wsr a14, ps 1941 1941 mov a2, a12 # return 'prev' 1942 1942 rsync 1943 1943
+18 -18
arch/xtensa/kernel/head.S
··· 61 61 /* Disable interrupts and exceptions. */ 62 62 63 63 movi a0, LOCKLEVEL 64 - wsr a0, PS 64 + wsr a0, ps 65 65 66 66 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 67 67 68 - wsr a2, EXCSAVE_1 68 + wsr a2, excsave1 69 69 70 70 /* Start with a fresh windowbase and windowstart. */ 71 71 72 72 movi a1, 1 73 73 movi a0, 0 74 - wsr a1, WINDOWSTART 75 - wsr a0, WINDOWBASE 74 + wsr a1, windowstart 75 + wsr a0, windowbase 76 76 rsync 77 77 78 78 /* Set a0 to 0 for the remaining initialization. */ ··· 82 82 /* Clear debugging registers. */ 83 83 84 84 #if XCHAL_HAVE_DEBUG 85 - wsr a0, IBREAKENABLE 86 - wsr a0, ICOUNT 85 + wsr a0, ibreakenable 86 + wsr a0, icount 87 87 movi a1, 15 88 - wsr a0, ICOUNTLEVEL 88 + wsr a0, icountlevel 89 89 90 90 .set _index, 0 91 91 .rept XCHAL_NUM_DBREAK - 1 92 - wsr a0, DBREAKC + _index 92 + wsr a0, SREG_DBREAKC + _index 93 93 .set _index, _index + 1 94 94 .endr 95 95 #endif 96 96 97 97 /* Clear CCOUNT (not really necessary, but nice) */ 98 98 99 - wsr a0, CCOUNT # not really necessary, but nice 99 + wsr a0, ccount # not really necessary, but nice 100 100 101 101 /* Disable zero-loops. */ 102 102 103 103 #if XCHAL_HAVE_LOOPS 104 - wsr a0, LCOUNT 104 + wsr a0, lcount 105 105 #endif 106 106 107 107 /* Disable all timers. */ 108 108 109 109 .set _index, 0 110 110 .rept XCHAL_NUM_TIMERS - 1 111 - wsr a0, CCOMPARE + _index 111 + wsr a0, SREG_CCOMPARE + _index 112 112 .set _index, _index + 1 113 113 .endr 114 114 115 115 /* Interrupt initialization. */ 116 116 117 117 movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE 118 - wsr a0, INTENABLE 119 - wsr a2, INTCLEAR 118 + wsr a0, intenable 119 + wsr a2, intclear 120 120 121 121 /* Disable coprocessors. 
*/ 122 122 123 123 #if XCHAL_CP_NUM > 0 124 - wsr a0, CPENABLE 124 + wsr a0, cpenable 125 125 #endif 126 126 127 127 /* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0 ··· 132 132 */ 133 133 134 134 movi a1, 1 135 - wsr a1, PS 135 + wsr a1, ps 136 136 rsync 137 137 138 138 /* Initialize the caches. ··· 206 206 addi a1, a1, KERNEL_STACK_SIZE 207 207 208 208 movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0 209 - wsr a2, PS # (enable reg-windows; progmode stack) 209 + wsr a2, ps # (enable reg-windows; progmode stack) 210 210 rsync 211 211 212 212 /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/ 213 213 214 214 movi a2, debug_exception 215 - wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL 215 + wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 216 216 217 217 /* Set up EXCSAVE[1] to point to the exc_table. */ 218 218 219 219 movi a6, exc_table 220 - xsr a6, EXCSAVE_1 220 + xsr a6, excsave1 221 221 222 222 /* init_arch kick-starts the linux kernel */ 223 223
+3 -3
arch/xtensa/kernel/irq.c
··· 72 72 static void xtensa_irq_mask(struct irq_data *d) 73 73 { 74 74 cached_irq_mask &= ~(1 << d->irq); 75 - set_sr (cached_irq_mask, INTENABLE); 75 + set_sr (cached_irq_mask, intenable); 76 76 } 77 77 78 78 static void xtensa_irq_unmask(struct irq_data *d) 79 79 { 80 80 cached_irq_mask |= 1 << d->irq; 81 - set_sr (cached_irq_mask, INTENABLE); 81 + set_sr (cached_irq_mask, intenable); 82 82 } 83 83 84 84 static void xtensa_irq_enable(struct irq_data *d) ··· 95 95 96 96 static void xtensa_irq_ack(struct irq_data *d) 97 97 { 98 - set_sr(1 << d->irq, INTCLEAR); 98 + set_sr(1 << d->irq, intclear); 99 99 } 100 100 101 101 static int xtensa_irq_retrigger(struct irq_data *d)
+9 -9
arch/xtensa/kernel/traps.c
··· 202 202 203 203 void do_interrupt (struct pt_regs *regs) 204 204 { 205 - unsigned long intread = get_sr (INTREAD); 206 - unsigned long intenable = get_sr (INTENABLE); 205 + unsigned long intread = get_sr (interrupt); 206 + unsigned long intenable = get_sr (intenable); 207 207 int i, mask; 208 208 209 209 /* Handle all interrupts (no priorities). ··· 213 213 214 214 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) { 215 215 if (mask & (intread & intenable)) { 216 - set_sr (mask, INTCLEAR); 216 + set_sr (mask, intclear); 217 217 do_IRQ (i,regs); 218 218 } 219 219 } ··· 339 339 /* Initialize EXCSAVE_1 to hold the address of the exception table. */ 340 340 341 341 i = (unsigned long)exc_table; 342 - __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i)); 342 + __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i)); 343 343 } 344 344 345 345 /* ··· 386 386 unsigned int a0, ps; 387 387 388 388 __asm__ __volatile__ ( 389 - "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t" 389 + "movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t" 390 390 "mov a12, a0\n\t" 391 - "rsr a13," __stringify(SAR) "\n\t" 392 - "xsr a14," __stringify(PS) "\n\t" 391 + "rsr a13, sar\n\t" 392 + "xsr a14, ps\n\t" 393 393 "movi a0, _spill_registers\n\t" 394 394 "rsync\n\t" 395 395 "callx0 a0\n\t" 396 396 "mov a0, a12\n\t" 397 - "wsr a13," __stringify(SAR) "\n\t" 398 - "wsr a14," __stringify(PS) "\n\t" 397 + "wsr a13, sar\n\t" 398 + "wsr a14, ps\n\t" 399 399 :: "a" (&a0), "a" (&ps) 400 400 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); 401 401 }
+22 -22
arch/xtensa/kernel/vectors.S
··· 69 69 70 70 ENTRY(_UserExceptionVector) 71 71 72 - xsr a3, EXCSAVE_1 # save a3 and get dispatch table 73 - wsr a2, DEPC # save a2 72 + xsr a3, excsave1 # save a3 and get dispatch table 73 + wsr a2, depc # save a2 74 74 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2 75 75 s32i a0, a2, PT_AREG0 # save a0 to ESF 76 - rsr a0, EXCCAUSE # retrieve exception cause 76 + rsr a0, exccause # retrieve exception cause 77 77 s32i a0, a2, PT_DEPC # mark it as a regular exception 78 78 addx4 a0, a0, a3 # find entry in table 79 79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler ··· 93 93 94 94 ENTRY(_KernelExceptionVector) 95 95 96 - xsr a3, EXCSAVE_1 # save a3, and get dispatch table 97 - wsr a2, DEPC # save a2 96 + xsr a3, excsave1 # save a3, and get dispatch table 97 + wsr a2, depc # save a2 98 98 addi a2, a1, -16-PT_SIZE # adjust stack pointer 99 99 s32i a0, a2, PT_AREG0 # save a0 to ESF 100 - rsr a0, EXCCAUSE # retrieve exception cause 100 + rsr a0, exccause # retrieve exception cause 101 101 s32i a0, a2, PT_DEPC # mark it as a regular exception 102 102 addx4 a0, a0, a3 # find entry in table 103 103 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address ··· 205 205 206 206 /* Deliberately destroy excsave (don't assume it's value was valid). */ 207 207 208 - wsr a3, EXCSAVE_1 # save a3 208 + wsr a3, excsave1 # save a3 209 209 210 210 /* Check for kernel double exception (usually fatal). */ 211 211 212 - rsr a3, PS 212 + rsr a3, ps 213 213 _bbci.l a3, PS_UM_BIT, .Lksp 214 214 215 215 /* Check if we are currently handling a window exception. */ 216 216 /* Note: We don't need to indicate that we enter a critical section. */ 217 217 218 - xsr a0, DEPC # get DEPC, save a0 218 + xsr a0, depc # get DEPC, save a0 219 219 220 220 movi a3, XCHAL_WINDOW_VECTORS_VADDR 221 221 _bltu a0, a3, .Lfixup ··· 243 243 * Note: We can trash the current window frame (a0...a3) and depc! 
244 244 */ 245 245 246 - wsr a2, DEPC # save stack pointer temporarily 247 - rsr a0, PS 246 + wsr a2, depc # save stack pointer temporarily 247 + rsr a0, ps 248 248 extui a0, a0, PS_OWB_SHIFT, 4 249 - wsr a0, WINDOWBASE 249 + wsr a0, windowbase 250 250 rsync 251 251 252 252 /* We are now in the previous window frame. Save registers again. */ 253 253 254 - xsr a2, DEPC # save a2 and get stack pointer 254 + xsr a2, depc # save a2 and get stack pointer 255 255 s32i a0, a2, PT_AREG0 256 256 257 - wsr a3, EXCSAVE_1 # save a3 257 + wsr a3, excsave1 # save a3 258 258 movi a3, exc_table 259 259 260 - rsr a0, EXCCAUSE 260 + rsr a0, exccause 261 261 s32i a0, a2, PT_DEPC # mark it as a regular exception 262 262 addx4 a0, a0, a3 263 263 l32i a0, a0, EXC_TABLE_FAST_USER ··· 290 290 291 291 /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ 292 292 293 - xsr a3, DEPC 293 + xsr a3, depc 294 294 s32i a0, a2, PT_DEPC 295 295 s32i a3, a2, PT_AREG0 296 296 297 297 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ 298 298 299 299 movi a3, exc_table 300 - rsr a0, EXCCAUSE 300 + rsr a0, exccause 301 301 addx4 a0, a0, a3 302 302 l32i a0, a0, EXC_TABLE_FAST_USER 303 303 jx a0 ··· 312 312 313 313 .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ 314 314 315 - rsr a3, EXCCAUSE 315 + rsr a3, exccause 316 316 beqi a3, EXCCAUSE_ITLB_MISS, 1f 317 317 addi a3, a3, -EXCCAUSE_DTLB_MISS 318 318 bnez a3, .Lunrecoverable ··· 328 328 329 329 .Lunrecoverable_fixup: 330 330 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE 331 - xsr a0, DEPC 331 + xsr a0, depc 332 332 333 333 .Lunrecoverable: 334 - rsr a3, EXCSAVE_1 335 - wsr a0, EXCSAVE_1 334 + rsr a3, excsave1 335 + wsr a0, excsave1 336 336 movi a0, unrecoverable_exception 337 337 callx0 a0 338 338 ··· 349 349 .section .DebugInterruptVector.text, "ax" 350 350 351 351 ENTRY(_DebugInterruptVector) 352 - xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL 352 + xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 353 353 jx a0 354 354 355 355
+5 -5
arch/xtensa/platforms/iss/setup.c
··· 61 61 * jump to the reset vector. */ 62 62 63 63 __asm__ __volatile__("movi a2, 15\n\t" 64 - "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" 64 + "wsr a2, icountlevel\n\t" 65 65 "movi a2, 0\n\t" 66 - "wsr a2, " __stringify(ICOUNT) "\n\t" 67 - "wsr a2, " __stringify(IBREAKENABLE) "\n\t" 68 - "wsr a2, " __stringify(LCOUNT) "\n\t" 66 + "wsr a2, icount\n\t" 67 + "wsr a2, ibreakenable\n\t" 68 + "wsr a2, lcount\n\t" 69 69 "movi a2, 0x1f\n\t" 70 - "wsr a2, " __stringify(PS) "\n\t" 70 + "wsr a2, ps\n\t" 71 71 "isync\n\t" 72 72 "jx %0\n\t" 73 73 :
+5 -5
arch/xtensa/platforms/xt2000/setup.c
··· 66 66 * jump to the reset vector. */ 67 67 68 68 __asm__ __volatile__ ("movi a2, 15\n\t" 69 - "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" 69 + "wsr a2, icountlevel\n\t" 70 70 "movi a2, 0\n\t" 71 - "wsr a2, " __stringify(ICOUNT) "\n\t" 72 - "wsr a2, " __stringify(IBREAKENABLE) "\n\t" 73 - "wsr a2, " __stringify(LCOUNT) "\n\t" 71 + "wsr a2, icount\n\t" 72 + "wsr a2, ibreakenable\n\t" 73 + "wsr a2, lcount\n\t" 74 74 "movi a2, 0x1f\n\t" 75 - "wsr a2, " __stringify(PS) "\n\t" 75 + "wsr a2, ps\n\t" 76 76 "isync\n\t" 77 77 "jx %0\n\t" 78 78 :