/* include/asm-mips/stackframe.h — as of Linux v2.6.18 (scraped viewer header removed) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		/* Save the assembler temporary $1/$at into the pt_regs frame at sp. */
		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		/*
		 * Save the caller-saved temporaries and HI/LO.  $8/$9 are
		 * saved here only on 32-bit kernels; on 64-bit kernels
		 * SAVE_SOME already stored them.
		 */
		.macro	SAVE_TEMP
		mfhi	v1
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_HI(sp)
		mflo	v1
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	v1, PT_LO(sp)
		LONG_S	$12, PT_R12(sp)
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
		.endm

		/* Save the callee-saved registers $16-$23 and $30. */
		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
		/*
		 * Load the current CPU's (or, under SMTC, the current TC's)
		 * saved kernel stack pointer into k1.  The per-CPU index is
		 * taken from CP0_CONTEXT (CP0_TCBIND for SMTC).  Clobbers k0.
		 */
		.macro	get_saved_sp	/* SMP variation */
#ifdef CONFIG_32BIT
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32
		mfc0	k0, CP0_TCBIND;
		.set	mips0
		lui	k1, %hi(kernelsp)
		srl	k0, k0, 19
		/* No need to shift down and up to clear bits 0-1 */
#else
		mfc0	k0, CP0_CONTEXT
		lui	k1, %hi(kernelsp)
		srl	k0, k0, 23
#endif
		addu	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
#endif
#ifdef CONFIG_64BIT
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips64
		/*
		 * Read TCBIND into k1, not k0: k0 is about to be used for
		 * building the kernelsp address, which previously clobbered
		 * the TC index before it was shifted.
		 */
		mfc0	k1, CP0_TCBIND
		.set	mips0
		lui	k0, %highest(kernelsp)
		dsrl	k1, 19
		/* No need to shift down and up to clear bits 0-2 */
#else
		MFC0	k1, CP0_CONTEXT
		lui	k0, %highest(kernelsp)
		dsrl	k1, 23
#endif /* CONFIG_MIPS_MT_SMTC */
		/* Build the full 64-bit address of kernelsp in k0 (both paths). */
		daddiu	k0, %higher(kernelsp)
		dsll	k0, k0, 16
		daddiu	k0, %hi(kernelsp)
		dsll	k0, k0, 16
		daddu	k1, k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
#endif /* CONFIG_64BIT */
		.endm

		/*
		 * Record \stackp as this CPU's (or TC's) kernel stack pointer.
		 * \temp is clobbered; \temp2 is unused (kept for callers).
		 */
		.macro	set_saved_sp stackp temp temp2
#ifdef CONFIG_32BIT
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	\temp, CP0_TCBIND
		srl	\temp, 19
#else
		mfc0	\temp, CP0_CONTEXT
		srl	\temp, 23
#endif
#endif
#ifdef CONFIG_64BIT
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	\temp, CP0_TCBIND
		dsrl	\temp, 19
#else
		MFC0	\temp, CP0_CONTEXT
		dsrl	\temp, 23
#endif
#endif
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		/* Load the (single) kernel stack pointer into k1. */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_64BIT
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#else
		lui	k1, %hi(kernelsp)
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

		/*
		 * Save the registers needed on every kernel entry.  If we
		 * came from user mode (CU0 clear) the kernel stack is
		 * fetched with get_saved_sp; otherwise we stay on the
		 * current stack.  On exit sp points at the new pt_regs and
		 * $28 at the current thread_info.
		 */
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		/* Round sp down to the thread_info pointer in $28. */
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
		.set	pop
		.endm

		/* Save the complete register set into the pt_regs frame. */
		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1, PT_R1(sp)
		.set	pop
		.endm

		/* Counterpart of SAVE_TEMP; $24 doubles as scratch for HI/LO. */
		.macro	RESTORE_TEMP
		LONG_L	$24, PT_LO(sp)
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		mthi	$24
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, 0x1f
		xori	a0, 0x1f
		mtc0	a0, CP0_STATUS
		/* Merge the saved IM bits (0xff00) into the live Status. */
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */

#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#else
#define STATMASK 0x1f
#endif
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * This may not really be necessary if ints are already
		 * inhibited here.
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	5				# dmt a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Only after EXL/ERL have been restored to status can we
 * restore TCStatus.IXMT.
 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	v0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		/* We know that TCStatus.IXMT should be set from above */
		xori	v0, v0, TCSTATUS_IXMT
		or	v0, v0, v1
		mtc0	v0, CP0_TCSTATUS
		_ehb
		andi	a1, a1, VPECONTROL_TE
		beqz	a1, 1f
		emt
1:
		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7, PT_R7(sp)
		LONG_L	$6, PT_R6(sp)
		LONG_L	$5, PT_R5(sp)
		LONG_L	$4, PT_R4(sp)
		LONG_L	$3, PT_R3(sp)
		LONG_L	$2, PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
		eret
		.set	mips0
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1f
		or	t0, t1
		xori	t0, 0x1f
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL...*/
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1f
		or	t0, t1
		xori	t0, 0x1e
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		_ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		_ehb
		/* We need to leave the global IE bit set, but clear EXL...*/
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC. We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	2				# dmt v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1e
		or	t0, t1
		xori	t0, 0x1e
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		_ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUS
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */