at v2.6.21 11 kB view raw
1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle 7 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine. 8 * Copyright (C) 1999 Silicon Graphics, Inc. 9 */ 10#ifndef _ASM_STACKFRAME_H 11#define _ASM_STACKFRAME_H 12 13#include <linux/threads.h> 14 15#include <asm/asm.h> 16#include <asm/asmmacro.h> 17#include <asm/mipsregs.h> 18#include <asm/asm-offsets.h> 19 20#ifdef CONFIG_MIPS_MT_SMTC 21#include <asm/mipsmtregs.h> 22#endif /* CONFIG_MIPS_MT_SMTC */ 23 24 .macro SAVE_AT 25 .set push 26 .set noat 27 LONG_S $1, PT_R1(sp) 28 .set pop 29 .endm 30 31 .macro SAVE_TEMP 32#ifdef CONFIG_CPU_HAS_SMARTMIPS 33 mflhxu v1 34 LONG_S v1, PT_LO(sp) 35 mflhxu v1 36 LONG_S v1, PT_HI(sp) 37 mflhxu v1 38 LONG_S v1, PT_ACX(sp) 39#else 40 mfhi v1 41 LONG_S v1, PT_HI(sp) 42 mflo v1 43 LONG_S v1, PT_LO(sp) 44#endif 45#ifdef CONFIG_32BIT 46 LONG_S $8, PT_R8(sp) 47 LONG_S $9, PT_R9(sp) 48#endif 49 LONG_S $10, PT_R10(sp) 50 LONG_S $11, PT_R11(sp) 51 LONG_S $12, PT_R12(sp) 52 LONG_S $13, PT_R13(sp) 53 LONG_S $14, PT_R14(sp) 54 LONG_S $15, PT_R15(sp) 55 LONG_S $24, PT_R24(sp) 56 .endm 57 58 .macro SAVE_STATIC 59 LONG_S $16, PT_R16(sp) 60 LONG_S $17, PT_R17(sp) 61 LONG_S $18, PT_R18(sp) 62 LONG_S $19, PT_R19(sp) 63 LONG_S $20, PT_R20(sp) 64 LONG_S $21, PT_R21(sp) 65 LONG_S $22, PT_R22(sp) 66 LONG_S $23, PT_R23(sp) 67 LONG_S $30, PT_R30(sp) 68 .endm 69 70#ifdef CONFIG_SMP 71#ifdef CONFIG_MIPS_MT_SMTC 72#define PTEBASE_SHIFT 19 /* TCBIND */ 73#else 74#define PTEBASE_SHIFT 23 /* CONTEXT */ 75#endif 76 .macro get_saved_sp /* SMP variation */ 77#ifdef CONFIG_MIPS_MT_SMTC 78 mfc0 k0, CP0_TCBIND 79#else 80 MFC0 k0, CP0_CONTEXT 81#endif 82#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4) 83 lui k1, %highest(kernelsp) 84 daddiu k1, %higher(kernelsp) 85 dsll k1, 16 86 daddiu k1, %hi(kernelsp) 87 dsll k1, 16 88#else 89 lui k1, %hi(kernelsp) 90#endif 91 LONG_SRL k0, PTEBASE_SHIFT 92 LONG_ADDU k1, k0 93 LONG_L k1, %lo(kernelsp)(k1) 94 .endm 95 96 .macro set_saved_sp stackp temp temp2 97#ifdef CONFIG_MIPS_MT_SMTC 98 mfc0 \temp, CP0_TCBIND 99#else 100 MFC0 \temp, CP0_CONTEXT 101#endif 102 LONG_SRL \temp, PTEBASE_SHIFT 103 LONG_S \stackp, kernelsp(\temp) 104 .endm 105#else 106 .macro get_saved_sp /* Uniprocessor variation */ 107#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4) 108 lui k1, %highest(kernelsp) 109 daddiu k1, %higher(kernelsp) 110 dsll k1, k1, 16 111 daddiu k1, %hi(kernelsp) 112 dsll k1, k1, 16 113#else 114 lui k1, %hi(kernelsp) 115#endif 116 LONG_L k1, %lo(kernelsp)(k1) 117 .endm 118 119 .macro set_saved_sp stackp temp temp2 120 LONG_S \stackp, kernelsp 121 .endm 122#endif 123 124 .macro SAVE_SOME 125 .set push 126 .set noat 127 .set reorder 128 mfc0 k0, CP0_STATUS 129 sll k0, 3 /* extract cu0 bit */ 130 .set noreorder 131 bltz k0, 8f 132 move k1, sp 133 .set reorder 134 /* Called from user mode, new stack. */ 135 get_saved_sp 1368: move k0, sp 137 PTR_SUBU sp, k1, PT_SIZE 138 LONG_S k0, PT_R29(sp) 139 LONG_S $3, PT_R3(sp) 140 /* 141 * You might think that you don't need to save $0, 142 * but the FPU emulator and gdb remote debug stub 143 * need it to operate correctly 144 */ 145 LONG_S $0, PT_R0(sp) 146 mfc0 v1, CP0_STATUS 147 LONG_S $2, PT_R2(sp) 148 LONG_S v1, PT_STATUS(sp) 149#ifdef CONFIG_MIPS_MT_SMTC 150 /* 151 * Ideally, these instructions would be shuffled in 152 * to cover the pipeline delay. 153 */ 154 .set mips32 155 mfc0 v1, CP0_TCSTATUS 156 .set mips0 157 LONG_S v1, PT_TCSTATUS(sp) 158#endif /* CONFIG_MIPS_MT_SMTC */ 159 LONG_S $4, PT_R4(sp) 160 mfc0 v1, CP0_CAUSE 161 LONG_S $5, PT_R5(sp) 162 LONG_S v1, PT_CAUSE(sp) 163 LONG_S $6, PT_R6(sp) 164 MFC0 v1, CP0_EPC 165 LONG_S $7, PT_R7(sp) 166#ifdef CONFIG_64BIT 167 LONG_S $8, PT_R8(sp) 168 LONG_S $9, PT_R9(sp) 169#endif 170 LONG_S v1, PT_EPC(sp) 171 LONG_S $25, PT_R25(sp) 172 LONG_S $28, PT_R28(sp) 173 LONG_S $31, PT_R31(sp) 174 ori $28, sp, _THREAD_MASK 175 xori $28, _THREAD_MASK 176 .set pop 177 .endm 178 179 .macro SAVE_ALL 180 SAVE_SOME 181 SAVE_AT 182 SAVE_TEMP 183 SAVE_STATIC 184 .endm 185 186 .macro RESTORE_AT 187 .set push 188 .set noat 189 LONG_L $1, PT_R1(sp) 190 .set pop 191 .endm 192 193 .macro RESTORE_TEMP 194#ifdef CONFIG_CPU_HAS_SMARTMIPS 195 LONG_L $24, PT_ACX(sp) 196 mtlhx $24 197 LONG_L $24, PT_HI(sp) 198 mtlhx $24 199 LONG_L $24, PT_LO(sp) 200 mtlhx $24 201#else 202 LONG_L $24, PT_LO(sp) 203 mtlo $24 204 LONG_L $24, PT_HI(sp) 205 mthi $24 206#endif 207#ifdef CONFIG_32BIT 208 LONG_L $8, PT_R8(sp) 209 LONG_L $9, PT_R9(sp) 210#endif 211 LONG_L $10, PT_R10(sp) 212 LONG_L $11, PT_R11(sp) 213 LONG_L $12, PT_R12(sp) 214 LONG_L $13, PT_R13(sp) 215 LONG_L $14, PT_R14(sp) 216 LONG_L $15, PT_R15(sp) 217 LONG_L $24, PT_R24(sp) 218 .endm 219 220 .macro RESTORE_STATIC 221 LONG_L $16, PT_R16(sp) 222 LONG_L $17, PT_R17(sp) 223 LONG_L $18, PT_R18(sp) 224 LONG_L $19, PT_R19(sp) 225 LONG_L $20, PT_R20(sp) 226 LONG_L $21, PT_R21(sp) 227 LONG_L $22, PT_R22(sp) 228 LONG_L $23, PT_R23(sp) 229 LONG_L $30, PT_R30(sp) 230 .endm 231 232#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 233 234 .macro RESTORE_SOME 235 .set push 236 .set reorder 237 .set noat 238 mfc0 a0, CP0_STATUS 239 ori a0, 0x1f 240 xori a0, 0x1f 241 mtc0 a0, CP0_STATUS 242 li v1, 0xff00 243 and a0, v1 244 LONG_L v0, PT_STATUS(sp) 245 nor v1, $0, v1 246 and v0, v1 247 or v0, a0 248 mtc0 v0, CP0_STATUS 249 LONG_L $31, PT_R31(sp) 250 LONG_L $28, PT_R28(sp) 251 LONG_L $25, PT_R25(sp) 252#ifdef CONFIG_64BIT 253 LONG_L $8, PT_R8(sp) 254 LONG_L $9, PT_R9(sp) 255#endif 256 LONG_L $7, PT_R7(sp) 257 LONG_L $6, PT_R6(sp) 258 LONG_L $5, PT_R5(sp) 259 LONG_L $4, PT_R4(sp) 260 LONG_L $3, PT_R3(sp) 261 LONG_L $2, PT_R2(sp) 262 .set pop 263 .endm 264 265 .macro RESTORE_SP_AND_RET 266 .set push 267 .set noreorder 268 LONG_L k0, PT_EPC(sp) 269 LONG_L sp, PT_R29(sp) 270 jr k0 271 rfe 272 .set pop 273 .endm 274 275#else 276/* 277 * For SMTC kernel, global IE should be left set, and interrupts 278 * controlled exclusively via IXMT. 279 */ 280 281#ifdef CONFIG_MIPS_MT_SMTC 282#define STATMASK 0x1e 283#else 284#define STATMASK 0x1f 285#endif 286 .macro RESTORE_SOME 287 .set push 288 .set reorder 289 .set noat 290#ifdef CONFIG_MIPS_MT_SMTC 291 .set mips32r2 292 /* 293 * This may not really be necessary if ints are already 294 * inhibited here. 295 */ 296 mfc0 v0, CP0_TCSTATUS 297 ori v0, TCSTATUS_IXMT 298 mtc0 v0, CP0_TCSTATUS 299 _ehb 300 DMT 5 # dmt a1 301 jal mips_ihb 302#endif /* CONFIG_MIPS_MT_SMTC */ 303 mfc0 a0, CP0_STATUS 304 ori a0, STATMASK 305 xori a0, STATMASK 306 mtc0 a0, CP0_STATUS 307 li v1, 0xff00 308 and a0, v1 309 LONG_L v0, PT_STATUS(sp) 310 nor v1, $0, v1 311 and v0, v1 312 or v0, a0 313 mtc0 v0, CP0_STATUS 314#ifdef CONFIG_MIPS_MT_SMTC 315/* 316 * Only after EXL/ERL have been restored to status can we 317 * restore TCStatus.IXMT. 318 */ 319 LONG_L v1, PT_TCSTATUS(sp) 320 _ehb 321 mfc0 v0, CP0_TCSTATUS 322 andi v1, TCSTATUS_IXMT 323 /* We know that TCStatua.IXMT should be set from above */ 324 xori v0, v0, TCSTATUS_IXMT 325 or v0, v0, v1 326 mtc0 v0, CP0_TCSTATUS 327 _ehb 328 andi a1, a1, VPECONTROL_TE 329 beqz a1, 1f 330 emt 3311: 332 .set mips0 333#endif /* CONFIG_MIPS_MT_SMTC */ 334 LONG_L v1, PT_EPC(sp) 335 MTC0 v1, CP0_EPC 336 LONG_L $31, PT_R31(sp) 337 LONG_L $28, PT_R28(sp) 338 LONG_L $25, PT_R25(sp) 339#ifdef CONFIG_64BIT 340 LONG_L $8, PT_R8(sp) 341 LONG_L $9, PT_R9(sp) 342#endif 343 LONG_L $7, PT_R7(sp) 344 LONG_L $6, PT_R6(sp) 345 LONG_L $5, PT_R5(sp) 346 LONG_L $4, PT_R4(sp) 347 LONG_L $3, PT_R3(sp) 348 LONG_L $2, PT_R2(sp) 349 .set pop 350 .endm 351 352 .macro RESTORE_SP_AND_RET 353 LONG_L sp, PT_R29(sp) 354 .set mips3 355 eret 356 .set mips0 357 .endm 358 359#endif 360 361 .macro RESTORE_SP 362 LONG_L sp, PT_R29(sp) 363 .endm 364 365 .macro RESTORE_ALL 366 RESTORE_TEMP 367 RESTORE_STATIC 368 RESTORE_AT 369 RESTORE_SOME 370 RESTORE_SP 371 .endm 372 373 .macro RESTORE_ALL_AND_RET 374 RESTORE_TEMP 375 RESTORE_STATIC 376 RESTORE_AT 377 RESTORE_SOME 378 RESTORE_SP_AND_RET 379 .endm 380 381/* 382 * Move to kernel mode and disable interrupts. 383 * Set cp0 enable bit as sign that we're running on the kernel stack 384 */ 385 .macro CLI 386#if !defined(CONFIG_MIPS_MT_SMTC) 387 mfc0 t0, CP0_STATUS 388 li t1, ST0_CU0 | 0x1f 389 or t0, t1 390 xori t0, 0x1f 391 mtc0 t0, CP0_STATUS 392#else /* CONFIG_MIPS_MT_SMTC */ 393 /* 394 * For SMTC, we need to set privilege 395 * and disable interrupts only for the 396 * current TC, using the TCStatus register. 397 */ 398 mfc0 t0,CP0_TCSTATUS 399 /* Fortunately CU 0 is in the same place in both registers */ 400 /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ 401 li t1, ST0_CU0 | 0x08001c00 402 or t0,t1 403 /* Clear TKSU, leave IXMT */ 404 xori t0, 0x00001800 405 mtc0 t0, CP0_TCSTATUS 406 _ehb 407 /* We need to leave the global IE bit set, but clear EXL...*/ 408 mfc0 t0, CP0_STATUS 409 ori t0, ST0_EXL | ST0_ERL 410 xori t0, ST0_EXL | ST0_ERL 411 mtc0 t0, CP0_STATUS 412#endif /* CONFIG_MIPS_MT_SMTC */ 413 irq_disable_hazard 414 .endm 415 416/* 417 * Move to kernel mode and enable interrupts. 418 * Set cp0 enable bit as sign that we're running on the kernel stack 419 */ 420 .macro STI 421#if !defined(CONFIG_MIPS_MT_SMTC) 422 mfc0 t0, CP0_STATUS 423 li t1, ST0_CU0 | 0x1f 424 or t0, t1 425 xori t0, 0x1e 426 mtc0 t0, CP0_STATUS 427#else /* CONFIG_MIPS_MT_SMTC */ 428 /* 429 * For SMTC, we need to set privilege 430 * and enable interrupts only for the 431 * current TC, using the TCStatus register. 432 */ 433 _ehb 434 mfc0 t0,CP0_TCSTATUS 435 /* Fortunately CU 0 is in the same place in both registers */ 436 /* Set TCU0, TKSU (for later inversion) and IXMT */ 437 li t1, ST0_CU0 | 0x08001c00 438 or t0,t1 439 /* Clear TKSU *and* IXMT */ 440 xori t0, 0x00001c00 441 mtc0 t0, CP0_TCSTATUS 442 _ehb 443 /* We need to leave the global IE bit set, but clear EXL...*/ 444 mfc0 t0, CP0_STATUS 445 ori t0, ST0_EXL 446 xori t0, ST0_EXL 447 mtc0 t0, CP0_STATUS 448 /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ 449#endif /* CONFIG_MIPS_MT_SMTC */ 450 irq_enable_hazard 451 .endm 452 453/* 454 * Just move to kernel mode and leave interrupts as they are. 455 * Set cp0 enable bit as sign that we're running on the kernel stack 456 */ 457 .macro KMODE 458#ifdef CONFIG_MIPS_MT_SMTC 459 /* 460 * This gets baroque in SMTC. We want to 461 * protect the non-atomic clearing of EXL 462 * with DMT/EMT, but we don't want to take 463 * an interrupt while DMT is still in effect. 464 */ 465 466 /* KMODE gets invoked from both reorder and noreorder code */ 467 .set push 468 .set mips32r2 469 .set noreorder 470 mfc0 v0, CP0_TCSTATUS 471 andi v1, v0, TCSTATUS_IXMT 472 ori v0, TCSTATUS_IXMT 473 mtc0 v0, CP0_TCSTATUS 474 _ehb 475 DMT 2 # dmt v0 476 /* 477 * We don't know a priori if ra is "live" 478 */ 479 move t0, ra 480 jal mips_ihb 481 nop /* delay slot */ 482 move ra, t0 483#endif /* CONFIG_MIPS_MT_SMTC */ 484 mfc0 t0, CP0_STATUS 485 li t1, ST0_CU0 | 0x1e 486 or t0, t1 487 xori t0, 0x1e 488 mtc0 t0, CP0_STATUS 489#ifdef CONFIG_MIPS_MT_SMTC 490 _ehb 491 andi v0, v0, VPECONTROL_TE 492 beqz v0, 2f 493 nop /* delay slot */ 494 emt 4952: 496 mfc0 v0, CP0_TCSTATUS 497 /* Clear IXMT, then OR in previous value */ 498 ori v0, TCSTATUS_IXMT 499 xori v0, TCSTATUS_IXMT 500 or v0, v1, v0 501 mtc0 v0, CP0_TCSTATUS 502 /* 503 * irq_disable_hazard below should expand to EHB 504 * on 24K/34K CPUS 505 */ 506 .set pop 507#endif /* CONFIG_MIPS_MT_SMTC */ 508 irq_disable_hazard 509 .endm 510 511#endif /* _ASM_STACKFRAME_H */