arch/tile: support backtracing on TILE-Gx

This functionality was stubbed out until recently. Now we support our
normal backtracing API on TILE-Gx as well as on TILE64/TILEPro.
This change includes a tweak to handle the instruction-encoding change
caused by adding addxli for compat mode.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
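
For reviewers unfamiliar with the API, here is a minimal sketch of how a
client drives the iterator that this patch fills in on TILE-Gx. It is
illustrative only and not part of the patch: the names bt_read_mem and
dump_stack_from are invented, the signatures are taken from the hunks
below, and a real memory reader must validate the address and fail
cleanly rather than calling memcpy blindly (as the FIXME in
bt_read_memory notes). In-kernel users go through the KBacktraceIterator
wrappers in arch/tile/kernel/stack.c, which is why the simulator-based
fallback there can be dropped below.

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/backtrace.h>

/* Hypothetical memory reader: a real one must check that 'addr' is a
 * valid, mapped address before copying, and return false if it is not.
 */
static bool bt_read_mem(void *result, VirtualAddress addr,
                        unsigned int size, void *extra)
{
        memcpy(result, (void *)addr, size);
        return true;
}

/* Walk and print every frame reachable from the given register state. */
static void dump_stack_from(VirtualAddress pc, VirtualAddress lr,
                            VirtualAddress sp, VirtualAddress r52)
{
        BacktraceIterator it;

        backtrace_init(&it, bt_read_mem, NULL, pc, lr, sp, r52);
        do {
                pr_err("  frame: pc %#lx sp %#lx\n",
                       (unsigned long)it.pc, (unsigned long)it.sp);
        } while (backtrace_next(&it));
}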

4 files changed, 105 insertions(+), 48 deletions(-)
arch/tile/include/asm/backtrace.h (+3, -1)

···
 
 #include <arch/chip.h>
 
-#if CHIP_VA_WIDTH() > 32
+#if defined(__tile__)
+typedef unsigned long VirtualAddress;
+#elif CHIP_VA_WIDTH() > 32
 typedef unsigned long long VirtualAddress;
 #else
 typedef unsigned int VirtualAddress;
arch/tile/kernel/backtrace.c (+99, -38)

···
 
 #include <arch/chip.h>
 
-#if TILE_CHIP < 10
-
-
 #include <asm/opcode-tile.h>
 
 
 #define TREG_SP 54
 #define TREG_LR 55
 
+
+#if TILE_CHIP >= 10
+#define tile_bundle_bits tilegx_bundle_bits
+#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define tile_decoded_instruction tilegx_decoded_instruction
+#define tile_mnemonic tilegx_mnemonic
+#define parse_insn_tile parse_insn_tilegx
+#define TILE_OPC_IRET TILEGX_OPC_IRET
+#define TILE_OPC_ADDI TILEGX_OPC_ADDI
+#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
+#define TILE_OPC_INFO TILEGX_OPC_INFO
+#define TILE_OPC_INFOL TILEGX_OPC_INFOL
+#define TILE_OPC_JRP TILEGX_OPC_JRP
+#define TILE_OPC_MOVE TILEGX_OPC_MOVE
+#define OPCODE_STORE TILEGX_OPC_ST
+typedef long long bt_int_reg_t;
+#else
+#define OPCODE_STORE TILE_OPC_SW
+typedef int bt_int_reg_t;
+#endif
 
 /** A decoded bundle used for backtracer analysis. */
 struct BacktraceBundle {

···
 /* This implementation only makes sense for native tools. */
 /** Default function to read memory. */
 static bool bt_read_memory(void *result, VirtualAddress addr,
-                           size_t size, void *extra)
+                           unsigned int size, void *extra)
 {
         /* FIXME: this should do some horrible signal stuff to catch
          * SEGV cleanly and fail.

···
                 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
         if (insn == NULL)
                 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
+#if TILE_CHIP >= 10
+        if (insn == NULL)
+                insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
+        if (insn == NULL)
+                insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
+#endif
         if (insn == NULL)
                 return false;
 

···
         return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
 }
 
-/** Does this bundle contain the instruction 'sw sp, lr'? */
+/** Does this bundle contain a store of lr to sp? */
 static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
 {
         static const int vals[2] = { TREG_SP, TREG_LR };
-        return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
+        return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
 }
+
+#if TILE_CHIP >= 10
+/** Track moveli values placed into registers. */
+static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
+                                    int moveli_args[])
+{
+        int i;
+        for (i = 0; i < bundle->num_insns; i++) {
+                const struct tile_decoded_instruction *insn =
+                        &bundle->insns[i];
+
+                if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
+                        int reg = insn->operand_values[0];
+                        moveli_args[reg] = insn->operand_values[1];
+                }
+        }
+}
+
+/** Does this bundle contain an 'add sp, sp, reg' instruction
+ * from a register that we saw a moveli into, and if so, what
+ * is the value in the register?
+ */
+static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
+                          int moveli_args[])
+{
+        static const int vals[2] = { TREG_SP, TREG_SP };
+
+        const struct tile_decoded_instruction *insn =
+                find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
+        if (insn) {
+                int reg = insn->operand_values[2];
+                if (moveli_args[reg]) {
+                        *adjust = moveli_args[reg];
+                        return true;
+                }
+        }
+        return false;
+}
+#endif
 
 /** Locates the caller's PC and SP for a program starting at the
  * given address.

···
         int num_bundles_prefetched = 0;
         int next_bundle = 0;
         VirtualAddress pc;
+
+#if TILE_CHIP >= 10
+        /* Naively try to track moveli values to support addx for -m32. */
+        int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
+#endif
 
         /* Default to assuming that the caller's sp is the current sp.
          * This is necessary to handle the case where we start backtracing

···
 
                 if (!sp_determined) {
                         int adjust;
-                        if (bt_has_addi_sp(&bundle, &adjust)) {
+                        if (bt_has_addi_sp(&bundle, &adjust)
+#if TILE_CHIP >= 10
+                            || bt_has_add_sp(&bundle, &adjust, moveli_args)
+#endif
+                            ) {
                                 location->sp_location = SP_LOC_OFFSET;
 
                                 if (adjust <= 0) {

···
                                         sp_determined = true;
                                 }
                         }
+
+#if TILE_CHIP >= 10
+                        /* Track moveli arguments for -m32 mode. */
+                        bt_update_moveli(&bundle, moveli_args);
+#endif
                 }
 
                 if (bt_has_iret(&bundle)) {

···
                 break;
         }
 
-        /* The frame pointer should theoretically be aligned mod 8. If
-         * it's not even aligned mod 4 then something terrible happened
-         * and we should mark it as invalid.
+        /* If the frame pointer is not aligned to the basic word size
+         * something terrible happened and we should mark it as invalid.
          */
-        if (fp % 4 != 0)
+        if (fp % sizeof(bt_int_reg_t) != 0)
                 fp = -1;
 
         /* -1 means "don't know initial_frame_caller_pc". */

···
         state->read_memory_func_extra = read_memory_func_extra;
 }
 
+/* Handle the case where the register holds more bits than the VA. */
+static bool valid_addr_reg(bt_int_reg_t reg)
+{
+        return ((VirtualAddress)reg == reg);
+}
+
 bool backtrace_next(BacktraceIterator *state)
 {
-        VirtualAddress next_fp, next_pc, next_frame[2];
+        VirtualAddress next_fp, next_pc;
+        bt_int_reg_t next_frame[2];
 
         if (state->fp == -1) {
                 /* No parent frame. */

···
         }
 
         next_fp = next_frame[1];
-        if (next_fp % 4 != 0) {
-                /* Caller's frame pointer is suspect, so give up.
-                 * Technically it should be aligned mod 8, but we will
-                 * be forgiving here.
-                 */
+        if (!valid_addr_reg(next_frame[1]) ||
+            next_fp % sizeof(bt_int_reg_t) != 0) {
+                /* Caller's frame pointer is suspect, so give up. */
                 return false;
         }

···
         } else {
                 /* Get the caller PC from the frame linkage area. */
                 next_pc = next_frame[0];
-                if (next_pc == 0 ||
+                if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
                     next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
                         /* The PC is suspect, so give up. */
                         return false;

···
 
         return true;
 }
-
-#else /* TILE_CHIP < 10 */
-
-void backtrace_init(BacktraceIterator *state,
-                    BacktraceMemoryReader read_memory_func,
-                    void *read_memory_func_extra,
-                    VirtualAddress pc, VirtualAddress lr,
-                    VirtualAddress sp, VirtualAddress r52)
-{
-        state->pc = pc;
-        state->sp = sp;
-        state->fp = -1;
-        state->initial_frame_caller_pc = -1;
-        state->read_memory_func = read_memory_func;
-        state->read_memory_func_extra = read_memory_func_extra;
-}
-
-bool backtrace_next(BacktraceIterator *state) { return false; }
-
-#endif /* TILE_CHIP < 10 */
arch/tile/kernel/stack.c (-8)

···
 /* Return a pt_regs pointer for a valid fault handler frame */
 static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 {
-#ifndef __tilegx__
         const char *fault = NULL;  /* happy compiler */
         char fault_buf[64];
         VirtualAddress sp = kbt->it.sp;

···
         }
         if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
                 return p;
-#endif
         return NULL;
 }

···
                         kbt->task->pid, kbt->task->tgid, kbt->task->comm,
                         smp_processor_id(), get_cycles());
         }
-#ifdef __tilegx__
-        if (kbt->is_current) {
-                __insn_mtspr(SPR_SIM_CONTROL,
-                             SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
-        }
-#endif
         kbt->verbose = 1;
         i = 0;
         for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
arch/tile/kernel/traps.c (+3, -1)

···
 #ifdef __tilegx__
         if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
                 return 0;
-        if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1)
+        if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1)
+                return 0;
+        if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1)
                 return 0;
         if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
                 return 0;