// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <stdio.h>
#include <stdlib.h>

#define unlikely(cond) (cond)
#include <asm/insn.h>
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"

#define CONFIG_64BIT 1
#include <asm/nops.h>

#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
#include <objtool/builtin.h>
#include <arch/elf.h>

static int is_x86_64(const struct elf *elf)
{
	switch (elf->ehdr.e_machine) {
	case EM_X86_64:
		return 1;
	case EM_386:
		return 0;
	default:
		WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
		return -1;
	}
}

bool arch_callee_saved_reg(unsigned char reg)
{
	switch (reg) {
	case CFI_BP:
	case CFI_BX:
	case CFI_R12:
	case CFI_R13:
	case CFI_R14:
	case CFI_R15:
		return true;

	case CFI_AX:
	case CFI_CX:
	case CFI_DX:
	case CFI_SI:
	case CFI_DI:
	case CFI_SP:
	case CFI_R8:
	case CFI_R9:
	case CFI_R10:
	case CFI_R11:
	case CFI_RA:
	default:
		return false;
	}
}

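/*
 * Note on the +4 below: on x86-64 a PC-relative relocation (e.g.
 * R_X86_64_PC32/PLT32) is applied to a 4-byte field that ends 4 bytes
 * before the next instruction, so its addend is normally the target
 * offset minus 4.  Adding 4 back recovers the destination's offset from
 * the relocation's symbol; for a plain "call foo" the addend is -4 and
 * the result is 0.
 */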
unsigned long arch_dest_reloc_offset(int addend)
{
	return addend + 4;
}

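/*
 * Relative jumps and calls are encoded relative to the end of the
 * instruction.  For example, an e9 (jmp rel32) at offset 0x100 with
 * length 5 and immediate 0x20 lands at 0x100 + 5 + 0x20 = 0x125.
 */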
unsigned long arch_jump_destination(struct instruction *insn)
{
	return insn->offset + insn->len + insn->immediate;
}

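/*
 * ADD_OP() allocates a struct stack_op, queues it on ops_list and then runs
 * the attached block exactly once with 'op' pointing at the new entry (the
 * for-loop's "op = NULL" step guarantees a single pass).  On allocation
 * failure it returns -1 from the enclosing function.  Typical use:
 *
 *	ADD_OP(op) {
 *		op->src.type = OP_SRC_POP;
 *		op->dest.type = OP_DEST_REG;
 *		op->dest.reg = CFI_BP;
 *	}
 */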
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (list_add_tail(&op->list, ops_list); op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 * r/m| AX  CX  DX  BX |  SP |  BP |  SI  DI |
 *    | R8  R9 R10 R11 | R12 | R13 | R14 R15 |
 * Mod+----------------+-----+-----+---------+
 *  00 |    [r/m]       |[SIB]|[IP+]|  [r/m]  |
 *  01 |  [r/m + d8]    |[S+d]|   [r/m + d8]  |
 *  10 |  [r/m + d32]   |[S+D]|  [r/m + d32]  |
 *  11 |                  r/m                 |
 */

#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

#define rm_is(reg)	(have_SIB() ? \
			 sib_base == (reg) && sib_index == CFI_SP : \
			 modrm_rm == (reg))

#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))
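/*
 * Examples:
 *
 *   48 89 e5 (mov %rsp, %rbp): rex 0x48 (W=1), opcode 0x89, ModRM 0xe5 ->
 *   mod=3, reg=4 (SP), rm=5 (BP), so mod_is_reg() and rm_is_reg(CFI_BP).
 *
 *   48 89 44 24 08 (mov %rax, 0x8(%rsp)): ModRM 0x44 -> mod=1, reg=0 (AX),
 *   rm=4, so a SIB byte follows; SIB 0x24 -> base=4 (SP), index=4 (none),
 *   hence rm_is_mem(CFI_SP) with an 8-bit displacement of 8.
 */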

/*
 * The 0x3e byte is the DS segment-override prefix, which Intel CET reuses
 * as the NOTRACK prefix on indirect branches.
 */
static bool has_notrack_prefix(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x3e)
			return true;
	}

	return false;
}

int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    unsigned int *len, enum insn_type *type,
			    unsigned long *immediate,
			    struct list_head *ops_list)
{
	const struct elf *elf = file->elf;
	struct insn insn;
	int x86_64, ret;
	unsigned char op1, op2, op3, prefix,
		      rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
		      sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
	struct stack_op *op = NULL;
	struct symbol *sym;
	u64 imm;

	x86_64 = is_x86_64(elf);
	if (x86_64 == -1)
		return -1;

	ret = insn_decode(&insn, sec->data->d_buf + offset, maxlen,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0) {
		WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
		return -1;
	}

	*len = insn.length;
	*type = INSN_OTHER;

	if (insn.vex_prefix.nbytes)
		return 0;

	prefix = insn.prefixes.bytes[0];

	op1 = insn.opcode.bytes[0];
	op2 = insn.opcode.bytes[1];
	op3 = insn.opcode.bytes[2];

	if (insn.rex_prefix.nbytes) {
		rex = insn.rex_prefix.bytes[0];
		rex_w = X86_REX_W(rex) >> 3;
		rex_r = X86_REX_R(rex) >> 2;
		rex_x = X86_REX_X(rex) >> 1;
		rex_b = X86_REX_B(rex);
	}

	if (insn.modrm.nbytes) {
		modrm = insn.modrm.bytes[0];
		modrm_mod = X86_MODRM_MOD(modrm);
		modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
		modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
	}

	if (insn.sib.nbytes) {
		sib = insn.sib.bytes[0];
		/* sib_scale = X86_SIB_SCALE(sib); */
		sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
		sib_base = X86_SIB_BASE(sib) + 8*rex_b;
	}

	switch (op1) {

	case 0x1:
	case 0x29:
		if (rex_w && rm_is_reg(CFI_SP)) {

			/* add/sub reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
		}
		break;

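	/*
	 * Example: 41 57 is "push %r15": REX.B contributes the high bit,
	 * so the register below is (0x57 & 0x7) + 8 = CFI_R15.
	 */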
	case 0x50 ... 0x57:

		/* push reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = (op1 & 0x7) + 8*rex_b;
			op->dest.type = OP_DEST_PUSH;
		}

		break;

	case 0x58 ... 0x5f:

		/* pop reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = (op1 & 0x7) + 8*rex_b;
		}

		break;

	case 0x68:
	case 0x6a:
		/* push immediate */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x70 ... 0x7f:
		*type = INSN_JUMP_CONDITIONAL;
		break;

	case 0x80 ... 0x83:
		/*
		 * 1000 00sw : mod OP r/m : immediate
		 *
		 * s - sign extend immediate
		 * w - imm8 / imm32
		 *
		 * OP: 000 ADD    100 AND
		 *     001 OR     101 SUB
		 *     010 ADC    110 XOR
		 *     011 SBB    111 CMP
		 */
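		/*
		 * Example: 48 83 ec 28 is "sub $0x28, %rsp": s=1, w=1,
		 * ModRM 0xec -> mod=3, reg=5 (SUB), rm=4 (SP); it is
		 * recorded below as an OP_SRC_ADD of -0x28 onto CFI_SP.
		 */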

		/* 64bit only */
		if (!rex_w)
			break;

		/* %rsp target only */
		if (!rm_is_reg(CFI_SP))
			break;

		imm = insn.immediate.value;
		if (op1 & 2) { /* sign extend */
			if (op1 & 1) { /* imm32 */
				imm <<= 32;
				imm = (s64)imm >> 32;
			} else { /* imm8 */
				imm <<= 56;
				imm = (s64)imm >> 56;
			}
		}

		switch (modrm_reg & 7) {
		case 5:
			imm = -imm;
			/* fallthrough */
		case 0:
			/* add/sub imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = imm;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		case 4:
			/* and imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_AND;
				op->src.reg = CFI_SP;
				op->src.offset = insn.immediate.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		default:
			/* WARN ? */
			break;
		}

		break;

	case 0x89:
		if (!rex_w)
			break;

		if (modrm_reg == CFI_SP) {

			if (mod_is_reg()) {
				/* mov %rsp, reg */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG;
					op->dest.reg = modrm_rm;
				}
				break;

			} else {
				/* skip RIP relative displacement */
				if (is_RIP())
					break;

				/* skip nontrivial SIB */
				if (have_SIB()) {
					modrm_rm = sib_base;
					if (sib_index != CFI_SP)
						break;
				}

				/* mov %rsp, disp(%reg) */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG_INDIRECT;
					op->dest.reg = modrm_rm;
					op->dest.offset = insn.displacement.value;
				}
				break;
			}

			break;
		}

		if (rm_is_reg(CFI_SP)) {

			/* mov reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		/* fallthrough */
	case 0x88:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov reg, disp(%rbp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_BP;
				op->dest.offset = insn.displacement.value;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov reg, disp(%rsp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_SP;
				op->dest.offset = insn.displacement.value;
			}
			break;
		}

		break;

	case 0x8b:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {

			/* mov disp(%rbp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_BP;
				op->src.offset = insn.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {

			/* mov disp(%rsp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_SP;
				op->src.offset = insn.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		break;

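	/*
	 * Example: 48 8d 65 f0 is "lea -0x10(%rbp), %rsp": ModRM 0x65 ->
	 * mod=1, reg=4 (SP), rm=5 (BP), disp8 -0x10; it is decoded below
	 * as an OP_SRC_ADD of -0x10 to CFI_BP with CFI_SP as destination.
	 */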
	case 0x8d:
		if (mod_is_reg()) {
			WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
			break;
		}

		/* skip non 64bit ops */
		if (!rex_w)
			break;

		/* skip RIP relative displacement */
		if (is_RIP())
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* lea disp(%src), %dst */
		ADD_OP(op) {
			op->src.offset = insn.displacement.value;
			if (!op->src.offset) {
				/* lea (%src), %dst */
				op->src.type = OP_SRC_REG;
			} else {
				/* lea disp(%src), %dst */
				op->src.type = OP_SRC_ADD;
			}
			op->src.reg = modrm_rm;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = modrm_reg;
		}
		break;

	case 0x8f:
		/* pop to mem */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x90:
		*type = INSN_NOP;
		break;

	case 0x9c:
		/* pushf */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSHF;
		}
		break;

	case 0x9d:
		/* popf */
		ADD_OP(op) {
			op->src.type = OP_SRC_POPF;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x0f:

		if (op2 == 0x01) {

			if (modrm == 0xca)
				*type = INSN_CLAC;
			else if (modrm == 0xcb)
				*type = INSN_STAC;

		} else if (op2 >= 0x80 && op2 <= 0x8f) {

			*type = INSN_JUMP_CONDITIONAL;

		} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
			   op2 == 0x35) {

			/* syscall, sysret, sysenter, sysexit */
			*type = INSN_CONTEXT_SWITCH;

		} else if (op2 == 0x0b || op2 == 0xb9) {

			/* ud2 */
			*type = INSN_BUG;

		} else if (op2 == 0x0d || op2 == 0x1f) {

			/* nopl/nopw */
			*type = INSN_NOP;

		} else if (op2 == 0x1e) {

			/* endbr64 / endbr32 */
			if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
				*type = INSN_ENDBR;

		} else if (op2 == 0x38 && op3 == 0xf8) {
			if (insn.prefixes.nbytes == 1 &&
			    insn.prefixes.bytes[0] == 0xf2) {
				/* ENQCMD cannot be used in the kernel. */
				WARN("ENQCMD instruction at %s:%lx", sec->name,
				     offset);
			}

		} else if (op2 == 0xa0 || op2 == 0xa8) {

			/* push fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}

		} else if (op2 == 0xa1 || op2 == 0xa9) {

			/* pop fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_POP;
				op->dest.type = OP_DEST_MEM;
			}
		}

		break;

	case 0xc9:
		/*
		 * leave
		 *
		 * equivalent to:
		 * mov bp, sp
		 * pop bp
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = CFI_BP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_SP;
		}
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_BP;
		}
		break;

	case 0xcc:
		/* int3 */
		*type = INSN_TRAP;
		break;

	case 0xe3:
		/* jecxz/jrcxz */
		*type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe9:
	case 0xeb:
		*type = INSN_JUMP_UNCONDITIONAL;
		break;

	case 0xc2:
	case 0xc3:
		*type = INSN_RETURN;
		break;

	case 0xc7: /* mov imm, r/m */
		if (!noinstr)
			break;

		if (insn.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
			struct reloc *immr, *disp;
			struct symbol *func;
			int idx;

			immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
			disp = find_reloc_by_dest(elf, (void *)sec, offset+7);

			if (!immr || strcmp(immr->sym->name, "pv_ops"))
				break;

			idx = (immr->addend + 8) / sizeof(void *);

			func = disp->sym;
			if (disp->sym->type == STT_SECTION)
				func = find_symbol_by_offset(disp->sym->sec, disp->addend);
			if (!func) {
				WARN("no func for pv_ops[]");
				return -1;
			}

			objtool_pv_add(file, idx, func);
		}

		break;

	case 0xcf: /* iret */
		/*
		 * Handle sync_core(), which has an IRET to self.
		 * All other IRET are in STT_NONE entry code.
		 */
		sym = find_symbol_containing(sec, offset);
		if (sym && sym->type == STT_FUNC) {
			ADD_OP(op) {
				/*
				 * add $40, %rsp: skip the 5*8 byte hardware
				 * IRET frame (RIP, CS, RFLAGS, RSP, SS).
				 */
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = 5*8;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		/* fallthrough */

	case 0xca: /* retf */
	case 0xcb: /* retf */
		*type = INSN_CONTEXT_SWITCH;
		break;

	case 0xe8:
		*type = INSN_CALL;
		/*
		 * For the impact on the stack, a CALL behaves like
		 * a PUSH of an immediate value (the return address).
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0xfc:
		*type = INSN_CLD;
		break;

	case 0xfd:
		*type = INSN_STD;
		break;

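	/*
	 * Opcode 0xff is a group: the ModRM reg field selects the operation
	 * (/2 indirect call, /3 far call, /4 indirect jmp, /5 far jmp,
	 * /6 push).  For example, ff d0 is "call *%rax" and ff e0 is
	 * "jmp *%rax".
	 */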
	case 0xff:
		if (modrm_reg == 2 || modrm_reg == 3) {

			*type = INSN_CALL_DYNAMIC;
			if (has_notrack_prefix(&insn))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 4) {

			*type = INSN_JUMP_DYNAMIC;
			if (has_notrack_prefix(&insn))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);

		} else if (modrm_reg == 5) {

			/* jmpf */
			*type = INSN_CONTEXT_SWITCH;

		} else if (modrm_reg == 6) {

			/* push from mem */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		}

		break;

	default:
		break;
	}

	*immediate = insn.immediate.nbytes ? insn.immediate.value : 0;

	return 0;
}

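/*
 * At function entry the CALL has just pushed the return address, so the
 * call frame address (the caller's stack pointer at the call site) is the
 * current SP plus 8, and the return address itself sits at CFA - 8; no
 * other register is tracked yet.
 */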
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		state->regs[i].base = CFI_UNDEFINED;
		state->regs[i].offset = 0;
	}

	/* initial CFA (call frame address) */
	state->cfa.base = CFI_SP;
	state->cfa.offset = 8;

	/* initial RA (return address) */
	state->regs[CFI_RA].base = CFI_CFA;
	state->regs[CFI_RA].offset = -8;
}

const char *arch_nop_insn(int len)
{
	static const char nops[5][5] = {
		{ BYTES_NOP1 },
		{ BYTES_NOP2 },
		{ BYTES_NOP3 },
		{ BYTES_NOP4 },
		{ BYTES_NOP5 },
	};

	if (len < 1 || len > 5) {
		WARN("invalid NOP size: %d\n", len);
		return NULL;
	}

	return nops[len-1];
}

#define BYTE_RET	0xC3

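/*
 * A synthesized RET is padded with an int3 (0xcc) followed by NOPs; the
 * int3 presumably serves to stop straight-line speculation past the
 * return, in line with the kernel's SLS mitigation.
 */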
const char *arch_ret_insn(int len)
{
	static const char ret[5][5] = {
		{ BYTE_RET },
		{ BYTE_RET, 0xcc },
		{ BYTE_RET, 0xcc, BYTES_NOP1 },
		{ BYTE_RET, 0xcc, BYTES_NOP2 },
		{ BYTE_RET, 0xcc, BYTES_NOP3 },
	};

	if (len < 1 || len > 5) {
		WARN("invalid RET size: %d\n", len);
		return NULL;
	}

	return ret[len-1];
}

int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	switch (sp_reg) {
	case ORC_REG_UNDEFINED:
		*base = CFI_UNDEFINED;
		break;
	case ORC_REG_SP:
		*base = CFI_SP;
		break;
	case ORC_REG_BP:
		*base = CFI_BP;
		break;
	case ORC_REG_SP_INDIRECT:
		*base = CFI_SP_INDIRECT;
		break;
	case ORC_REG_R10:
		*base = CFI_R10;
		break;
	case ORC_REG_R13:
		*base = CFI_R13;
		break;
	case ORC_REG_DI:
		*base = CFI_DI;
		break;
	case ORC_REG_DX:
		*base = CFI_DX;
		break;
	default:
		return -1;
	}

	return 0;
}

bool arch_is_retpoline(struct symbol *sym)
{
	return !strncmp(sym->name, "__x86_indirect_", 15);
}