// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

struct cfi_init_state initial_func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls. This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks whether the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
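/*
 * Illustrative example (not from this file): a local helper such as
 *
 *	static void my_abort(void)
 *	{
 *		for (;;)
 *			panic("...");
 *	}
 *
 * decodes to code with no INSN_RETURN, so it is presumed to be a dead end
 * even though it isn't on the global_noreturns list below.
 */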
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"__module_put_and_exit",
		"complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_do_exit",
		"kunit_try_catch_throw",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	unsigned long nr_insns = 0;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text"))
			sec->noinstr = true;

		for (offset = 0; offset < sec->len; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			init_cfi_state(&insn->cfi);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->len - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

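		/*
		 * Associate each decoded instruction with the function
		 * symbol that contains it (symbol aliases are skipped).
		 */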
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;

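	/*
	 * Search backwards from the end of the section; ten bytes is
	 * presumably enough to cover the longest trailing instruction.
	 */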
	for (offset = sec->len - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
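		/*
		 * The annotation reloc appears to point just past the
		 * dead-end instruction, hence the step back below.
		 */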
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported. In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
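		/*
		 * Derive the key name by rewriting the trampoline prefix in
		 * place, e.g. "__SCT__foo" becomes "__SCK__foo" (prefixes
		 * per static_call_types.h).
		 */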
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
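		/*
		 * Note: the low bits of the key address encode site flags;
		 * STATIC_CALL_SITE_TAIL marks a tail-call (sibling-call)
		 * site.
		 */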
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines. This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

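/*
 * Cache sentinel for insn_reloc(): marks an instruction whose reloc lookup
 * was already attempted and found nothing, so the search isn't repeated.
 */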
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (arch_is_retpoline(reloc->sym)) {
			/*
			 * Retpoline jumps are really dynamic jumps in
			 * disguise, so convert them accordingly.
			 */
			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				insn->type = INSN_JUMP_DYNAMIC;
			else
				insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;

			list_add_tail(&insn->call_node,
				      &file->retpoline_call_list);

			insn->retpoline_safe = true;
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			insn->call_dest = reloc->sym;
			if (insn->call_dest->static_call_tramp) {
				list_add_tail(&insn->call_node,
					      &file->static_call_list);
			}
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section. These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {

				/* internal sibling call (without reloc) */
				insn->call_dest = insn->jump_dest->func;
				if (insn->call_dest->static_call_tramp) {
					list_add_tail(&insn->call_node,
						      &file->static_call_list);
				}
			}
		}
	}

	return 0;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			insn->call_dest = find_call_destination(insn->sec, dest_off);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			insn->call_dest = find_call_destination(reloc->sym->sec,
								dest_off);
			if (!insn->call_dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

		} else if (arch_is_retpoline(reloc->sym)) {
			/*
			 * Retpoline calls are really dynamic calls in
			 * disguise, so convert them accordingly.
			 */
			insn->type = INSN_CALL_DYNAMIC;
			insn->retpoline_safe = true;

			list_add_tail(&insn->call_node,
				      &file->retpoline_call_list);

			remove_insn_ops(insn);
			continue;

		} else
			insn->call_dest = reloc->sym;

		if (insn->call_dest && insn->call_dest->static_call_tramp) {
			list_add_tail(&insn->call_node,
				      &file->static_call_list);
		}

		/*
		 * Many compilers cannot disable KCOV with a function
		 * attribute, so they need a little help: NOP out any KCOV
		 * calls from noinstr text.
		 */
		if (insn->sec->noinstr &&
		    !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));
			insn->type = INSN_NOP;
		}

		if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;

			list_add_tail(&insn->mcount_loc_node,
				      &file->mcount_loc_list);
		}

		/*
		 * Whatever stack impact regular CALLs have, should be undone
		 * by the RETURN of the called function.
		 *
		 * Annotated intra-function calls retain the stack_ops but
		 * are converted to JUMP, see read_intra_function_calls().
		 */
		remove_insn_ops(insn);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;


	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;


	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original. This is needed to
		 * allow propagate_alt_cfi() to do its magic. When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);
		init_cfi_state(&nop->cfi);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type == INSN_NOP)
		return 0;

	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links: they skip over
	 * much of the 'in between' code, which could otherwise confuse the
	 * search.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

static int read_unwind_hints(struct objtool_file *file)
{
	struct section *sec, *relocsec;
	struct reloc *reloc;
	struct unwind_hint *hint;
	struct instruction *insn;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->len % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			set_func_state(&insn->cfi);
			continue;
		}

		if (arch_decode_hint_reg(insn, hint->sp_reg)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		insn->cfi.type = hint->type;
		insn->cfi.end = hint->end;
	}

	return 0;
}

static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

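		/*
		 * Direct-call destination: the end of the instruction plus
		 * its relative immediate.
		 */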
		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

static int read_static_call_tramps(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind == STB_GLOBAL &&
			    !strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

__weak int arch_rewrite_retpolines(struct objtool_file *file)
{
	return 0;
}

static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump,call}_destinations().
	 */
	ret = read_static_call_tramps(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destinations(); it changes INSN_CALL to
	 * INSN_JUMP_UNCONDITIONAL.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_special_section_alts(), since this will emit
	 * alternatives. Must be after add_{jump,call}_destinations(), since
1827 * those create the call insn lists.
1828 */
1829 ret = arch_rewrite_retpolines(file);
1830 if (ret)
1831 return ret;
1832
1833 return 0;
1834}
1835
1836static bool is_fentry_call(struct instruction *insn)
1837{
1838 if (insn->type == INSN_CALL && insn->call_dest &&
1839 insn->call_dest->type == STT_NOTYPE &&
1840 !strcmp(insn->call_dest->name, "__fentry__"))
1841 return true;
1842
1843 return false;
1844}
1845
1846static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
1847{
1848 struct cfi_state *cfi = &state->cfi;
1849 int i;
1850
1851 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
1852 return true;
1853
1854 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
1855 return true;
1856
1857 if (cfi->stack_size != initial_func_cfi.cfa.offset)
1858 return true;
1859
1860 for (i = 0; i < CFI_NUM_REGS; i++) {
1861 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
1862 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
1863 return true;
1864 }
1865
1866 return false;
1867}
1868
1869static bool check_reg_frame_pos(const struct cfi_reg *reg,
1870 int expected_offset)
1871{
1872 return reg->base == CFI_CFA &&
1873 reg->offset == expected_offset;
1874}
1875
1876static bool has_valid_stack_frame(struct insn_state *state)
1877{
1878 struct cfi_state *cfi = &state->cfi;
1879
1880 if (cfi->cfa.base == CFI_BP &&
1881 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
1882 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
1883 return true;
1884
1885 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
1886 return true;
1887
1888 return false;
1889}
1890
1891static int update_cfi_state_regs(struct instruction *insn,
1892 struct cfi_state *cfi,
1893 struct stack_op *op)
1894{
1895 struct cfi_reg *cfa = &cfi->cfa;
1896
1897 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
1898 return 0;
1899
1900 /* push */
1901 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
1902 cfa->offset += 8;
1903
1904 /* pop */
1905 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
1906 cfa->offset -= 8;
1907
1908 /* add immediate to sp */
1909 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
1910 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
1911 cfa->offset -= op->src.offset;
1912
1913 return 0;
1914}
1915
1916static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
1917{
1918 if (arch_callee_saved_reg(reg) &&
1919 cfi->regs[reg].base == CFI_UNDEFINED) {
1920 cfi->regs[reg].base = base;
1921 cfi->regs[reg].offset = offset;
1922 }
1923}
1924
1925static void restore_reg(struct cfi_state *cfi, unsigned char reg)
1926{
1927 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
1928 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
1929}
1930
1931/*
1932 * A note about DRAP stack alignment:
1933 *
1934 * GCC has the concept of a DRAP register, which is used to help keep track of
1935 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
1936 * register. The typical DRAP pattern is:
1937 *
1938 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
1939 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
1940 * 41 ff 72 f8 pushq -0x8(%r10)
1941 * 55 push %rbp
1942 * 48 89 e5 mov %rsp,%rbp
1943 * (more pushes)
1944 * 41 52 push %r10
1945 * ...
1946 * 41 5a pop %r10
1947 * (more pops)
1948 * 5d pop %rbp
1949 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1950 * c3 retq
1951 *
1952 * There are some variations in the epilogues, like:
1953 *
1954 * 5b pop %rbx
1955 * 41 5a pop %r10
1956 * 41 5c pop %r12
1957 * 41 5d pop %r13
1958 * 41 5e pop %r14
1959 * c9 leaveq
1960 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1961 * c3 retq
1962 *
1963 * and:
1964 *
1965 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
1966 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
1967 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
1968 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
1969 * c9 leaveq
1970 * 49 8d 62 f8 lea -0x8(%r10),%rsp
1971 * c3 retq
1972 *
1973 * Sometimes r13 is used as the DRAP register, in which case it's saved and
1974 * restored beforehand:
1975 *
1976 * 41 55 push %r13
1977 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
1978 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
1979 * ...
1980 * 49 8d 65 f0 lea -0x10(%r13),%rsp
1981 * 41 5d pop %r13
1982 * c3 retq
1983 */
1984static int update_cfi_state(struct instruction *insn,
1985 struct instruction *next_insn,
1986 struct cfi_state *cfi, struct stack_op *op)
1987{
1988 struct cfi_reg *cfa = &cfi->cfa;
1989 struct cfi_reg *regs = cfi->regs;
1990
1991 /* stack operations don't make sense with an undefined CFA */
1992 if (cfa->base == CFI_UNDEFINED) {
1993 if (insn->func) {
1994 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
1995 return -1;
1996 }
1997 return 0;
1998 }
1999
2000 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2001 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2002 return update_cfi_state_regs(insn, cfi, op);
2003
2004 switch (op->dest.type) {
2005
2006 case OP_DEST_REG:
2007 switch (op->src.type) {
2008
2009 case OP_SRC_REG:
2010 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2011 cfa->base == CFI_SP &&
2012 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) {
2013
2014 /* mov %rsp, %rbp */
2015 cfa->base = op->dest.reg;
2016 cfi->bp_scratch = false;
2017 }
2018
2019 else if (op->src.reg == CFI_SP &&
2020 op->dest.reg == CFI_BP && cfi->drap) {
2021
2022 /* drap: mov %rsp, %rbp */
2023 regs[CFI_BP].base = CFI_BP;
2024 regs[CFI_BP].offset = -cfi->stack_size;
2025 cfi->bp_scratch = false;
2026 }
2027
2028 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2029
2030 /*
2031 * mov %rsp, %reg
2032 *
2033 * This is needed for the rare case where GCC
2034 * does:
2035 *
2036 * mov %rsp, %rax
2037 * ...
2038 * mov %rax, %rsp
2039 */
2040 cfi->vals[op->dest.reg].base = CFI_CFA;
2041 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2042 }
2043
2044 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2045 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2046
2047 /*
2048 * mov %rbp, %rsp
2049 *
2050 * Restore the original stack pointer (Clang).
2051 */
2052 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2053 }
2054
2055 else if (op->dest.reg == cfa->base) {
2056
2057 /* mov %reg, %rsp */
2058 if (cfa->base == CFI_SP &&
2059 cfi->vals[op->src.reg].base == CFI_CFA) {
2060
2061 /*
2062 * This is needed for the rare case
2063 * where GCC does something dumb like:
2064 *
2065 * lea 0x8(%rsp), %rcx
2066 * ...
2067 * mov %rcx, %rsp
2068 */
2069 cfa->offset = -cfi->vals[op->src.reg].offset;
2070 cfi->stack_size = cfa->offset;
2071
2072 } else if (cfa->base == CFI_SP &&
2073 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2074 cfi->vals[op->src.reg].offset == cfa->offset) {
2075
2076 /*
2077 * Stack swizzle:
2078 *
2079 * 1: mov %rsp, (%[tos])
2080 * 2: mov %[tos], %rsp
2081 * ...
2082 * 3: pop %rsp
2083 *
2084 * Where:
2085 *
2086 * 1 - places a pointer to the previous
2087 * stack at the Top-of-Stack of the
2088 * new stack.
2089 *
2090 * 2 - switches to the new stack.
2091 *
2092 * 3 - pops the Top-of-Stack to restore
2093 * the original stack.
2094 *
2095 * Note: we set base to SP_INDIRECT
2096 * here and preserve offset. Therefore
2097 * when the unwinder reaches ToS it
2098 * will dereference SP and then add the
2099 * offset to find the next frame, IOW:
2100 * (%rsp) + offset.
2101 */
2102 cfa->base = CFI_SP_INDIRECT;
2103
2104 } else {
2105 cfa->base = CFI_UNDEFINED;
2106 cfa->offset = 0;
2107 }
2108 }
2109
2110 else if (op->dest.reg == CFI_SP &&
2111 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2112 cfi->vals[op->src.reg].offset == cfa->offset) {
2113
2114 /*
2115 * The same stack swizzle case 2) as above. But
2116 * because we can't change cfa->base, case 3)
2117 * will become a regular POP. Pretend we're a
2118 * PUSH so things don't go unbalanced.
2119 */
2120 cfi->stack_size += 8;
2121 }
2122
2123
2124 break;
2125
2126 case OP_SRC_ADD:
2127 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2128
2129 /* add imm, %rsp */
2130 cfi->stack_size -= op->src.offset;
2131 if (cfa->base == CFI_SP)
2132 cfa->offset -= op->src.offset;
2133 break;
2134 }
2135
2136 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2137
2138 /* lea disp(%rbp), %rsp */
2139 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2140 break;
2141 }
2142
2143 if (!cfi->drap && op->src.reg == CFI_SP &&
2144 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2145		    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
2146
2147 /* lea disp(%rsp), %rbp */
2148 cfa->base = CFI_BP;
2149 cfa->offset -= op->src.offset;
2150 cfi->bp_scratch = false;
2151 break;
2152 }
2153
2154 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2155
2156 /* drap: lea disp(%rsp), %drap */
2157 cfi->drap_reg = op->dest.reg;
2158
2159 /*
2160 * lea disp(%rsp), %reg
2161 *
2162 * This is needed for the rare case where GCC
2163 * does something dumb like:
2164 *
2165 * lea 0x8(%rsp), %rcx
2166 * ...
2167 * mov %rcx, %rsp
2168 */
2169 cfi->vals[op->dest.reg].base = CFI_CFA;
2170			cfi->vals[op->dest.reg].offset =
2171				-cfi->stack_size + op->src.offset;
2172
2173 break;
2174 }
2175
2176 if (cfi->drap && op->dest.reg == CFI_SP &&
2177 op->src.reg == cfi->drap_reg) {
2178
2179 /* drap: lea disp(%drap), %rsp */
2180 cfa->base = CFI_SP;
2181 cfa->offset = cfi->stack_size = -op->src.offset;
2182 cfi->drap_reg = CFI_UNDEFINED;
2183 cfi->drap = false;
2184 break;
2185 }
2186
2187 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2188 WARN_FUNC("unsupported stack register modification",
2189 insn->sec, insn->offset);
2190 return -1;
2191 }
2192
2193 break;
2194
2195 case OP_SRC_AND:
2196 if (op->dest.reg != CFI_SP ||
2197 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2198 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2199 WARN_FUNC("unsupported stack pointer realignment",
2200 insn->sec, insn->offset);
2201 return -1;
2202 }
2203
2204 if (cfi->drap_reg != CFI_UNDEFINED) {
2205 /* drap: and imm, %rsp */
2206 cfa->base = cfi->drap_reg;
2207 cfa->offset = cfi->stack_size = 0;
2208 cfi->drap = true;
2209 }
2210
2211 /*
2212 * Older versions of GCC (4.8ish) realign the stack
2213 * without DRAP, with a frame pointer.
2214 */
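		/*
		 * A minimal sketch of that pattern (illustrative; exact
		 * codegen varies by GCC version):
		 *
		 *   55			push  %rbp
		 *   48 89 e5		mov   %rsp,%rbp
		 *   48 83 e4 f0	and   $0xfffffffffffffff0,%rsp
		 *   ...
		 *   c9			leaveq
		 *   c3			retq
		 */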
2215
2216 break;
2217
2218 case OP_SRC_POP:
2219 case OP_SRC_POPF:
2220 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2221
2222 /* pop %rsp; # restore from a stack swizzle */
2223 cfa->base = CFI_SP;
2224 break;
2225 }
2226
2227 if (!cfi->drap && op->dest.reg == cfa->base) {
2228
2229 /* pop %rbp */
2230 cfa->base = CFI_SP;
2231 }
2232
2233 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2234 op->dest.reg == cfi->drap_reg &&
2235 cfi->drap_offset == -cfi->stack_size) {
2236
2237 /* drap: pop %drap */
2238 cfa->base = cfi->drap_reg;
2239 cfa->offset = 0;
2240 cfi->drap_offset = -1;
2241
2242 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2243
2244 /* pop %reg */
2245 restore_reg(cfi, op->dest.reg);
2246 }
2247
2248 cfi->stack_size -= 8;
2249 if (cfa->base == CFI_SP)
2250 cfa->offset -= 8;
2251
2252 break;
2253
2254 case OP_SRC_REG_INDIRECT:
2255 if (!cfi->drap && op->dest.reg == cfa->base &&
2256 op->dest.reg == CFI_BP) {
2257
2258 /* mov disp(%rsp), %rbp */
2259 cfa->base = CFI_SP;
2260 cfa->offset = cfi->stack_size;
2261 }
2262
2263 if (cfi->drap && op->src.reg == CFI_BP &&
2264 op->src.offset == cfi->drap_offset) {
2265
2266 /* drap: mov disp(%rbp), %drap */
2267 cfa->base = cfi->drap_reg;
2268 cfa->offset = 0;
2269 cfi->drap_offset = -1;
2270 }
2271
2272 if (cfi->drap && op->src.reg == CFI_BP &&
2273 op->src.offset == regs[op->dest.reg].offset) {
2274
2275 /* drap: mov disp(%rbp), %reg */
2276 restore_reg(cfi, op->dest.reg);
2277
2278 } else if (op->src.reg == cfa->base &&
2279 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2280
2281 /* mov disp(%rbp), %reg */
2282 /* mov disp(%rsp), %reg */
2283 restore_reg(cfi, op->dest.reg);
2284
2285 } else if (op->src.reg == CFI_SP &&
2286 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2287
2288 /* mov disp(%rsp), %reg */
2289 restore_reg(cfi, op->dest.reg);
2290 }
2291
2292 break;
2293
2294 default:
2295 WARN_FUNC("unknown stack-related instruction",
2296 insn->sec, insn->offset);
2297 return -1;
2298 }
2299
2300 break;
2301
2302 case OP_DEST_PUSH:
2303 case OP_DEST_PUSHF:
2304 cfi->stack_size += 8;
2305 if (cfa->base == CFI_SP)
2306 cfa->offset += 8;
2307
2308 if (op->src.type != OP_SRC_REG)
2309 break;
2310
2311 if (cfi->drap) {
2312 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2313
2314 /* drap: push %drap */
2315 cfa->base = CFI_BP_INDIRECT;
2316 cfa->offset = -cfi->stack_size;
2317
2318 /* save drap so we know when to restore it */
2319 cfi->drap_offset = -cfi->stack_size;
2320
2321 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2322
2323 /* drap: push %rbp */
2324 cfi->stack_size = 0;
2325
2326 } else {
2327
2328 /* drap: push %reg */
2329 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2330 }
2331
2332 } else {
2333
2334 /* push %reg */
2335 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2336 }
2337
2338 /* detect when asm code uses rbp as a scratch register */
2339 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2340 cfa->base != CFI_BP)
2341 cfi->bp_scratch = true;
2342 break;
2343
2344 case OP_DEST_REG_INDIRECT:
2345
2346 if (cfi->drap) {
2347 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2348
2349 /* drap: mov %drap, disp(%rbp) */
2350 cfa->base = CFI_BP_INDIRECT;
2351 cfa->offset = op->dest.offset;
2352
2353 /* save drap offset so we know when to restore it */
2354 cfi->drap_offset = op->dest.offset;
2355 } else {
2356
2357 /* drap: mov reg, disp(%rbp) */
2358 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2359 }
2360
2361 } else if (op->dest.reg == cfa->base) {
2362
2363 /* mov reg, disp(%rbp) */
2364 /* mov reg, disp(%rsp) */
2365 save_reg(cfi, op->src.reg, CFI_CFA,
2366 op->dest.offset - cfi->cfa.offset);
2367
2368 } else if (op->dest.reg == CFI_SP) {
2369
2370 /* mov reg, disp(%rsp) */
2371 save_reg(cfi, op->src.reg, CFI_CFA,
2372 op->dest.offset - cfi->stack_size);
2373
2374 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2375
2376 /* mov %rsp, (%reg); # setup a stack swizzle. */
2377 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
2378 cfi->vals[op->dest.reg].offset = cfa->offset;
2379 }
2380
2381 break;
2382
2383 case OP_DEST_MEM:
2384 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2385 WARN_FUNC("unknown stack-related memory operation",
2386 insn->sec, insn->offset);
2387 return -1;
2388 }
2389
2390 /* pop mem */
2391 cfi->stack_size -= 8;
2392 if (cfa->base == CFI_SP)
2393 cfa->offset -= 8;
2394
2395 break;
2396
2397 default:
2398 WARN_FUNC("unknown stack-related instruction",
2399 insn->sec, insn->offset);
2400 return -1;
2401 }
2402
2403 return 0;
2404}
2405
2406/*
2407 * The stack layouts of alternative instructions can sometimes diverge when
2408 * they have stack modifications. That's fine as long as the potential stack
2409 * layouts don't conflict at any given potential instruction boundary.
2410 *
2411 * Flatten the CFIs of the different alternative code streams (both original
2412 * and replacement) into a single shared CFI array which can be used to detect
2413 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2414 */
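/*
 * For example (hypothetical offsets): if the original instructions reach some
 * group offset with the CFA at %rsp+8, but a replacement stream arrives at
 * the same offset with the CFA at %rsp+16, the memcmp() below catches the
 * disagreement and we warn.
 */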
2415static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2416{
2417 struct cfi_state **alt_cfi;
2418 int group_off;
2419
2420 if (!insn->alt_group)
2421 return 0;
2422
2423 alt_cfi = insn->alt_group->cfi;
2424 group_off = insn->offset - insn->alt_group->first_insn->offset;
2425
2426 if (!alt_cfi[group_off]) {
2427 alt_cfi[group_off] = &insn->cfi;
2428 } else {
2429 if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
2430 WARN_FUNC("stack layout conflict in alternatives",
2431 insn->sec, insn->offset);
2432 return -1;
2433 }
2434 }
2435
2436 return 0;
2437}
2438
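/*
 * Apply each of the instruction's stack_ops to the CFI state.  For
 * instructions inside alternatives, also mirror the uaccess (AC flag) state
 * across PUSHF/POPF pairs.
 */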
2439static int handle_insn_ops(struct instruction *insn,
2440 struct instruction *next_insn,
2441 struct insn_state *state)
2442{
2443 struct stack_op *op;
2444
2445 list_for_each_entry(op, &insn->stack_ops, list) {
2446
2447 if (update_cfi_state(insn, next_insn, &state->cfi, op))
2448 return 1;
2449
2450 if (!insn->alt_group)
2451 continue;
2452
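		/*
		 * state->uaccess_stack is a bit-stack of saved AC states: the
		 * first PUSHF seeds a sentinel bit, then every PUSHF shifts
		 * the stack left and records the current uaccess state in
		 * bit 0.  The POPF case below pops bit 0 back into
		 * state->uaccess; once only the sentinel remains, the stack
		 * reads as empty again.
		 */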
2453 if (op->dest.type == OP_DEST_PUSHF) {
2454 if (!state->uaccess_stack) {
2455 state->uaccess_stack = 1;
2456 } else if (state->uaccess_stack >> 31) {
2457 WARN_FUNC("PUSHF stack exhausted",
2458 insn->sec, insn->offset);
2459 return 1;
2460 }
2461 state->uaccess_stack <<= 1;
2462 state->uaccess_stack |= state->uaccess;
2463 }
2464
2465 if (op->src.type == OP_SRC_POPF) {
2466 if (state->uaccess_stack) {
2467 state->uaccess = state->uaccess_stack & 1;
2468 state->uaccess_stack >>= 1;
2469 if (state->uaccess_stack == 1)
2470 state->uaccess_stack = 0;
2471 }
2472 }
2473 }
2474
2475 return 0;
2476}
2477
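/*
 * Compare the CFI state previously recorded at this instruction with the one
 * we just arrived with.  Warn about the first difference found -- CFA, saved
 * registers, unwind hint type, or DRAP state -- and return false.
 */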
2478static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2479{
2480 struct cfi_state *cfi1 = &insn->cfi;
2481 int i;
2482
2483 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2484
2485 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2486 insn->sec, insn->offset,
2487 cfi1->cfa.base, cfi1->cfa.offset,
2488 cfi2->cfa.base, cfi2->cfa.offset);
2489
2490 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2491 for (i = 0; i < CFI_NUM_REGS; i++) {
2492 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2493 sizeof(struct cfi_reg)))
2494 continue;
2495
2496 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2497 insn->sec, insn->offset,
2498 i, cfi1->regs[i].base, cfi1->regs[i].offset,
2499 i, cfi2->regs[i].base, cfi2->regs[i].offset);
2500 break;
2501 }
2502
2503 } else if (cfi1->type != cfi2->type) {
2504
2505 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2506 insn->sec, insn->offset, cfi1->type, cfi2->type);
2507
2508 } else if (cfi1->drap != cfi2->drap ||
2509 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2510 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2511
2512 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2513 insn->sec, insn->offset,
2514 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
2515 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2516
2517 } else
2518 return true;
2519
2520 return false;
2521}
2522
2523static inline bool func_uaccess_safe(struct symbol *func)
2524{
2525 if (func)
2526 return func->uaccess_safe;
2527
2528 return false;
2529}
2530
2531static inline const char *call_dest_name(struct instruction *insn)
2532{
2533 if (insn->call_dest)
2534 return insn->call_dest->name;
2535
2536 return "{dynamic}";
2537}
2538
2539static inline bool noinstr_call_dest(struct symbol *func)
2540{
2541 /*
2542 * We can't deal with indirect function calls at present;
2543 * assume they're instrumented.
2544 */
2545 if (!func)
2546 return false;
2547
2548 /*
2549	 * If the symbol is from a noinstr section, we're good.
2550 */
2551 if (func->sec->noinstr)
2552 return true;
2553
2554 /*
2555	 * The __ubsan_handle_*() calls are like WARN(): they only happen when
2556 * something 'BAD' happened. At the risk of taking the machine down,
2557 * let them proceed to get the message out.
2558 */
2559 if (!strncmp(func->name, "__ubsan_handle_", 15))
2560 return true;
2561
2562 return false;
2563}
2564
2565static int validate_call(struct instruction *insn, struct insn_state *state)
2566{
2567 if (state->noinstr && state->instr <= 0 &&
2568 !noinstr_call_dest(insn->call_dest)) {
2569 WARN_FUNC("call to %s() leaves .noinstr.text section",
2570 insn->sec, insn->offset, call_dest_name(insn));
2571 return 1;
2572 }
2573
2574 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
2575 WARN_FUNC("call to %s() with UACCESS enabled",
2576 insn->sec, insn->offset, call_dest_name(insn));
2577 return 1;
2578 }
2579
2580 if (state->df) {
2581 WARN_FUNC("call to %s() with DF set",
2582 insn->sec, insn->offset, call_dest_name(insn));
2583 return 1;
2584 }
2585
2586 return 0;
2587}
2588
2589static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
2590{
2591 if (has_modified_stack_frame(insn, state)) {
2592 WARN_FUNC("sibling call from callable instruction with modified stack frame",
2593 insn->sec, insn->offset);
2594 return 1;
2595 }
2596
2597 return validate_call(insn, state);
2598}
2599
2600static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
2601{
2602 if (state->noinstr && state->instr > 0) {
2603 WARN_FUNC("return with instrumentation enabled",
2604 insn->sec, insn->offset);
2605 return 1;
2606 }
2607
2608 if (state->uaccess && !func_uaccess_safe(func)) {
2609 WARN_FUNC("return with UACCESS enabled",
2610 insn->sec, insn->offset);
2611 return 1;
2612 }
2613
2614 if (!state->uaccess && func_uaccess_safe(func)) {
2615 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
2616 insn->sec, insn->offset);
2617 return 1;
2618 }
2619
2620 if (state->df) {
2621 WARN_FUNC("return with DF set",
2622 insn->sec, insn->offset);
2623 return 1;
2624 }
2625
2626 if (func && has_modified_stack_frame(insn, state)) {
2627 WARN_FUNC("return with modified stack frame",
2628 insn->sec, insn->offset);
2629 return 1;
2630 }
2631
2632 if (state->cfi.bp_scratch) {
2633 WARN_FUNC("BP used as a scratch register",
2634 insn->sec, insn->offset);
2635 return 1;
2636 }
2637
2638 return 0;
2639}
2640
2641static struct instruction *next_insn_to_validate(struct objtool_file *file,
2642 struct instruction *insn)
2643{
2644 struct alt_group *alt_group = insn->alt_group;
2645
2646 /*
2647 * Simulate the fact that alternatives are patched in-place. When the
2648 * end of a replacement alt_group is reached, redirect objtool flow to
2649 * the end of the original alt_group.
2650 */
2651 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
2652 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
2653
2654 return next_insn_same_sec(file, insn);
2655}
2656
2657/*
2658 * Follow the branch starting at the given instruction, and recursively follow
2659 * any other branches (jumps). Meanwhile, track the frame pointer state at
2660 * each instruction and validate all the rules described in
2661 * tools/objtool/Documentation/stack-validation.txt.
2662 */
2663static int validate_branch(struct objtool_file *file, struct symbol *func,
2664 struct instruction *insn, struct insn_state state)
2665{
2666 struct alternative *alt;
2667 struct instruction *next_insn;
2668 struct section *sec;
2669 u8 visited;
2670 int ret;
2671
2672 sec = insn->sec;
2673
2674 while (1) {
2675 next_insn = next_insn_to_validate(file, insn);
2676
2677 if (file->c_file && func && insn->func && func != insn->func->pfunc) {
2678 WARN("%s() falls through to next function %s()",
2679 func->name, insn->func->name);
2680 return 1;
2681 }
2682
2683 if (func && insn->ignore) {
2684 WARN_FUNC("BUG: why am I validating an ignored function?",
2685 sec, insn->offset);
2686 return 1;
2687 }
2688
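		/*
		 * ->visited is a tiny bitmask indexed by the current uaccess
		 * state, so each instruction is validated at most once with
		 * uaccess off (bit 0) and once with uaccess on (bit 1).
		 */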
2689 visited = 1 << state.uaccess;
2690 if (insn->visited) {
2691 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
2692 return 1;
2693
2694 if (insn->visited & visited)
2695 return 0;
2696 }
2697
2698 if (state.noinstr)
2699 state.instr += insn->instr;
2700
2701 if (insn->hint)
2702 state.cfi = insn->cfi;
2703 else
2704 insn->cfi = state.cfi;
2705
2706 insn->visited |= visited;
2707
2708 if (propagate_alt_cfi(file, insn))
2709 return 1;
2710
2711 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
2712 bool skip_orig = false;
2713
2714 list_for_each_entry(alt, &insn->alts, list) {
2715 if (alt->skip_orig)
2716 skip_orig = true;
2717
2718 ret = validate_branch(file, func, alt->insn, state);
2719 if (ret) {
2720 if (backtrace)
2721 BT_FUNC("(alt)", insn);
2722 return ret;
2723 }
2724 }
2725
2726 if (skip_orig)
2727 return 0;
2728 }
2729
2730 if (handle_insn_ops(insn, next_insn, &state))
2731 return 1;
2732
2733 switch (insn->type) {
2734
2735 case INSN_RETURN:
2736 return validate_return(func, insn, &state);
2737
2738 case INSN_CALL:
2739 case INSN_CALL_DYNAMIC:
2740 ret = validate_call(insn, &state);
2741 if (ret)
2742 return ret;
2743
2744 if (!no_fp && func && !is_fentry_call(insn) &&
2745 !has_valid_stack_frame(&state)) {
2746 WARN_FUNC("call without frame pointer save/setup",
2747 sec, insn->offset);
2748 return 1;
2749 }
2750
2751 if (dead_end_function(file, insn->call_dest))
2752 return 0;
2753
2754 break;
2755
2756 case INSN_JUMP_CONDITIONAL:
2757 case INSN_JUMP_UNCONDITIONAL:
2758 if (is_sibling_call(insn)) {
2759 ret = validate_sibling_call(insn, &state);
2760 if (ret)
2761 return ret;
2762
2763 } else if (insn->jump_dest) {
2764 ret = validate_branch(file, func,
2765 insn->jump_dest, state);
2766 if (ret) {
2767 if (backtrace)
2768 BT_FUNC("(branch)", insn);
2769 return ret;
2770 }
2771 }
2772
2773 if (insn->type == INSN_JUMP_UNCONDITIONAL)
2774 return 0;
2775
2776 break;
2777
2778 case INSN_JUMP_DYNAMIC:
2779 case INSN_JUMP_DYNAMIC_CONDITIONAL:
2780 if (is_sibling_call(insn)) {
2781 ret = validate_sibling_call(insn, &state);
2782 if (ret)
2783 return ret;
2784 }
2785
2786 if (insn->type == INSN_JUMP_DYNAMIC)
2787 return 0;
2788
2789 break;
2790
2791 case INSN_CONTEXT_SWITCH:
2792 if (func && (!next_insn || !next_insn->hint)) {
2793 WARN_FUNC("unsupported instruction in callable function",
2794 sec, insn->offset);
2795 return 1;
2796 }
2797 return 0;
2798
2799 case INSN_STAC:
2800 if (state.uaccess) {
2801 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
2802 return 1;
2803 }
2804
2805 state.uaccess = true;
2806 break;
2807
2808 case INSN_CLAC:
2809 if (!state.uaccess && func) {
2810 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
2811 return 1;
2812 }
2813
2814 if (func_uaccess_safe(func) && !state.uaccess_stack) {
2815 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
2816 return 1;
2817 }
2818
2819 state.uaccess = false;
2820 break;
2821
2822 case INSN_STD:
2823 if (state.df) {
2824 WARN_FUNC("recursive STD", sec, insn->offset);
2825 return 1;
2826 }
2827
2828 state.df = true;
2829 break;
2830
2831 case INSN_CLD:
2832 if (!state.df && func) {
2833 WARN_FUNC("redundant CLD", sec, insn->offset);
2834 return 1;
2835 }
2836
2837 state.df = false;
2838 break;
2839
2840 default:
2841 break;
2842 }
2843
2844 if (insn->dead_end)
2845 return 0;
2846
2847 if (!next_insn) {
2848 if (state.cfi.cfa.base == CFI_UNDEFINED)
2849 return 0;
2850 WARN("%s: unexpected end of section", sec->name);
2851 return 1;
2852 }
2853
2854 insn = next_insn;
2855 }
2856
2857 return 0;
2858}
2859
2860static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
2861{
2862 struct instruction *insn;
2863 struct insn_state state;
2864 int ret, warnings = 0;
2865
2866 if (!file->hints)
2867 return 0;
2868
2869 init_insn_state(&state, sec);
2870
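	/* A NULL sec means: scan the hinted instructions of the whole file. */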
2871 if (sec) {
2872 insn = find_insn(file, sec, 0);
2873 if (!insn)
2874 return 0;
2875 } else {
2876 insn = list_first_entry(&file->insn_list, typeof(*insn), list);
2877 }
2878
2879 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
2880 if (insn->hint && !insn->visited) {
2881 ret = validate_branch(file, insn->func, insn, state);
2882 if (ret && backtrace)
2883 BT_FUNC("<=== (hint)", insn);
2884 warnings += ret;
2885 }
2886
2887 insn = list_next_entry(insn, list);
2888 }
2889
2890 return warnings;
2891}
2892
2893static int validate_retpoline(struct objtool_file *file)
2894{
2895 struct instruction *insn;
2896 int warnings = 0;
2897
2898 for_each_insn(file, insn) {
2899 if (insn->type != INSN_JUMP_DYNAMIC &&
2900 insn->type != INSN_CALL_DYNAMIC)
2901 continue;
2902
2903 if (insn->retpoline_safe)
2904 continue;
2905
2906 /*
2907		 * .init.text code is run before userspace and thus doesn't
2908		 * strictly need retpolines, except for modules, which are
2909		 * loaded late; they very much do need retpolines in their
2910		 * .init.text.
2911 */
2912 if (!strcmp(insn->sec->name, ".init.text") && !module)
2913 continue;
2914
2915 WARN_FUNC("indirect %s found in RETPOLINE build",
2916 insn->sec, insn->offset,
2917 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
2918
2919 warnings++;
2920 }
2921
2922 return warnings;
2923}
2924
2925static bool is_kasan_insn(struct instruction *insn)
2926{
2927 return (insn->type == INSN_CALL &&
2928 !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
2929}
2930
2931static bool is_ubsan_insn(struct instruction *insn)
2932{
2933 return (insn->type == INSN_CALL &&
2934 !strcmp(insn->call_dest->name,
2935 "__ubsan_handle_builtin_unreachable"));
2936}
2937
2938static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
2939{
2940 int i;
2941 struct instruction *prev_insn;
2942
2943 if (insn->ignore || insn->type == INSN_NOP)
2944 return true;
2945
2946 /*
2947 * Ignore any unused exceptions. This can happen when a whitelisted
2948 * function has an exception table entry.
2949 *
2950 * Also ignore alternative replacement instructions. This can happen
2951 * when a whitelisted function uses one of the ALTERNATIVE macros.
2952 */
2953 if (!strcmp(insn->sec->name, ".fixup") ||
2954 !strcmp(insn->sec->name, ".altinstr_replacement") ||
2955 !strcmp(insn->sec->name, ".altinstr_aux"))
2956 return true;
2957
2958 if (!insn->func)
2959 return false;
2960
2961 /*
2962 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
2963 * __builtin_unreachable(). The BUG() macro has an unreachable() after
2964 * the UD2, which causes GCC's undefined trap logic to emit another UD2
2965 * (or occasionally a JMP to UD2).
2966 *
2967 * It may also insert a UD2 after calling a __noreturn function.
2968 */
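	/*
	 * An illustrative shape of the latter (assumed codegen; the callee
	 * name is hypothetical):
	 *
	 *	call	some_noreturn_func
	 *	ud2			# insn: decoded but never reached
	 */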
2969 prev_insn = list_prev_entry(insn, list);
2970 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
2971 (insn->type == INSN_BUG ||
2972 (insn->type == INSN_JUMP_UNCONDITIONAL &&
2973 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
2974 return true;
2975
2976 /*
2977 * Check if this (or a subsequent) instruction is related to
2978 * CONFIG_UBSAN or CONFIG_KASAN.
2979 *
2980 * End the search at 5 instructions to avoid going into the weeds.
2981 */
2982 for (i = 0; i < 5; i++) {
2983
2984 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
2985 return true;
2986
2987 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
2988 if (insn->jump_dest &&
2989 insn->jump_dest->func == insn->func) {
2990 insn = insn->jump_dest;
2991 continue;
2992 }
2993
2994 break;
2995 }
2996
2997 if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
2998 break;
2999
3000 insn = list_next_entry(insn, list);
3001 }
3002
3003 return false;
3004}
3005
3006static int validate_symbol(struct objtool_file *file, struct section *sec,
3007 struct symbol *sym, struct insn_state *state)
3008{
3009 struct instruction *insn;
3010 int ret;
3011
3012 if (!sym->len) {
3013 WARN("%s() is missing an ELF size annotation", sym->name);
3014 return 1;
3015 }
3016
3017 if (sym->pfunc != sym || sym->alias != sym)
3018 return 0;
3019
3020 insn = find_insn(file, sec, sym->offset);
3021 if (!insn || insn->ignore || insn->visited)
3022 return 0;
3023
3024 state->uaccess = sym->uaccess_safe;
3025
3026 ret = validate_branch(file, insn->func, insn, *state);
3027 if (ret && backtrace)
3028 BT_FUNC("<=== (sym)", insn);
3029 return ret;
3030}
3031
3032static int validate_section(struct objtool_file *file, struct section *sec)
3033{
3034 struct insn_state state;
3035 struct symbol *func;
3036 int warnings = 0;
3037
3038 list_for_each_entry(func, &sec->symbol_list, list) {
3039 if (func->type != STT_FUNC)
3040 continue;
3041
3042 init_insn_state(&state, sec);
3043 set_func_state(&state.cfi);
3044
3045 warnings += validate_symbol(file, sec, func, &state);
3046 }
3047
3048 return warnings;
3049}
3050
3051static int validate_vmlinux_functions(struct objtool_file *file)
3052{
3053 struct section *sec;
3054 int warnings = 0;
3055
3056 sec = find_section_by_name(file->elf, ".noinstr.text");
3057 if (sec) {
3058 warnings += validate_section(file, sec);
3059 warnings += validate_unwind_hints(file, sec);
3060 }
3061
3062 sec = find_section_by_name(file->elf, ".entry.text");
3063 if (sec) {
3064 warnings += validate_section(file, sec);
3065 warnings += validate_unwind_hints(file, sec);
3066 }
3067
3068 return warnings;
3069}
3070
3071static int validate_functions(struct objtool_file *file)
3072{
3073 struct section *sec;
3074 int warnings = 0;
3075
3076 for_each_sec(file, sec) {
3077 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
3078 continue;
3079
3080 warnings += validate_section(file, sec);
3081 }
3082
3083 return warnings;
3084}
3085
3086static int validate_reachable_instructions(struct objtool_file *file)
3087{
3088 struct instruction *insn;
3089
3090 if (file->ignore_unreachables)
3091 return 0;
3092
3093 for_each_insn(file, insn) {
3094 if (insn->visited || ignore_unreachable_insn(file, insn))
3095 continue;
3096
3097 WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
3098 return 1;
3099 }
3100
3101 return 0;
3102}
3103
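/*
 * Main entry point: decode the object file, run the enabled validation passes
 * (vmlinux, retpoline, per-function stack validation, unwind hints,
 * reachability), then emit the static call and (optionally) mcount sections.
 */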
3104int check(struct objtool_file *file)
3105{
3106 int ret, warnings = 0;
3107
3108 arch_initial_func_cfi_state(&initial_func_cfi);
3109
3110 ret = decode_sections(file);
3111 if (ret < 0)
3112 goto out;
3113 warnings += ret;
3114
3115 if (list_empty(&file->insn_list))
3116 goto out;
3117
3118 if (vmlinux && !validate_dup) {
3119 ret = validate_vmlinux_functions(file);
3120 if (ret < 0)
3121 goto out;
3122
3123 warnings += ret;
3124 goto out;
3125 }
3126
3127 if (retpoline) {
3128 ret = validate_retpoline(file);
3129 if (ret < 0)
3130 return ret;
3131 warnings += ret;
3132 }
3133
3134 ret = validate_functions(file);
3135 if (ret < 0)
3136 goto out;
3137 warnings += ret;
3138
3139 ret = validate_unwind_hints(file, NULL);
3140 if (ret < 0)
3141 goto out;
3142 warnings += ret;
3143
3144 if (!warnings) {
3145 ret = validate_reachable_instructions(file);
3146 if (ret < 0)
3147 goto out;
3148 warnings += ret;
3149 }
3150
3151 ret = create_static_call_sections(file);
3152 if (ret < 0)
3153 goto out;
3154 warnings += ret;
3155
3156 if (mcount) {
3157 ret = create_mcount_loc_sections(file);
3158 if (ret < 0)
3159 goto out;
3160 warnings += ret;
3161 }
3162
3163out:
3164 /*
3165 * For now, don't fail the kernel build on fatal warnings. These
3166 * errors are still fairly common due to the growing matrix of
3167 * supported toolchains and their recent pace of change.
3168 */
3169 return 0;
3170}