// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

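/*
 * An alternate instruction sequence attached to an instruction (from
 * .altinstructions, a jump label, or a switch jump table).  Collected on
 * insn->alts and walked by validate_branch(); skip_orig means validation
 * should not continue down the original instruction's path.
 */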
struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

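/*
 * Instruction iterators: func_for_each_insn() follows a function into its
 * GCC-generated .cold subfunction via next_insn_same_func(), while the
 * sym_for_each_insn() variants stay strictly within the symbol's byte range.
 */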
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

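/*
 * A dynamic jump is a jump-table dispatch if the instruction itself has a
 * jump table, or if it is the retpoline alternative of an instruction that
 * does.
 */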
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"kthread_exit",
		"make_task_dead",
		"__module_put_and_kthread_exit",
		"kthread_complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_and_make_dead",
		"kunit_try_catch_throw",
		"xen_start_kernel",
		"cpu_bringup_and_idle",
		"do_group_exit",
		"stop_this_cpu",
		"__invalid_creds",
		"cpu_startup_entry",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

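/*
 * cfi_state objects are interned in cfi_hash so that identical states are
 * shared.  The embedded hlist_node ('hash') is the struct's first member;
 * both the compare and the key deliberately skip it and cover only the
 * payload that follows.
 */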
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text"))
			sec->noinstr = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			INIT_LIST_HEAD(&insn->call_node);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->func = func;
				if (insn->type == INSN_ENDBR && list_empty(&insn->call_node)) {
					if (insn->offset == insn->func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops)
		return -1;

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
		add_pv_ops(file, pv_ops);

	return 0;
}

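/*
 * Find the last decoded instruction in a section by scanning backwards
 * from the end of the section.
 */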
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%lx",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%lx",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%lx",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%lx",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

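/*
 * insn->reloc caches the relocation attached to an instruction.  This
 * sentinel records "no reloc found" so insn_reloc() doesn't repeat the
 * ELF lookup for instructions that have none.
 */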
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (insn->sec->noinstr && sym->profiling_func) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
				       : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here.  Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static bool same_function(struct instruction *insn1, struct instruction *insn2)
{
	return insn1->func->pfunc == insn2->func->pfunc;
}

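/*
 * With IBT, a function's address may point at a leading ENDBR; the
 * instruction immediately after that ENDBR still counts as the function's
 * first instruction.
 */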
static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
{
	if (insn->offset == insn->func->offset)
		return true;

	if (ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == insn->func->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (insn->func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && jump_dest->func &&
		    insn->func != jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(jump_dest->func->name, ".cold")) {
				insn->func->cfunc = jump_dest->func;
				jump_dest->func->pfunc = insn->func;

			} else if (!same_function(insn, jump_dest) &&
				   is_first_func_insn(file, jump_dest)) {
				/*
				 * Internal sibling call without reloc or with
				 * STT_SECTION reloc.
				 */
				add_call_dest(file, insn, jump_dest->func, true);
				continue;
			}
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
			if (!insn->jump_dest) {
				WARN_FUNC("can't find alternative jump destination",
					  insn->sec, insn->offset);
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code.  Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

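/* Reset a CFI state to the architecture's default at function entry. */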
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

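/*
 * Apply the UNWIND_HINT annotations collected in .discard.unwind_hints:
 * each hint pins the CFI state of the instruction it points at, interned
 * through the CFI hash.
 */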
1905static int read_unwind_hints(struct objtool_file *file)
1906{
1907 struct cfi_state cfi = init_cfi;
1908 struct section *sec, *relocsec;
1909 struct unwind_hint *hint;
1910 struct instruction *insn;
1911 struct reloc *reloc;
1912 int i;
1913
1914 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
1915 if (!sec)
1916 return 0;
1917
1918 relocsec = sec->reloc;
1919 if (!relocsec) {
1920 WARN("missing .rela.discard.unwind_hints section");
1921 return -1;
1922 }
1923
1924 if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
1925 WARN("struct unwind_hint size mismatch");
1926 return -1;
1927 }
1928
1929 file->hints = true;
1930
1931 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
1932 hint = (struct unwind_hint *)sec->data->d_buf + i;
1933
1934 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
1935 if (!reloc) {
1936 WARN("can't find reloc for unwind_hints[%d]", i);
1937 return -1;
1938 }
1939
1940 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1941 if (!insn) {
1942 WARN("can't find insn for unwind_hints[%d]", i);
1943 return -1;
1944 }
1945
1946 insn->hint = true;
1947
1948 if (ibt && hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
1949 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
1950
1951 if (sym && sym->bind == STB_GLOBAL &&
1952 insn->type != INSN_ENDBR && !insn->noendbr) {
1953 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
1954 insn->sec, insn->offset);
1955 }
1956 }
1957
1958 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
1959 insn->cfi = &func_cfi;
1960 continue;
1961 }
1962
1963 if (insn->cfi)
1964 cfi = *(insn->cfi);
1965
1966 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
1967 WARN_FUNC("unsupported unwind_hint sp base reg %d",
1968 insn->sec, insn->offset, hint->sp_reg);
1969 return -1;
1970 }
1971
1972 cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
1973 cfi.type = hint->type;
1974 cfi.end = hint->end;
1975
1976 insn->cfi = cfi_hash_find_or_add(&cfi);
1977 }
1978
1979 return 0;
1980}
1981
1982static int read_noendbr_hints(struct objtool_file *file)
1983{
1984 struct section *sec;
1985 struct instruction *insn;
1986 struct reloc *reloc;
1987
1988 sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
1989 if (!sec)
1990 return 0;
1991
1992 list_for_each_entry(reloc, &sec->reloc_list, list) {
1993 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
1994 if (!insn) {
1995 WARN("bad .discard.noendbr entry");
1996 return -1;
1997 }
1998
1999 if (insn->type == INSN_ENDBR)
2000 WARN_FUNC("ANNOTATE_NOENDBR on ENDBR", insn->sec, insn->offset);
2001
2002 insn->noendbr = 1;
2003 }
2004
2005 return 0;
2006}
2007
2008static int read_retpoline_hints(struct objtool_file *file)
2009{
2010 struct section *sec;
2011 struct instruction *insn;
2012 struct reloc *reloc;
2013
2014 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2015 if (!sec)
2016 return 0;
2017
2018 list_for_each_entry(reloc, &sec->reloc_list, list) {
2019 if (reloc->sym->type != STT_SECTION) {
2020 WARN("unexpected relocation symbol type in %s", sec->name);
2021 return -1;
2022 }
2023
2024 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2025 if (!insn) {
2026 WARN("bad .discard.retpoline_safe entry");
2027 return -1;
2028 }
2029
2030 if (insn->type != INSN_JUMP_DYNAMIC &&
2031 insn->type != INSN_CALL_DYNAMIC) {
2032 WARN_FUNC("retpoline_safe hint not an indirect jump/call",
2033 insn->sec, insn->offset);
2034 return -1;
2035 }
2036
2037 insn->retpoline_safe = true;
2038 }
2039
2040 return 0;
2041}
2042
2043static int read_instr_hints(struct objtool_file *file)
2044{
2045 struct section *sec;
2046 struct instruction *insn;
2047 struct reloc *reloc;
2048
2049 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2050 if (!sec)
2051 return 0;
2052
2053 list_for_each_entry(reloc, &sec->reloc_list, list) {
2054 if (reloc->sym->type != STT_SECTION) {
2055 WARN("unexpected relocation symbol type in %s", sec->name);
2056 return -1;
2057 }
2058
2059 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2060 if (!insn) {
2061 WARN("bad .discard.instr_end entry");
2062 return -1;
2063 }
2064
2065 insn->instr--;
2066 }
2067
2068 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2069 if (!sec)
2070 return 0;
2071
2072 list_for_each_entry(reloc, &sec->reloc_list, list) {
2073 if (reloc->sym->type != STT_SECTION) {
2074 WARN("unexpected relocation symbol type in %s", sec->name);
2075 return -1;
2076 }
2077
2078 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2079 if (!insn) {
2080 WARN("bad .discard.instr_begin entry");
2081 return -1;
2082 }
2083
2084 insn->instr++;
2085 }
2086
2087 return 0;
2088}

static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_calls entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

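		/* Relative CALL destination: next instruction + immediate. */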
		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", 16))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute; remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
	 */
	if (!strncmp(name, "__tsan_func_", 12) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}

static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (is_profiling_func(func->name))
				func->profiling_func = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	ret = read_noendbr_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations() and add_call_destinations().
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destinations(); it changes INSN_CALL to
	 * INSN_JUMP_UNCONDITIONAL.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = add_dead_ends(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}

static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}

static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

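	/*
	 * A conventional frame: CFA based on %rbp, with the saved %rbp at
	 * the bottom of the frame and the return address 8 bytes above it.
	 */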
	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

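	/*
	 * Inside an UNWIND_HINT_TYPE_REGS{,_PARTIAL} region only the CFA
	 * offset is tracked here; individual register locations are
	 * described by the hint rather than derived from the stack ops.
	 */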
	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

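/*
 * Record where a callee-saved register got saved, but only the first time:
 * a later save of an already-tracked register must not clobber the original
 * unwind location.
 */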
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
 * register. The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (!cfi->drap && op->src.reg == CFI_SP &&
			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {

				/* lea disp(%rsp), %rbp */
				cfa->base = CFI_BP;
				cfa->offset -= op->src.offset;
				cfi->bp_scratch = false;
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset =
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * The stack layouts of alternative instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;
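	/* alt_cfi[] is indexed by byte offset from the start of the alt group. */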

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;

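		/*
		 * state->uaccess_stack is a bit stack: PUSHF shifts the
		 * current uaccess flag in at bit 0 and POPF shifts it back
		 * out.  The value 1 serves as the bottom-of-stack marker;
		 * a set bit 31 means the stack is about to overflow.
		 */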
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[19];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section, we're good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place. When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

static struct instruction *
validate_ibt_reloc(struct objtool_file *file, struct reloc *reloc)
{
	struct instruction *dest;
	struct section *sec;
	unsigned long off;

	sec = reloc->sym->sec;
	off = reloc->sym->offset;

	if ((reloc->sec->base->sh.sh_flags & SHF_EXECINSTR) &&
	    (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32))
		off += arch_dest_reloc_offset(reloc->addend);
	else
		off += reloc->addend;

	dest = find_insn(file, sec, off);
	if (!dest)
		return NULL;

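	/*
	 * An address-taken ENDBR is a legitimate indirect branch target;
	 * take it off the seal candidates (->call_node) so that
	 * create_ibt_endbr_seal_sections() leaves it alone.
	 */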
	if (dest->type == INSN_ENDBR) {
		if (!list_empty(&dest->call_node))
			list_del_init(&dest->call_node);

		return NULL;
	}

	if (reloc->sym->static_call_tramp)
		return NULL;

	return dest;
}

static void warn_noendbr(const char *msg, struct section *sec, unsigned long offset,
			 struct instruction *dest)
{
	WARN_FUNC("%srelocation to !ENDBR: %s", sec, offset, msg,
		  offstr(dest->sec, dest->offset));
}

static void validate_ibt_dest(struct objtool_file *file, struct instruction *insn,
			      struct instruction *dest)
{
	if (dest->func && dest->func == insn->func) {
		/*
		 * Anything from->to self is either _THIS_IP_ or IRET-to-self.
		 *
		 * There is no sane way to annotate _THIS_IP_ since the compiler treats the
		 * relocation as a constant and is happy to fold in offsets, skewing any
		 * annotation we do, leading to vast amounts of false-positives.
		 *
		 * There's also compiler generated _THIS_IP_ through KCOV and
		 * such which we have no hope of annotating.
		 *
		 * As such, blanket accept self-references without issue.
		 */
		return;
	}

	if (dest->noendbr)
		return;

	warn_noendbr("", insn->sec, insn->offset, dest);
}

static void validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;

	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
		/*
		 * We're looking for code references setting up indirect code
		 * flow. As such, ignore direct code flow and the actual
		 * dynamic branches.
		 */
		return;

	case INSN_NOP:
		/*
		 * handle_group_alt() will create INSN_NOP instructions that
		 * don't belong to any section; ignore all NOPs since they
		 * won't carry a (useful) relocation anyway.
		 */
		return;

	default:
		break;
	}

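	/*
	 * An instruction can carry more than one relocation; scan the
	 * instruction's byte range and check each of them.
	 */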
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {
		dest = validate_ibt_reloc(file, reloc);
		if (dest)
			validate_ibt_dest(file, insn, dest);
	}
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

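		/*
		 * Each instruction gets two visited bits, one per UACCESS
		 * state, so a path is re-validated when it's reached again
		 * with the other uaccess setting.
		 */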
		visited = 1 << state.uaccess;
		if (insn->visited) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			if (sls && !insn->retpoline_safe &&
			    next_insn && next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
			}
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (insn->dead_end)
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
			if (sls && !insn->retpoline_safe &&
			    next_insn && next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
			}

			/* fallthrough */
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (ibt)
			validate_ibt_insn(file, insn);

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

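	/*
	 * Start a validation walk at every hinted instruction that the
	 * regular function walks didn't already reach (e.g. entry code).
	 */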
	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules: since they
		 * are loaded late, they very much do need retpolines in
		 * their .init.text.
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}

static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	/*
	 * Whole archive runs might encounter dead code from weak symbols.
	 * This is where the linker will have dropped the weak symbol in
	 * favour of a regular symbol, but leaves the code in place.
	 *
	 * In this case we'll find a piece of code (whole function) that is not
	 * covered by a !section symbol. Ignore them.
	 */
	if (!insn->func && lto) {
		int size = find_symbol_hole_containing(insn->sec, insn->offset);
		unsigned long end = insn->offset + size;

		if (!size) /* not a hole */
			return false;

		if (size < 0) /* hole until the end */
			return true;

		sec_for_each_insn_continue(file, insn) {
			/*
			 * If we reach a visited instruction at or before the
			 * end of the hole, ignore the unreachable.
			 */
			if (insn->visited)
				return true;

			if (insn->offset >= end)
				break;

			/*
			 * If this hole jumps to a .cold function, mark it ignore too.
			 */
			if (insn->jump_dest && insn->jump_dest->func &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				struct instruction *dest = insn->jump_dest;
				func_for_each_insn(file, dest->func, dest)
					dest->ignore = true;
			}
		}

		return false;
	}

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

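	/* Only validate the canonical symbol; skip subfunctions and aliases. */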
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;

	for_each_sec(file, sec) {
		bool is_data;

		/* already done in validate_branch() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		if (!strncmp(sec->name, ".orc", 4))
			continue;

		if (!strncmp(sec->name, ".discard", 8))
			continue;

		if (!strncmp(sec->name, ".debug", 6))
			continue;

		if (!strcmp(sec->name, "_error_injection_whitelist"))
			continue;

		if (!strcmp(sec->name, "_kprobe_blacklist"))
			continue;

		is_data = strstr(sec->name, ".data") || strstr(sec->name, ".rodata");

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list) {
			struct instruction *dest;

			dest = validate_ibt_reloc(file, reloc);
			if (is_data && dest && !dest->noendbr)
				warn_noendbr("data ", sec, reloc->offset, dest);
		}
	}

	return 0;
}

static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	if (lto && !(vmlinux || module)) {
		fprintf(stderr, "--lto requires: --vmlinux or --module\n");
		return 1;
	}

	if (ibt && !lto) {
		fprintf(stderr, "--ibt requires: --lto\n");
		return 1;
	}

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

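	/*
	 * Size the CFI hash relative to the symbol table: symbol_bits - 3
	 * means roughly one bucket per eight symbols.
	 */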
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (vmlinux && !lto) {
		ret = validate_vmlinux_functions(file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	ret = validate_functions(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (!warnings) {
		ret = validate_reachable_instructions(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	ret = create_static_call_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings. These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}