// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct alternative *next;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
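
/*
 * Illustrative example (editorial, not part of the upstream code): for a
 * section containing "xor %eax,%eax; ret" at offsets 0x10 and 0x12,
 *
 *	find_insn(file, sec, 0x12);	// returns the RET instruction
 *	find_insn(file, sec, 0x11);	// returns NULL: 0x11 is not a
 *					// decoded instruction boundary
 *
 * Lookups are exact-offset only; there is no nearest-match behavior.
 */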

struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)
		return NULL;

	return insn;
}
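
/*
 * Note (editorial): instructions are stored in contiguous chunks of
 * INSN_CHUNK_SIZE entries (see decode_instructions()), so the common case
 * above is simple pointer arithmetic (insn++). Only when idx reaches
 * INSN_CHUNK_MAX does the walk fall back to a hash lookup at offset + len
 * to hop into the next chunk; a zero-length slot marks the unused tail of
 * a chunk.
 */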

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

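/*
 * Note (editorial): the one-iteration outer loop below exists only to
 * declare __sec in a scope private to the macro expansion, so that
 * for_each_insn() remains a single statement and can safely be used in
 * un-braced if/else bodies.
 */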
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static inline struct symbol *insn_call_dest(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return NULL;

	return insn->_call_dest;
}

static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_cpu_bringup_again",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
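
/*
 * Illustrative example (editorial): if foo() ends in "jmp bar" and bar()
 * ends in "jmp foo", __dead_end_function() would chase the sibling calls
 * back and forth; the recursion == 5 cutoff above breaks that cycle and
 * conservatively reports both functions as returning.
 */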

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we
	 * cannot correctly determine insn_call_dest(insn)->sec (external
	 * symbols do not have a section).
	 */
	if (opts.link && opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
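
/*
 * Note (editorial): both helpers above rely on 'hash' being the *first*
 * member of struct cfi_state, so that skipping sizeof(cfi->hash) bytes
 * from the start of the object covers exactly the remaining members.
 * Hashing and comparison must agree on that span, otherwise equal states
 * could compare unequal and the cache below would silently stop hitting.
 */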

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}
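
/*
 * Illustrative sizing example (editorial): with size == 100000 decoded
 * instructions, ilog2(100000) == 16, so the table gets 2^16 buckets and
 * the mmap() above requests (sizeof(struct hlist_head) << 16) bytes. The
 * max(10, ...) floor keeps tiny objects from getting a degenerate table.
 */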

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules, which are
		 * loaded late; they very much do need retpolines in their
		 * .init.text.
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
				if (!insns) {
					WARN("malloc failed");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      insn);
			if (ret)
				return ret;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

//		printf("%s: last chunk used: %d\n", sec->name, (int)idx);

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				WARN("%s(): STT_FUNC at end of section",
				     func->name);
				return -1;
			}

			if (func->return_thunk || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}

/*
 * Read the pv_ops[] .data table to find the statically initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}
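
/*
 * Illustrative example (editorial, hypothetical initializer): for an entry
 * such as
 *
 *	pv_ops[] = { ..., .mmu.set_pte = xen_set_pte, ... };
 *
 * the relocation for that slot resolves func to xen_set_pte and idx to
 * (slot offset) / sizeof(unsigned long), which objtool_pv_add() records
 * as a possible target for indirect pv_ops calls.
 */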

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops)
		return -1;

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
		add_pv_ops(file, pv_ops);

	return 0;
}

static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = prev_insn_same_sec(file, insn);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported. In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = prev_insn_same_sec(file, insn);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}
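
/*
 * Note (editorial): the .rela.discard.unreachable entries consumed above
 * are typically emitted by the kernel's unreachable()/annotate_unreachable()
 * macros, which store the annotated code address in a discarded section so
 * objtool can mark the preceding instruction as a dead end.
 */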

static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
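
/*
 * Illustrative name transformation (editorial): the in-place prefix
 * rewrite above turns a trampoline symbol such as
 *
 *	__SCT__preempt_schedule		(STATIC_CALL_TRAMP_PREFIX_STR)
 *
 * into the matching key symbol
 *
 *	__SCK__preempt_schedule		(STATIC_CALL_KEY_PREFIX_STR)
 *
 * The pointer arithmetic lines the key prefix up with the end of the
 * tramp prefix, so it also works if the prefixes differ in length; see
 * static_call_types.h for the actual prefix definitions.
 */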

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module")))
			WARN("%s(): not an indirect call target", sym->name);

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec, *s;
	struct symbol *sym;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			idx++;
		}
	}

	sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sec(file, s) {
		if (!s->text)
			continue;

		list_for_each_entry(sym, &s->symbol_list, list) {
			if (sym->type != STT_FUNC)
				continue;

			if (strncmp(sym->name, "__cfi_", 6))
				continue;

			loc = (unsigned int *)sec->data->d_buf + idx;
			memset(loc, 0, sizeof(unsigned int));

			if (elf_add_reloc_to_insn(file->elf, sec,
						  idx * sizeof(unsigned int),
						  R_X86_64_PC32,
						  s, sym->offset))
				return -1;

			idx++;
		}
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	int addrsize = elf_class_addrsize(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addrsize;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
		void *loc;

		loc = sec->data->d_buf + idx;
		memset(loc, 0, addrsize);

		if (elf_add_reloc_to_insn(file->elf, sec, idx,
					  addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
					  insn->sec, insn->offset))
			return -1;

		idx += addrsize;
	}

	return 0;
}
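
/*
 * Layout note (editorial): unlike the PC32-based site tables above, each
 * __mcount_loc slot is pointer-sized (addrsize) and carries an absolute
 * R_ABS32/R_ABS64 relocation, since ftrace consumes it as an array of
 * unsigned long addresses; idx here is a byte offset, not an entry index.
 */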

static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		loc = (unsigned int *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned int));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	"clear_user_erms",
	"clear_user_rep_good",
	"clear_user_original",
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines. This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a
	 * function attribute, so they need a little help: NOP out any such
	 * calls from noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
				       : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn with two new
			 * insns: RET; INT3, except we only have a single
			 * struct insn here. Mark it retpoline_safe to avoid
			 * the SLS warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);
		if (opts.mnop) {
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (insn->type == INSN_CALL && !insn->sec->init)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn, struct symbol *sym)
{
	if (insn->offset == sym->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == sym->offset + prev->len)
			return true;
	}

	return false;
}
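
/*
 * Illustrative example (editorial): with IBT enabled a function may look
 * like
 *
 *	foo:	endbr64
 *		...
 *
 * and a direct "jmp foo+4" that lands just past the 4-byte ENDBR is still
 * treated as targeting the start of foo() by the check above.
 */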

/*
 * A sibling call is a tail-call to another symbol -- to differentiate from a
 * recursive tail-call which is to the same symbol.
 */
static bool jump_is_sibling_call(struct objtool_file *file,
				 struct instruction *from, struct instruction *to)
{
	struct symbol *fs = from->sym;
	struct symbol *ts = to->sym;

	/* Not a sibling call if from/to a symbol hole */
	if (!fs || !ts)
		return false;

	/* Not a sibling call if not targeting the start of a symbol. */
	if (!is_first_func_insn(file, to, ts))
		return false;

	/* Disallow sibling calls into STT_NOTYPE */
	if (ts->type == STT_NOTYPE)
		return false;

	/* Must not be self to be a sibling */
	return fs->pfunc != ts->pfunc;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn_func(insn)) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn_func(insn) && insn_func(jump_dest) &&
		    insn_func(insn) != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn_func(insn)->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				insn_func(insn)->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = insn_func(insn);
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			add_call_dest(file, insn, insn_func(jump_dest), true);
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = orig_insn->alt_group;
	if (!orig_alt_group) {
		struct instruction *last_orig_insn = NULL;

		orig_alt_group = malloc(sizeof(*orig_alt_group));
		if (!orig_alt_group) {
			WARN("malloc failed");
			return -1;
		}
		orig_alt_group->cfi = calloc(special_alt->orig_len,
					     sizeof(struct cfi_state *));
		if (!orig_alt_group->cfi) {
			WARN("calloc failed");
			return -1;
		}

		insn = orig_insn;
		sec_for_each_insn_from(file, insn) {
			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
				break;

			insn->alt_group = orig_alt_group;
			last_orig_insn = insn;
		}
		orig_alt_group->orig_group = NULL;
		orig_alt_group->first_insn = orig_insn;
		orig_alt_group->last_insn = last_orig_insn;
		orig_alt_group->nop = NULL;
	} else {
		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
			WARN_FUNC("weirdly overlapping alternative! %ld != %d",
				  orig_insn->sec, orig_insn->offset,
				  orig_alt_group->last_insn->offset +
				  orig_alt_group->last_insn->len -
				  orig_alt_group->first_insn->offset,
				  special_alt->orig_len);
			return -1;
		}
	}

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original. This is needed to
		 * allow propagate_alt_cfi() to do its magic. When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->sym = orig_insn->sym;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->sym = orig_insn->sym;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
			if (!insn->jump_dest) {
				WARN_FUNC("can't find alternative jump destination",
					  insn->sec, insn->offset);
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = last_new_insn;
	new_alt_group->nop = nop;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
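
/*
 * Padding example (editorial): for an alternative with orig_len == 5 and
 * new_len == 3, the fake nop created above gets offset new_off + 3 and
 * len == 2, so the replacement alt_group covers the same 5 bytes as the
 * original group and propagate_alt_cfi() can index one shared CFI array
 * by offset for both groups.
 */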

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions that can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn_func(insn)->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
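
/*
 * Illustrative table walk (editorial): for a .rodata jump table with
 * 8-byte entries at offsets 0x40, 0x48 and 0x50, the loop above consumes
 * relocations at exactly those offsets (prev_offset + 8 stride) and stops
 * at the first gap, at the next table's jump_table_start marker, or at an
 * entry that leaves the parent function.
 */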

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

2121 /*
2122	 * Backward search using the @first_jump_src links; these help avoid
2123	 * much of the 'in between' code, which could otherwise confuse the
2124	 * search.
2125 */
2126 for (;
2127 insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2128 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2129
2130 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2131 break;
2132
2133 /* allow small jumps within the range */
2134 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2135 insn->jump_dest &&
2136 (insn->jump_dest->offset <= insn->offset ||
2137 insn->jump_dest->offset > orig_insn->offset))
2138 break;
2139
2140 table_reloc = arch_find_switch_table(file, insn);
2141 if (!table_reloc)
2142 continue;
2143 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2144 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2145 continue;
2146
2147 return table_reloc;
2148 }
2149
2150 return NULL;
2151}
2152
2153/*
2154 * First pass: Mark the head of each jump table so that in the next pass,
2155 * we know when a given jump table ends and the next one starts.
2156 */
2157static void mark_func_jump_tables(struct objtool_file *file,
2158 struct symbol *func)
2159{
2160 struct instruction *insn, *last = NULL;
2161 struct reloc *reloc;
2162
2163 func_for_each_insn(file, func, insn) {
2164 if (!last)
2165 last = insn;
2166
2167 /*
2168 * Store back-pointers for unconditional forward jumps such
2169 * that find_jump_table() can back-track using those and
2170 * avoid some potentially confusing code.
2171 */
2172 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2173 insn->offset > last->offset &&
2174 insn->jump_dest->offset > insn->offset &&
2175 !insn->jump_dest->first_jump_src) {
2176
2177 insn->jump_dest->first_jump_src = insn;
2178 last = insn->jump_dest;
2179 }
2180
2181 if (insn->type != INSN_JUMP_DYNAMIC)
2182 continue;
2183
2184 reloc = find_jump_table(file, func, insn);
2185 if (reloc) {
2186 reloc->jump_table_start = true;
2187 insn->_jump_table = reloc;
2188 }
2189 }
2190}
2191
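/*
 * Second pass: for each dynamic jump that mark_func_jump_tables()
 * associated with a jump table, add the table entries as alternative
 * branch targets.
 */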
2192static int add_func_jump_tables(struct objtool_file *file,
2193 struct symbol *func)
2194{
2195 struct instruction *insn;
2196 int ret;
2197
2198 func_for_each_insn(file, func, insn) {
2199 if (!insn_jump_table(insn))
2200 continue;
2201
2202 ret = add_jump_table(file, insn, insn_jump_table(insn));
2203 if (ret)
2204 return ret;
2205 }
2206
2207 return 0;
2208}
2209
2210/*
2211 * For some switch statements, gcc generates a jump table in the .rodata
2212 * section which contains a list of addresses within the function to jump to.
2213 * This finds these jump tables and adds them to the insn->alts lists.
2214 */
2215static int add_jump_table_alts(struct objtool_file *file)
2216{
2217 struct section *sec;
2218 struct symbol *func;
2219 int ret;
2220
2221 if (!file->rodata)
2222 return 0;
2223
2224 for_each_sec(file, sec) {
2225 list_for_each_entry(func, &sec->symbol_list, list) {
2226 if (func->type != STT_FUNC)
2227 continue;
2228
2229 mark_func_jump_tables(file, func);
2230 ret = add_func_jump_tables(file, func);
2231 if (ret)
2232 return ret;
2233 }
2234 }
2235
2236 return 0;
2237}
2238
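/* Reset @state to the architecture's default CFI at function entry. */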
2239static void set_func_state(struct cfi_state *state)
2240{
2241 state->cfa = initial_func_cfi.cfa;
2242 memcpy(&state->regs, &initial_func_cfi.regs,
2243 CFI_NUM_REGS * sizeof(struct cfi_reg));
2244 state->stack_size = initial_func_cfi.cfa.offset;
2245}
2246
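/*
 * Parse the .discard.unwind_hints section generated by the UNWIND_HINT*()
 * annotations and attach the resulting CFI state to each hinted
 * instruction.
 */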
2247static int read_unwind_hints(struct objtool_file *file)
2248{
2249 struct cfi_state cfi = init_cfi;
2250 struct section *sec, *relocsec;
2251 struct unwind_hint *hint;
2252 struct instruction *insn;
2253 struct reloc *reloc;
2254 int i;
2255
2256 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
2257 if (!sec)
2258 return 0;
2259
2260 relocsec = sec->reloc;
2261 if (!relocsec) {
2262 WARN("missing .rela.discard.unwind_hints section");
2263 return -1;
2264 }
2265
2266 if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
2267 WARN("struct unwind_hint size mismatch");
2268 return -1;
2269 }
2270
2271 file->hints = true;
2272
2273 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
2274 hint = (struct unwind_hint *)sec->data->d_buf + i;
2275
2276 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
2277 if (!reloc) {
2278 WARN("can't find reloc for unwind_hints[%d]", i);
2279 return -1;
2280 }
2281
2282 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2283 if (!insn) {
2284 WARN("can't find insn for unwind_hints[%d]", i);
2285 return -1;
2286 }
2287
2288 insn->hint = true;
2289
2290 if (hint->type == UNWIND_HINT_TYPE_SAVE) {
2291 insn->hint = false;
2292 insn->save = true;
2293 continue;
2294 }
2295
2296 if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
2297 insn->restore = true;
2298 continue;
2299 }
2300
2301 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
2302 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
2303
2304 if (sym && sym->bind == STB_GLOBAL) {
2305 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
2306 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
2307 insn->sec, insn->offset);
2308 }
2309
2310 insn->entry = 1;
2311 }
2312 }
2313
2314 if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
2315 hint->type = UNWIND_HINT_TYPE_CALL;
2316 insn->entry = 1;
2317 }
2318
2319 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
2320 insn->cfi = &func_cfi;
2321 continue;
2322 }
2323
2324 if (insn->cfi)
2325 cfi = *(insn->cfi);
2326
2327 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
2328 WARN_FUNC("unsupported unwind_hint sp base reg %d",
2329 insn->sec, insn->offset, hint->sp_reg);
2330 return -1;
2331 }
2332
2333 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
2334 cfi.type = hint->type;
2335 cfi.signal = hint->signal;
2336 cfi.end = hint->end;
2337
2338 insn->cfi = cfi_hash_find_or_add(&cfi);
2339 }
2340
2341 return 0;
2342}
2343
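/*
 * Apply ANNOTATE_NOENDBR annotations: mark addresses which deliberately
 * lack ENDBR so that IBT validation won't warn about references to them.
 */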
2344static int read_noendbr_hints(struct objtool_file *file)
2345{
2346 struct section *sec;
2347 struct instruction *insn;
2348 struct reloc *reloc;
2349
2350 sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2351 if (!sec)
2352 return 0;
2353
2354 list_for_each_entry(reloc, &sec->reloc_list, list) {
2355 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2356 if (!insn) {
2357 WARN("bad .discard.noendbr entry");
2358 return -1;
2359 }
2360
2361 insn->noendbr = 1;
2362 }
2363
2364 return 0;
2365}
2366
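/*
 * Apply ANNOTATE_RETPOLINE_SAFE annotations: only indirect jumps/calls,
 * returns and NOPs may be marked exempt from retpoline validation.
 */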
2367static int read_retpoline_hints(struct objtool_file *file)
2368{
2369 struct section *sec;
2370 struct instruction *insn;
2371 struct reloc *reloc;
2372
2373 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2374 if (!sec)
2375 return 0;
2376
2377 list_for_each_entry(reloc, &sec->reloc_list, list) {
2378 if (reloc->sym->type != STT_SECTION) {
2379 WARN("unexpected relocation symbol type in %s", sec->name);
2380 return -1;
2381 }
2382
2383 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2384 if (!insn) {
2385 WARN("bad .discard.retpoline_safe entry");
2386 return -1;
2387 }
2388
2389 if (insn->type != INSN_JUMP_DYNAMIC &&
2390 insn->type != INSN_CALL_DYNAMIC &&
2391 insn->type != INSN_RETURN &&
2392 insn->type != INSN_NOP) {
2393 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
2394 insn->sec, insn->offset);
2395 return -1;
2396 }
2397
2398 insn->retpoline_safe = true;
2399 }
2400
2401 return 0;
2402}
2403
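/*
 * Apply the instrumentation-region annotations: .discard.instr_begin and
 * .discard.instr_end adjust insn->instr, which the noinstr validation in
 * validate_branch() consults.
 */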
2404static int read_instr_hints(struct objtool_file *file)
2405{
2406 struct section *sec;
2407 struct instruction *insn;
2408 struct reloc *reloc;
2409
2410 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2411 if (!sec)
2412 return 0;
2413
2414 list_for_each_entry(reloc, &sec->reloc_list, list) {
2415 if (reloc->sym->type != STT_SECTION) {
2416 WARN("unexpected relocation symbol type in %s", sec->name);
2417 return -1;
2418 }
2419
2420 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2421 if (!insn) {
2422 WARN("bad .discard.instr_end entry");
2423 return -1;
2424 }
2425
2426 insn->instr--;
2427 }
2428
2429 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2430 if (!sec)
2431 return 0;
2432
2433 list_for_each_entry(reloc, &sec->reloc_list, list) {
2434 if (reloc->sym->type != STT_SECTION) {
2435 WARN("unexpected relocation symbol type in %s", sec->name);
2436 return -1;
2437 }
2438
2439 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2440 if (!insn) {
2441 WARN("bad .discard.instr_begin entry");
2442 return -1;
2443 }
2444
2445 insn->instr++;
2446 }
2447
2448 return 0;
2449}
2450
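/*
 * Apply ANNOTATE_INTRA_FUNCTION_CALL annotations: such CALLs never leave
 * their function and are converted to unconditional jumps below.
 */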
2451static int read_intra_function_calls(struct objtool_file *file)
2452{
2453 struct instruction *insn;
2454 struct section *sec;
2455 struct reloc *reloc;
2456
2457 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2458 if (!sec)
2459 return 0;
2460
2461 list_for_each_entry(reloc, &sec->reloc_list, list) {
2462 unsigned long dest_off;
2463
2464 if (reloc->sym->type != STT_SECTION) {
2465 WARN("unexpected relocation symbol type in %s",
2466 sec->name);
2467 return -1;
2468 }
2469
2470 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2471 if (!insn) {
2472 WARN("bad .discard.intra_function_call entry");
2473 return -1;
2474 }
2475
2476 if (insn->type != INSN_CALL) {
2477 WARN_FUNC("intra_function_call not a direct call",
2478 insn->sec, insn->offset);
2479 return -1;
2480 }
2481
2482 /*
2483 * Treat intra-function CALLs as JMPs, but with a stack_op.
2484 * See add_call_destinations(), which strips stack_ops from
2485 * normal CALLs.
2486 */
2487 insn->type = INSN_JUMP_UNCONDITIONAL;
2488
2489 dest_off = arch_jump_destination(insn);
2490 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2491 if (!insn->jump_dest) {
2492 WARN_FUNC("can't find call dest at %s+0x%lx",
2493 insn->sec, insn->offset,
2494 insn->sec->name, dest_off);
2495 return -1;
2496 }
2497 }
2498
2499 return 0;
2500}
2501
2502/*
2503 * Return true if name matches an instrumentation function, where calls to that
2504 * function from noinstr code can safely be removed, but compilers won't do so.
2505 */
2506static bool is_profiling_func(const char *name)
2507{
2508 /*
2509 * Many compilers cannot disable KCOV with a function attribute.
2510 */
2511 if (!strncmp(name, "__sanitizer_cov_", 16))
2512 return true;
2513
2514 /*
2515 * Some compilers currently do not remove __tsan_func_entry/exit nor
2516 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
2517	 * the __no_sanitize_thread attribute, so remove them here. Once the
2518	 * kernel's minimum Clang version is 14.0, this workaround can be removed.
2519 */
2520 if (!strncmp(name, "__tsan_func_", 12) ||
2521 !strcmp(name, "__tsan_atomic_signal_fence"))
2522 return true;
2523
2524 return false;
2525}
2526
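/*
 * Classify global symbols which need special treatment: static call
 * trampolines, retpoline and return thunks, ftrace entries and profiling
 * functions.
 */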
2527static int classify_symbols(struct objtool_file *file)
2528{
2529 struct section *sec;
2530 struct symbol *func;
2531
2532 for_each_sec(file, sec) {
2533 list_for_each_entry(func, &sec->symbol_list, list) {
2534 if (func->bind != STB_GLOBAL)
2535 continue;
2536
2537 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2538 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2539 func->static_call_tramp = true;
2540
2541 if (arch_is_retpoline(func))
2542 func->retpoline_thunk = true;
2543
2544 if (arch_is_rethunk(func))
2545 func->return_thunk = true;
2546
2547 if (arch_ftrace_match(func->name))
2548 func->fentry = true;
2549
2550 if (is_profiling_func(func->name))
2551 func->profiling_func = true;
2552 }
2553 }
2554
2555 return 0;
2556}
2557
2558static void mark_rodata(struct objtool_file *file)
2559{
2560 struct section *sec;
2561 bool found = false;
2562
2563 /*
2564 * Search for the following rodata sections, each of which can
2565 * potentially contain jump tables:
2566 *
2567 * - .rodata: can contain GCC switch tables
2568 * - .rodata.<func>: same, if -fdata-sections is being used
2569 * - .rodata..c_jump_table: contains C annotated jump tables
2570 *
2571 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2572 */
2573 for_each_sec(file, sec) {
2574 if (!strncmp(sec->name, ".rodata", 7) &&
2575 !strstr(sec->name, ".str1.")) {
2576 sec->rodata = true;
2577 found = true;
2578 }
2579 }
2580
2581 file->rodata = found;
2582}
2583
2584static int decode_sections(struct objtool_file *file)
2585{
2586 int ret;
2587
2588 mark_rodata(file);
2589
2590 ret = init_pv_ops(file);
2591 if (ret)
2592 return ret;
2593
2594 /*
2595	 * Must be before add_jump_destinations() and add_call_destinations().
2596 */
2597 ret = classify_symbols(file);
2598 if (ret)
2599 return ret;
2600
2601 ret = decode_instructions(file);
2602 if (ret)
2603 return ret;
2604
2605 add_ignores(file);
2606 add_uaccess_safe(file);
2607
2608 ret = add_ignore_alternatives(file);
2609 if (ret)
2610 return ret;
2611
2612 /*
2613 * Must be before read_unwind_hints() since that needs insn->noendbr.
2614 */
2615 ret = read_noendbr_hints(file);
2616 if (ret)
2617 return ret;
2618
2619 /*
2620 * Must be before add_jump_destinations(), which depends on 'func'
2621 * being set for alternatives, to enable proper sibling call detection.
2622 */
2623 if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2624 ret = add_special_section_alts(file);
2625 if (ret)
2626 return ret;
2627 }
2628
2629 ret = add_jump_destinations(file);
2630 if (ret)
2631 return ret;
2632
2633 /*
2634	 * Must be before add_call_destinations(); it changes INSN_CALL to
2635	 * INSN_JUMP_UNCONDITIONAL.
2636 */
2637 ret = read_intra_function_calls(file);
2638 if (ret)
2639 return ret;
2640
2641 ret = add_call_destinations(file);
2642 if (ret)
2643 return ret;
2644
2645 /*
2646 * Must be after add_call_destinations() such that it can override
2647 * dead_end_function() marks.
2648 */
2649 ret = add_dead_ends(file);
2650 if (ret)
2651 return ret;
2652
2653 ret = add_jump_table_alts(file);
2654 if (ret)
2655 return ret;
2656
2657 ret = read_unwind_hints(file);
2658 if (ret)
2659 return ret;
2660
2661 ret = read_retpoline_hints(file);
2662 if (ret)
2663 return ret;
2664
2665 ret = read_instr_hints(file);
2666 if (ret)
2667 return ret;
2668
2669 return 0;
2670}
2671
2672static bool is_fentry_call(struct instruction *insn)
2673{
2674 if (insn->type == INSN_CALL &&
2675 insn_call_dest(insn) &&
2676 insn_call_dest(insn)->fentry)
2677 return true;
2678
2679 return false;
2680}
2681
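/*
 * Return true if the current CFI state deviates from the function-entry
 * state, i.e. the frame or any callee-saved register has been modified.
 */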
2682static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2683{
2684 struct cfi_state *cfi = &state->cfi;
2685 int i;
2686
2687 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2688 return true;
2689
2690 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2691 return true;
2692
2693 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2694 return true;
2695
2696 for (i = 0; i < CFI_NUM_REGS; i++) {
2697 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2698 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2699 return true;
2700 }
2701
2702 return false;
2703}
2704
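/* Is the register stored at the given CFA-relative frame position? */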
2705static bool check_reg_frame_pos(const struct cfi_reg *reg,
2706 int expected_offset)
2707{
2708 return reg->base == CFI_CFA &&
2709 reg->offset == expected_offset;
2710}
2711
2712static bool has_valid_stack_frame(struct insn_state *state)
2713{
2714 struct cfi_state *cfi = &state->cfi;
2715
2716 if (cfi->cfa.base == CFI_BP &&
2717 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2718 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2719 return true;
2720
2721 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2722 return true;
2723
2724 return false;
2725}
2726
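/*
 * In UNWIND_HINT_TYPE_REGS(_PARTIAL) regions only the CFA offset needs
 * tracking: pushes, pops and immediate adjustments of the stack pointer.
 */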
2727static int update_cfi_state_regs(struct instruction *insn,
2728 struct cfi_state *cfi,
2729 struct stack_op *op)
2730{
2731 struct cfi_reg *cfa = &cfi->cfa;
2732
2733 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2734 return 0;
2735
2736 /* push */
2737 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2738 cfa->offset += 8;
2739
2740 /* pop */
2741 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2742 cfa->offset -= 8;
2743
2744 /* add immediate to sp */
2745 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2746 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2747 cfa->offset -= op->src.offset;
2748
2749 return 0;
2750}
2751
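/*
 * save_reg() records where a callee-saved register was stored; only the
 * first save wins.  restore_reg() resets it to the function-entry
 * default.
 */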
2752static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2753{
2754 if (arch_callee_saved_reg(reg) &&
2755 cfi->regs[reg].base == CFI_UNDEFINED) {
2756 cfi->regs[reg].base = base;
2757 cfi->regs[reg].offset = offset;
2758 }
2759}
2760
2761static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2762{
2763 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2764 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2765}
2766
2767/*
2768 * A note about DRAP stack alignment:
2769 *
2770 * GCC has the concept of a DRAP register, which is used to help keep track of
2771 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2772 * register. The typical DRAP pattern is:
2773 *
2774 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2775 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2776 * 41 ff 72 f8 pushq -0x8(%r10)
2777 * 55 push %rbp
2778 * 48 89 e5 mov %rsp,%rbp
2779 * (more pushes)
2780 * 41 52 push %r10
2781 * ...
2782 * 41 5a pop %r10
2783 * (more pops)
2784 * 5d pop %rbp
2785 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2786 * c3 retq
2787 *
2788 * There are some variations in the epilogues, like:
2789 *
2790 * 5b pop %rbx
2791 * 41 5a pop %r10
2792 * 41 5c pop %r12
2793 * 41 5d pop %r13
2794 * 41 5e pop %r14
2795 * c9 leaveq
2796 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2797 * c3 retq
2798 *
2799 * and:
2800 *
2801 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2802 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2803 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2804 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2805 * c9 leaveq
2806 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2807 * c3 retq
2808 *
2809 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2810 * restored beforehand:
2811 *
2812 * 41 55 push %r13
2813 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2814 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2815 * ...
2816 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2817 * 41 5d pop %r13
2818 * c3 retq
2819 */
2820static int update_cfi_state(struct instruction *insn,
2821 struct instruction *next_insn,
2822 struct cfi_state *cfi, struct stack_op *op)
2823{
2824 struct cfi_reg *cfa = &cfi->cfa;
2825 struct cfi_reg *regs = cfi->regs;
2826
2827 /* stack operations don't make sense with an undefined CFA */
2828 if (cfa->base == CFI_UNDEFINED) {
2829 if (insn_func(insn)) {
2830 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2831 return -1;
2832 }
2833 return 0;
2834 }
2835
2836 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2837 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2838 return update_cfi_state_regs(insn, cfi, op);
2839
2840 switch (op->dest.type) {
2841
2842 case OP_DEST_REG:
2843 switch (op->src.type) {
2844
2845 case OP_SRC_REG:
2846 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2847 cfa->base == CFI_SP &&
2848 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2849
2850 /* mov %rsp, %rbp */
2851 cfa->base = op->dest.reg;
2852 cfi->bp_scratch = false;
2853 }
2854
2855 else if (op->src.reg == CFI_SP &&
2856 op->dest.reg == CFI_BP && cfi->drap) {
2857
2858 /* drap: mov %rsp, %rbp */
2859 regs[CFI_BP].base = CFI_BP;
2860 regs[CFI_BP].offset = -cfi->stack_size;
2861 cfi->bp_scratch = false;
2862 }
2863
2864 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2865
2866 /*
2867 * mov %rsp, %reg
2868 *
2869 * This is needed for the rare case where GCC
2870 * does:
2871 *
2872 * mov %rsp, %rax
2873 * ...
2874 * mov %rax, %rsp
2875 */
2876 cfi->vals[op->dest.reg].base = CFI_CFA;
2877 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2878 }
2879
2880 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2881 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2882
2883 /*
2884 * mov %rbp, %rsp
2885 *
2886 * Restore the original stack pointer (Clang).
2887 */
2888 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2889 }
2890
2891 else if (op->dest.reg == cfa->base) {
2892
2893 /* mov %reg, %rsp */
2894 if (cfa->base == CFI_SP &&
2895 cfi->vals[op->src.reg].base == CFI_CFA) {
2896
2897 /*
2898 * This is needed for the rare case
2899 * where GCC does something dumb like:
2900 *
2901 * lea 0x8(%rsp), %rcx
2902 * ...
2903 * mov %rcx, %rsp
2904 */
2905 cfa->offset = -cfi->vals[op->src.reg].offset;
2906 cfi->stack_size = cfa->offset;
2907
2908 } else if (cfa->base == CFI_SP &&
2909 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2910 cfi->vals[op->src.reg].offset == cfa->offset) {
2911
2912 /*
2913 * Stack swizzle:
2914 *
2915 * 1: mov %rsp, (%[tos])
2916 * 2: mov %[tos], %rsp
2917 * ...
2918 * 3: pop %rsp
2919 *
2920 * Where:
2921 *
2922 * 1 - places a pointer to the previous
2923 * stack at the Top-of-Stack of the
2924 * new stack.
2925 *
2926 * 2 - switches to the new stack.
2927 *
2928 * 3 - pops the Top-of-Stack to restore
2929 * the original stack.
2930 *
2931 * Note: we set base to SP_INDIRECT
2932 * here and preserve offset. Therefore
2933 * when the unwinder reaches ToS it
2934 * will dereference SP and then add the
2935 * offset to find the next frame, IOW:
2936 * (%rsp) + offset.
2937 */
2938 cfa->base = CFI_SP_INDIRECT;
2939
2940 } else {
2941 cfa->base = CFI_UNDEFINED;
2942 cfa->offset = 0;
2943 }
2944 }
2945
2946 else if (op->dest.reg == CFI_SP &&
2947 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2948 cfi->vals[op->src.reg].offset == cfa->offset) {
2949
2950 /*
2951 * The same stack swizzle case 2) as above. But
2952 * because we can't change cfa->base, case 3)
2953 * will become a regular POP. Pretend we're a
2954 * PUSH so things don't go unbalanced.
2955 */
2956 cfi->stack_size += 8;
2957 }
2958
2959
2960 break;
2961
2962 case OP_SRC_ADD:
2963 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2964
2965 /* add imm, %rsp */
2966 cfi->stack_size -= op->src.offset;
2967 if (cfa->base == CFI_SP)
2968 cfa->offset -= op->src.offset;
2969 break;
2970 }
2971
2972 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2973
2974 /* lea disp(%rbp), %rsp */
2975 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2976 break;
2977 }
2978
2979 if (!cfi->drap && op->src.reg == CFI_SP &&
2980 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2981 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
2982
2983 /* lea disp(%rsp), %rbp */
2984 cfa->base = CFI_BP;
2985 cfa->offset -= op->src.offset;
2986 cfi->bp_scratch = false;
2987 break;
2988 }
2989
2990 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2991
2992 /* drap: lea disp(%rsp), %drap */
2993 cfi->drap_reg = op->dest.reg;
2994
2995 /*
2996 * lea disp(%rsp), %reg
2997 *
2998 * This is needed for the rare case where GCC
2999 * does something dumb like:
3000 *
3001 * lea 0x8(%rsp), %rcx
3002 * ...
3003 * mov %rcx, %rsp
3004 */
3005 cfi->vals[op->dest.reg].base = CFI_CFA;
3006 cfi->vals[op->dest.reg].offset =
3007 -cfi->stack_size + op->src.offset;
3008
3009 break;
3010 }
3011
3012 if (cfi->drap && op->dest.reg == CFI_SP &&
3013 op->src.reg == cfi->drap_reg) {
3014
3015 /* drap: lea disp(%drap), %rsp */
3016 cfa->base = CFI_SP;
3017 cfa->offset = cfi->stack_size = -op->src.offset;
3018 cfi->drap_reg = CFI_UNDEFINED;
3019 cfi->drap = false;
3020 break;
3021 }
3022
3023 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
3024 WARN_FUNC("unsupported stack register modification",
3025 insn->sec, insn->offset);
3026 return -1;
3027 }
3028
3029 break;
3030
3031 case OP_SRC_AND:
3032 if (op->dest.reg != CFI_SP ||
3033 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
3034 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
3035 WARN_FUNC("unsupported stack pointer realignment",
3036 insn->sec, insn->offset);
3037 return -1;
3038 }
3039
3040 if (cfi->drap_reg != CFI_UNDEFINED) {
3041 /* drap: and imm, %rsp */
3042 cfa->base = cfi->drap_reg;
3043 cfa->offset = cfi->stack_size = 0;
3044 cfi->drap = true;
3045 }
3046
3047 /*
3048 * Older versions of GCC (4.8ish) realign the stack
3049 * without DRAP, with a frame pointer.
3050 */
3051
3052 break;
3053
3054 case OP_SRC_POP:
3055 case OP_SRC_POPF:
3056 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3057
3058 /* pop %rsp; # restore from a stack swizzle */
3059 cfa->base = CFI_SP;
3060 break;
3061 }
3062
3063 if (!cfi->drap && op->dest.reg == cfa->base) {
3064
3065 /* pop %rbp */
3066 cfa->base = CFI_SP;
3067 }
3068
3069 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3070 op->dest.reg == cfi->drap_reg &&
3071 cfi->drap_offset == -cfi->stack_size) {
3072
3073 /* drap: pop %drap */
3074 cfa->base = cfi->drap_reg;
3075 cfa->offset = 0;
3076 cfi->drap_offset = -1;
3077
3078 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3079
3080 /* pop %reg */
3081 restore_reg(cfi, op->dest.reg);
3082 }
3083
3084 cfi->stack_size -= 8;
3085 if (cfa->base == CFI_SP)
3086 cfa->offset -= 8;
3087
3088 break;
3089
3090 case OP_SRC_REG_INDIRECT:
3091 if (!cfi->drap && op->dest.reg == cfa->base &&
3092 op->dest.reg == CFI_BP) {
3093
3094 /* mov disp(%rsp), %rbp */
3095 cfa->base = CFI_SP;
3096 cfa->offset = cfi->stack_size;
3097 }
3098
3099 if (cfi->drap && op->src.reg == CFI_BP &&
3100 op->src.offset == cfi->drap_offset) {
3101
3102 /* drap: mov disp(%rbp), %drap */
3103 cfa->base = cfi->drap_reg;
3104 cfa->offset = 0;
3105 cfi->drap_offset = -1;
3106 }
3107
3108 if (cfi->drap && op->src.reg == CFI_BP &&
3109 op->src.offset == regs[op->dest.reg].offset) {
3110
3111 /* drap: mov disp(%rbp), %reg */
3112 restore_reg(cfi, op->dest.reg);
3113
3114 } else if (op->src.reg == cfa->base &&
3115 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3116
3117 /* mov disp(%rbp), %reg */
3118 /* mov disp(%rsp), %reg */
3119 restore_reg(cfi, op->dest.reg);
3120
3121 } else if (op->src.reg == CFI_SP &&
3122 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3123
3124 /* mov disp(%rsp), %reg */
3125 restore_reg(cfi, op->dest.reg);
3126 }
3127
3128 break;
3129
3130 default:
3131 WARN_FUNC("unknown stack-related instruction",
3132 insn->sec, insn->offset);
3133 return -1;
3134 }
3135
3136 break;
3137
3138 case OP_DEST_PUSH:
3139 case OP_DEST_PUSHF:
3140 cfi->stack_size += 8;
3141 if (cfa->base == CFI_SP)
3142 cfa->offset += 8;
3143
3144 if (op->src.type != OP_SRC_REG)
3145 break;
3146
3147 if (cfi->drap) {
3148 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3149
3150 /* drap: push %drap */
3151 cfa->base = CFI_BP_INDIRECT;
3152 cfa->offset = -cfi->stack_size;
3153
3154 /* save drap so we know when to restore it */
3155 cfi->drap_offset = -cfi->stack_size;
3156
3157 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3158
3159 /* drap: push %rbp */
3160 cfi->stack_size = 0;
3161
3162 } else {
3163
3164 /* drap: push %reg */
3165 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3166 }
3167
3168 } else {
3169
3170 /* push %reg */
3171 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3172 }
3173
3174 /* detect when asm code uses rbp as a scratch register */
3175 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3176 cfa->base != CFI_BP)
3177 cfi->bp_scratch = true;
3178 break;
3179
3180 case OP_DEST_REG_INDIRECT:
3181
3182 if (cfi->drap) {
3183 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3184
3185 /* drap: mov %drap, disp(%rbp) */
3186 cfa->base = CFI_BP_INDIRECT;
3187 cfa->offset = op->dest.offset;
3188
3189 /* save drap offset so we know when to restore it */
3190 cfi->drap_offset = op->dest.offset;
3191 } else {
3192
3193 /* drap: mov reg, disp(%rbp) */
3194 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3195 }
3196
3197 } else if (op->dest.reg == cfa->base) {
3198
3199 /* mov reg, disp(%rbp) */
3200 /* mov reg, disp(%rsp) */
3201 save_reg(cfi, op->src.reg, CFI_CFA,
3202 op->dest.offset - cfi->cfa.offset);
3203
3204 } else if (op->dest.reg == CFI_SP) {
3205
3206 /* mov reg, disp(%rsp) */
3207 save_reg(cfi, op->src.reg, CFI_CFA,
3208 op->dest.offset - cfi->stack_size);
3209
3210 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3211
3212 /* mov %rsp, (%reg); # setup a stack swizzle. */
3213 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3214 cfi->vals[op->dest.reg].offset = cfa->offset;
3215 }
3216
3217 break;
3218
3219 case OP_DEST_MEM:
3220 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3221 WARN_FUNC("unknown stack-related memory operation",
3222 insn->sec, insn->offset);
3223 return -1;
3224 }
3225
3226 /* pop mem */
3227 cfi->stack_size -= 8;
3228 if (cfa->base == CFI_SP)
3229 cfa->offset -= 8;
3230
3231 break;
3232
3233 default:
3234 WARN_FUNC("unknown stack-related instruction",
3235 insn->sec, insn->offset);
3236 return -1;
3237 }
3238
3239 return 0;
3240}
3241
3242/*
3243 * The stack layouts of alternatives instructions can sometimes diverge when
3244 * they have stack modifications. That's fine as long as the potential stack
3245 * layouts don't conflict at any given potential instruction boundary.
3246 *
3247 * Flatten the CFIs of the different alternative code streams (both original
3248 * and replacement) into a single shared CFI array which can be used to detect
3249 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3250 */
3251static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3252{
3253 struct cfi_state **alt_cfi;
3254 int group_off;
3255
3256 if (!insn->alt_group)
3257 return 0;
3258
3259 if (!insn->cfi) {
3260 WARN("CFI missing");
3261 return -1;
3262 }
3263
3264 alt_cfi = insn->alt_group->cfi;
3265 group_off = insn->offset - insn->alt_group->first_insn->offset;
3266
3267 if (!alt_cfi[group_off]) {
3268 alt_cfi[group_off] = insn->cfi;
3269 } else {
3270 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3271 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3272 struct instruction *orig = orig_group->first_insn;
3273 char *where = offstr(insn->sec, insn->offset);
3274 WARN_FUNC("stack layout conflict in alternatives: %s",
3275 orig->sec, orig->offset, where);
3276 free(where);
3277 return -1;
3278 }
3279 }
3280
3281 return 0;
3282}
3283
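/*
 * Apply @insn's stack operations to the CFI state.  Within alternatives,
 * also track the uaccess (AC) flag across PUSHF/POPF pairs.
 */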
3284static int handle_insn_ops(struct instruction *insn,
3285 struct instruction *next_insn,
3286 struct insn_state *state)
3287{
3288 struct stack_op *op;
3289
3290 for (op = insn->stack_ops; op; op = op->next) {
3291
3292 if (update_cfi_state(insn, next_insn, &state->cfi, op))
3293 return 1;
3294
3295 if (!insn->alt_group)
3296 continue;
3297
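		/*
		 * uaccess_stack is a shift register of saved uaccess bits,
		 * with 0b1 as the empty-stack sentinel.  E.g. with uaccess=1
		 * and an empty stack, PUSHF yields 0b11; the matching POPF
		 * restores uaccess from bit 0 and shifts the sentinel back.
		 */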
3298 if (op->dest.type == OP_DEST_PUSHF) {
3299 if (!state->uaccess_stack) {
3300 state->uaccess_stack = 1;
3301 } else if (state->uaccess_stack >> 31) {
3302 WARN_FUNC("PUSHF stack exhausted",
3303 insn->sec, insn->offset);
3304 return 1;
3305 }
3306 state->uaccess_stack <<= 1;
3307 state->uaccess_stack |= state->uaccess;
3308 }
3309
3310 if (op->src.type == OP_SRC_POPF) {
3311 if (state->uaccess_stack) {
3312 state->uaccess = state->uaccess_stack & 1;
3313 state->uaccess_stack >>= 1;
3314 if (state->uaccess_stack == 1)
3315 state->uaccess_stack = 0;
3316 }
3317 }
3318 }
3319
3320 return 0;
3321}
3322
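/*
 * Compare the recorded CFI of @insn against @cfi2 and warn about the
 * first mismatch: CFA, register locations, unwind type or DRAP state.
 */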
3323static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3324{
3325 struct cfi_state *cfi1 = insn->cfi;
3326 int i;
3327
3328 if (!cfi1) {
3329 WARN("CFI missing");
3330 return false;
3331 }
3332
3333 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3334
3335 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3336 insn->sec, insn->offset,
3337 cfi1->cfa.base, cfi1->cfa.offset,
3338 cfi2->cfa.base, cfi2->cfa.offset);
3339
3340 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3341 for (i = 0; i < CFI_NUM_REGS; i++) {
3342 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
3343 sizeof(struct cfi_reg)))
3344 continue;
3345
3346 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3347 insn->sec, insn->offset,
3348 i, cfi1->regs[i].base, cfi1->regs[i].offset,
3349 i, cfi2->regs[i].base, cfi2->regs[i].offset);
3350 break;
3351 }
3352
3353 } else if (cfi1->type != cfi2->type) {
3354
3355 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
3356 insn->sec, insn->offset, cfi1->type, cfi2->type);
3357
3358 } else if (cfi1->drap != cfi2->drap ||
3359 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3360 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3361
3362 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3363 insn->sec, insn->offset,
3364 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3365 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3366
3367 } else
3368 return true;
3369
3370 return false;
3371}
3372
3373static inline bool func_uaccess_safe(struct symbol *func)
3374{
3375 if (func)
3376 return func->uaccess_safe;
3377
3378 return false;
3379}
3380
3381static inline const char *call_dest_name(struct instruction *insn)
3382{
3383 static char pvname[19];
3384 struct reloc *rel;
3385 int idx;
3386
3387 if (insn_call_dest(insn))
3388 return insn_call_dest(insn)->name;
3389
3390 rel = insn_reloc(NULL, insn);
3391 if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3392 idx = (rel->addend / sizeof(void *));
3393 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3394 return pvname;
3395 }
3396
3397 return "{dynamic}";
3398}
3399
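/*
 * A paravirt indirect call is noinstr-safe when every registered target
 * of the referenced pv_ops[] slot lives in a noinstr section.  A positive
 * result is cached in ->clean.
 */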
3400static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3401{
3402 struct symbol *target;
3403 struct reloc *rel;
3404 int idx;
3405
3406 rel = insn_reloc(file, insn);
3407 if (!rel || strcmp(rel->sym->name, "pv_ops"))
3408 return false;
3409
3410 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3411
3412 if (file->pv_ops[idx].clean)
3413 return true;
3414
3415 file->pv_ops[idx].clean = true;
3416
3417 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3418 if (!target->sec->noinstr) {
3419 WARN("pv_ops[%d]: %s", idx, target->name);
3420 file->pv_ops[idx].clean = false;
3421 }
3422 }
3423
3424 return file->pv_ops[idx].clean;
3425}
3426
3427static inline bool noinstr_call_dest(struct objtool_file *file,
3428 struct instruction *insn,
3429 struct symbol *func)
3430{
3431 /*
3432 * We can't deal with indirect function calls at present;
3433 * assume they're instrumented.
3434 */
3435 if (!func) {
3436 if (file->pv_ops)
3437 return pv_call_dest(file, insn);
3438
3439 return false;
3440 }
3441
3442 /*
3443	 * If the symbol is from a noinstr section, we're good.
3444 */
3445 if (func->sec->noinstr)
3446 return true;
3447
3448 /*
3449 * If the symbol is a static_call trampoline, we can't tell.
3450 */
3451 if (func->static_call_tramp)
3452 return true;
3453
3454 /*
3455 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3456 * something 'BAD' happened. At the risk of taking the machine down,
3457 * let them proceed to get the message out.
3458 */
3459 if (!strncmp(func->name, "__ubsan_handle_", 15))
3460 return true;
3461
3462 return false;
3463}
3464
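/*
 * Validate the noinstr, uaccess and DF constraints for a direct or
 * indirect call.
 */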
3465static int validate_call(struct objtool_file *file,
3466 struct instruction *insn,
3467 struct insn_state *state)
3468{
3469 if (state->noinstr && state->instr <= 0 &&
3470 !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3471 WARN_FUNC("call to %s() leaves .noinstr.text section",
3472 insn->sec, insn->offset, call_dest_name(insn));
3473 return 1;
3474 }
3475
3476 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3477 WARN_FUNC("call to %s() with UACCESS enabled",
3478 insn->sec, insn->offset, call_dest_name(insn));
3479 return 1;
3480 }
3481
3482 if (state->df) {
3483 WARN_FUNC("call to %s() with DF set",
3484 insn->sec, insn->offset, call_dest_name(insn));
3485 return 1;
3486 }
3487
3488 return 0;
3489}
3490
3491static int validate_sibling_call(struct objtool_file *file,
3492 struct instruction *insn,
3493 struct insn_state *state)
3494{
3495 if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
3496 WARN_FUNC("sibling call from callable instruction with modified stack frame",
3497 insn->sec, insn->offset);
3498 return 1;
3499 }
3500
3501 return validate_call(file, insn, state);
3502}
3503
3504static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3505{
3506 if (state->noinstr && state->instr > 0) {
3507 WARN_FUNC("return with instrumentation enabled",
3508 insn->sec, insn->offset);
3509 return 1;
3510 }
3511
3512 if (state->uaccess && !func_uaccess_safe(func)) {
3513 WARN_FUNC("return with UACCESS enabled",
3514 insn->sec, insn->offset);
3515 return 1;
3516 }
3517
3518 if (!state->uaccess && func_uaccess_safe(func)) {
3519 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
3520 insn->sec, insn->offset);
3521 return 1;
3522 }
3523
3524 if (state->df) {
3525 WARN_FUNC("return with DF set",
3526 insn->sec, insn->offset);
3527 return 1;
3528 }
3529
3530 if (func && has_modified_stack_frame(insn, state)) {
3531 WARN_FUNC("return with modified stack frame",
3532 insn->sec, insn->offset);
3533 return 1;
3534 }
3535
3536 if (state->cfi.bp_scratch) {
3537 WARN_FUNC("BP used as a scratch register",
3538 insn->sec, insn->offset);
3539 return 1;
3540 }
3541
3542 return 0;
3543}
3544
3545static struct instruction *next_insn_to_validate(struct objtool_file *file,
3546 struct instruction *insn)
3547{
3548 struct alt_group *alt_group = insn->alt_group;
3549
3550 /*
3551 * Simulate the fact that alternatives are patched in-place. When the
3552 * end of a replacement alt_group is reached, redirect objtool flow to
3553 * the end of the original alt_group.
3554 *
3555 * insn->alts->insn -> alt_group->first_insn
3556 * ...
3557 * alt_group->last_insn
3558 * [alt_group->nop] -> next(orig_group->last_insn)
3559 */
3560 if (alt_group) {
3561 if (alt_group->nop) {
3562 /* ->nop implies ->orig_group */
3563 if (insn == alt_group->last_insn)
3564 return alt_group->nop;
3565 if (insn == alt_group->nop)
3566 goto next_orig;
3567 }
3568 if (insn == alt_group->last_insn && alt_group->orig_group)
3569 goto next_orig;
3570 }
3571
3572 return next_insn_same_sec(file, insn);
3573
3574next_orig:
3575 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3576}
3577
3578/*
3579 * Follow the branch starting at the given instruction, and recursively follow
3580 * any other branches (jumps). Meanwhile, track the frame pointer state at
3581 * each instruction and validate all the rules described in
3582 * tools/objtool/Documentation/objtool.txt.
3583 */
3584static int validate_branch(struct objtool_file *file, struct symbol *func,
3585 struct instruction *insn, struct insn_state state)
3586{
3587 struct alternative *alt;
3588 struct instruction *next_insn, *prev_insn = NULL;
3589 struct section *sec;
3590 u8 visited;
3591 int ret;
3592
3593 sec = insn->sec;
3594
3595 while (1) {
3596 next_insn = next_insn_to_validate(file, insn);
3597
3598 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3599 /* Ignore KCFI type preambles, which always fall through */
3600 if (!strncmp(func->name, "__cfi_", 6) ||
3601 !strncmp(func->name, "__pfx_", 6))
3602 return 0;
3603
3604 WARN("%s() falls through to next function %s()",
3605 func->name, insn_func(insn)->name);
3606 return 1;
3607 }
3608
3609 if (func && insn->ignore) {
3610 WARN_FUNC("BUG: why am I validating an ignored function?",
3611 sec, insn->offset);
3612 return 1;
3613 }
3614
3615 visited = VISITED_BRANCH << state.uaccess;
3616 if (insn->visited & VISITED_BRANCH_MASK) {
3617 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3618 return 1;
3619
3620 if (insn->visited & visited)
3621 return 0;
3622 } else {
3623 nr_insns_visited++;
3624 }
3625
3626 if (state.noinstr)
3627 state.instr += insn->instr;
3628
3629 if (insn->hint) {
3630 if (insn->restore) {
3631 struct instruction *save_insn, *i;
3632
3633 i = insn;
3634 save_insn = NULL;
3635
3636 sym_for_each_insn_continue_reverse(file, func, i) {
3637 if (i->save) {
3638 save_insn = i;
3639 break;
3640 }
3641 }
3642
3643 if (!save_insn) {
3644 WARN_FUNC("no corresponding CFI save for CFI restore",
3645 sec, insn->offset);
3646 return 1;
3647 }
3648
3649 if (!save_insn->visited) {
3650 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
3651 sec, insn->offset);
3652 return 1;
3653 }
3654
3655 insn->cfi = save_insn->cfi;
3656 nr_cfi_reused++;
3657 }
3658
3659 state.cfi = *insn->cfi;
3660 } else {
3661 /* XXX track if we actually changed state.cfi */
3662
3663 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3664 insn->cfi = prev_insn->cfi;
3665 nr_cfi_reused++;
3666 } else {
3667 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3668 }
3669 }
3670
3671 insn->visited |= visited;
3672
3673 if (propagate_alt_cfi(file, insn))
3674 return 1;
3675
3676 if (!insn->ignore_alts && insn->alts) {
3677 bool skip_orig = false;
3678
3679 for (alt = insn->alts; alt; alt = alt->next) {
3680 if (alt->skip_orig)
3681 skip_orig = true;
3682
3683 ret = validate_branch(file, func, alt->insn, state);
3684 if (ret) {
3685 if (opts.backtrace)
3686 BT_FUNC("(alt)", insn);
3687 return ret;
3688 }
3689 }
3690
3691 if (skip_orig)
3692 return 0;
3693 }
3694
3695 if (handle_insn_ops(insn, next_insn, &state))
3696 return 1;
3697
3698 switch (insn->type) {
3699
3700 case INSN_RETURN:
3701 return validate_return(func, insn, &state);
3702
3703 case INSN_CALL:
3704 case INSN_CALL_DYNAMIC:
3705 ret = validate_call(file, insn, &state);
3706 if (ret)
3707 return ret;
3708
3709 if (opts.stackval && func && !is_fentry_call(insn) &&
3710 !has_valid_stack_frame(&state)) {
3711 WARN_FUNC("call without frame pointer save/setup",
3712 sec, insn->offset);
3713 return 1;
3714 }
3715
3716 if (insn->dead_end)
3717 return 0;
3718
3719 break;
3720
3721 case INSN_JUMP_CONDITIONAL:
3722 case INSN_JUMP_UNCONDITIONAL:
3723 if (is_sibling_call(insn)) {
3724 ret = validate_sibling_call(file, insn, &state);
3725 if (ret)
3726 return ret;
3727
3728 } else if (insn->jump_dest) {
3729 ret = validate_branch(file, func,
3730 insn->jump_dest, state);
3731 if (ret) {
3732 if (opts.backtrace)
3733 BT_FUNC("(branch)", insn);
3734 return ret;
3735 }
3736 }
3737
3738 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3739 return 0;
3740
3741 break;
3742
3743 case INSN_JUMP_DYNAMIC:
3744 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3745 if (is_sibling_call(insn)) {
3746 ret = validate_sibling_call(file, insn, &state);
3747 if (ret)
3748 return ret;
3749 }
3750
3751 if (insn->type == INSN_JUMP_DYNAMIC)
3752 return 0;
3753
3754 break;
3755
3756 case INSN_CONTEXT_SWITCH:
3757 if (func && (!next_insn || !next_insn->hint)) {
3758 WARN_FUNC("unsupported instruction in callable function",
3759 sec, insn->offset);
3760 return 1;
3761 }
3762 return 0;
3763
3764 case INSN_STAC:
3765 if (state.uaccess) {
3766 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
3767 return 1;
3768 }
3769
3770 state.uaccess = true;
3771 break;
3772
3773 case INSN_CLAC:
3774 if (!state.uaccess && func) {
3775 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
3776 return 1;
3777 }
3778
3779 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3780 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
3781 return 1;
3782 }
3783
3784 state.uaccess = false;
3785 break;
3786
3787 case INSN_STD:
3788 if (state.df) {
3789 WARN_FUNC("recursive STD", sec, insn->offset);
3790 return 1;
3791 }
3792
3793 state.df = true;
3794 break;
3795
3796 case INSN_CLD:
3797 if (!state.df && func) {
3798 WARN_FUNC("redundant CLD", sec, insn->offset);
3799 return 1;
3800 }
3801
3802 state.df = false;
3803 break;
3804
3805 default:
3806 break;
3807 }
3808
3809 if (insn->dead_end)
3810 return 0;
3811
3812 if (!next_insn) {
3813 if (state.cfi.cfa.base == CFI_UNDEFINED)
3814 return 0;
3815 WARN("%s: unexpected end of section", sec->name);
3816 return 1;
3817 }
3818
3819 prev_insn = insn;
3820 insn = next_insn;
3821 }
3822
3823 return 0;
3824}
3825
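/*
 * Hinted instructions can start control flow which validate_functions()
 * never reaches; validate any hinted instruction which wasn't already
 * visited.
 */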
3826static int validate_unwind_hint(struct objtool_file *file,
3827 struct instruction *insn,
3828 struct insn_state *state)
3829{
3830 if (insn->hint && !insn->visited && !insn->ignore) {
3831 int ret = validate_branch(file, insn_func(insn), insn, *state);
3832 if (ret && opts.backtrace)
3833 BT_FUNC("<=== (hint)", insn);
3834 return ret;
3835 }
3836
3837 return 0;
3838}
3839
3840static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3841{
3842 struct instruction *insn;
3843 struct insn_state state;
3844 int warnings = 0;
3845
3846 if (!file->hints)
3847 return 0;
3848
3849 init_insn_state(file, &state, sec);
3850
3851 if (sec) {
3852 sec_for_each_insn(file, sec, insn)
3853 warnings += validate_unwind_hint(file, insn, &state);
3854 } else {
3855 for_each_insn(file, insn)
3856 warnings += validate_unwind_hint(file, insn, &state);
3857 }
3858
3859 return warnings;
3860}
3861
3862/*
3863 * Validate rethunk entry constraint: must untrain RET before the first RET.
3864 *
3865 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
3866 * before an actual RET instruction.
3867 */
3868static int validate_entry(struct objtool_file *file, struct instruction *insn)
3869{
3870 struct instruction *next, *dest;
3871 int ret, warnings = 0;
3872
3873 for (;;) {
3874 next = next_insn_to_validate(file, insn);
3875
3876 if (insn->visited & VISITED_ENTRY)
3877 return 0;
3878
3879 insn->visited |= VISITED_ENTRY;
3880
3881 if (!insn->ignore_alts && insn->alts) {
3882 struct alternative *alt;
3883 bool skip_orig = false;
3884
3885 for (alt = insn->alts; alt; alt = alt->next) {
3886 if (alt->skip_orig)
3887 skip_orig = true;
3888
3889 ret = validate_entry(file, alt->insn);
3890 if (ret) {
3891 if (opts.backtrace)
3892 BT_FUNC("(alt)", insn);
3893 return ret;
3894 }
3895 }
3896
3897 if (skip_orig)
3898 return 0;
3899 }
3900
3901 switch (insn->type) {
3902
3903 case INSN_CALL_DYNAMIC:
3904 case INSN_JUMP_DYNAMIC:
3905 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3906 WARN_FUNC("early indirect call", insn->sec, insn->offset);
3907 return 1;
3908
3909 case INSN_JUMP_UNCONDITIONAL:
3910 case INSN_JUMP_CONDITIONAL:
3911 if (!is_sibling_call(insn)) {
3912 if (!insn->jump_dest) {
3913 WARN_FUNC("unresolved jump target after linking?!?",
3914 insn->sec, insn->offset);
3915 return -1;
3916 }
3917 ret = validate_entry(file, insn->jump_dest);
3918 if (ret) {
3919 if (opts.backtrace) {
3920 BT_FUNC("(branch%s)", insn,
3921 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3922 }
3923 return ret;
3924 }
3925
3926 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3927 return 0;
3928
3929 break;
3930 }
3931
3932 /* fallthrough */
3933 case INSN_CALL:
3934 dest = find_insn(file, insn_call_dest(insn)->sec,
3935 insn_call_dest(insn)->offset);
3936 if (!dest) {
3937 WARN("Unresolved function after linking!?: %s",
3938 insn_call_dest(insn)->name);
3939 return -1;
3940 }
3941
3942 ret = validate_entry(file, dest);
3943 if (ret) {
3944 if (opts.backtrace)
3945 BT_FUNC("(call)", insn);
3946 return ret;
3947 }
3948 /*
3949 * If a call returns without error, it must have seen UNTRAIN_RET.
3950 * Therefore any non-error return is a success.
3951 */
3952 return 0;
3953
3954 case INSN_RETURN:
3955 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
3956 return 1;
3957
3958 case INSN_NOP:
3959 if (insn->retpoline_safe)
3960 return 0;
3961 break;
3962
3963 default:
3964 break;
3965 }
3966
3967 if (!next) {
3968 WARN_FUNC("teh end!", insn->sec, insn->offset);
3969 return -1;
3970 }
3971 insn = next;
3972 }
3973
3974 return warnings;
3975}
3976
3977/*
3978 * Validate that all branches starting at 'insn->entry' encounter UNRET_END
3979 * before RET.
3980 */
3981static int validate_unret(struct objtool_file *file)
3982{
3983 struct instruction *insn;
3984 int ret, warnings = 0;
3985
3986 for_each_insn(file, insn) {
3987 if (!insn->entry)
3988 continue;
3989
3990 ret = validate_entry(file, insn);
3991 if (ret < 0) {
3992 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
3993 return ret;
3994 }
3995 warnings += ret;
3996 }
3997
3998 return warnings;
3999}
4000
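/*
 * In RETPOLINE/RETHUNK builds, every indirect jump/call and every return
 * must either carry the retpoline_safe annotation or live in an .init
 * section.
 */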
4001static int validate_retpoline(struct objtool_file *file)
4002{
4003 struct instruction *insn;
4004 int warnings = 0;
4005
4006 for_each_insn(file, insn) {
4007 if (insn->type != INSN_JUMP_DYNAMIC &&
4008 insn->type != INSN_CALL_DYNAMIC &&
4009 insn->type != INSN_RETURN)
4010 continue;
4011
4012 if (insn->retpoline_safe)
4013 continue;
4014
4015 if (insn->sec->init)
4016 continue;
4017
4018 if (insn->type == INSN_RETURN) {
4019 if (opts.rethunk) {
4020 WARN_FUNC("'naked' return found in RETHUNK build",
4021 insn->sec, insn->offset);
4022 } else
4023 continue;
4024 } else {
4025 WARN_FUNC("indirect %s found in RETPOLINE build",
4026 insn->sec, insn->offset,
4027 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
4028 }
4029
4030 warnings++;
4031 }
4032
4033 return warnings;
4034}
4035
4036static bool is_kasan_insn(struct instruction *insn)
4037{
4038 return (insn->type == INSN_CALL &&
4039 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4040}
4041
4042static bool is_ubsan_insn(struct instruction *insn)
4043{
4044 return (insn->type == INSN_CALL &&
4045 !strcmp(insn_call_dest(insn)->name,
4046 "__ubsan_handle_builtin_unreachable"));
4047}
4048
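/*
 * Return true when an unreachable instruction warning should be
 * suppressed: alternative replacement text, dead code behind dropped
 * weak symbols, or compiler-generated KASAN/UBSAN constructs.
 */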
4049static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
4050{
4051 int i;
4052 struct instruction *prev_insn;
4053
4054 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
4055 return true;
4056
4057 /*
4058 * Ignore alternative replacement instructions. This can happen
4059 * when a whitelisted function uses one of the ALTERNATIVE macros.
4060 */
4061 if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
4062 !strcmp(insn->sec->name, ".altinstr_aux"))
4063 return true;
4064
4065 /*
4066 * Whole archive runs might encounter dead code from weak symbols.
4067 * This is where the linker will have dropped the weak symbol in
4068 * favour of a regular symbol, but leaves the code in place.
4069 *
4070 * In this case we'll find a piece of code (whole function) that is not
4071 * covered by a !section symbol. Ignore them.
4072 */
4073 if (opts.link && !insn_func(insn)) {
4074 int size = find_symbol_hole_containing(insn->sec, insn->offset);
4075 unsigned long end = insn->offset + size;
4076
4077 if (!size) /* not a hole */
4078 return false;
4079
4080 if (size < 0) /* hole until the end */
4081 return true;
4082
4083 sec_for_each_insn_continue(file, insn) {
4084 /*
4085 * If we reach a visited instruction at or before the
4086 * end of the hole, ignore the unreachable.
4087 */
4088 if (insn->visited)
4089 return true;
4090
4091 if (insn->offset >= end)
4092 break;
4093
4094 /*
4095 * If this hole jumps to a .cold function, mark it ignore too.
4096 */
4097 if (insn->jump_dest && insn_func(insn->jump_dest) &&
4098 strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4099 struct instruction *dest = insn->jump_dest;
4100 func_for_each_insn(file, insn_func(dest), dest)
4101 dest->ignore = true;
4102 }
4103 }
4104
4105 return false;
4106 }
4107
4108 if (!insn_func(insn))
4109 return false;
4110
4111 if (insn_func(insn)->static_call_tramp)
4112 return true;
4113
4114 /*
4115 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4116 * __builtin_unreachable(). The BUG() macro has an unreachable() after
4117 * the UD2, which causes GCC's undefined trap logic to emit another UD2
4118 * (or occasionally a JMP to UD2).
4119 *
4120 * It may also insert a UD2 after calling a __noreturn function.
4121 */
4122 prev_insn = prev_insn_same_sec(file, insn);
4123 if ((prev_insn->dead_end ||
4124 dead_end_function(file, insn_call_dest(prev_insn))) &&
4125 (insn->type == INSN_BUG ||
4126 (insn->type == INSN_JUMP_UNCONDITIONAL &&
4127 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4128 return true;
4129
4130 /*
4131 * Check if this (or a subsequent) instruction is related to
4132 * CONFIG_UBSAN or CONFIG_KASAN.
4133 *
4134 * End the search at 5 instructions to avoid going into the weeds.
4135 */
4136 for (i = 0; i < 5; i++) {
4137
4138 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4139 return true;
4140
4141 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4142 if (insn->jump_dest &&
4143 insn_func(insn->jump_dest) == insn_func(insn)) {
4144 insn = insn->jump_dest;
4145 continue;
4146 }
4147
4148 break;
4149 }
4150
4151 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
4152 break;
4153
4154 insn = next_insn_same_sec(file, insn);
4155 }
4156
4157 return false;
4158}
4159
4160static int add_prefix_symbol(struct objtool_file *file, struct symbol *func,
4161 struct instruction *insn)
4162{
4163 if (!opts.prefix)
4164 return 0;
4165
4166 for (;;) {
4167 struct instruction *prev = prev_insn_same_sec(file, insn);
4168 u64 offset;
4169
4170 if (!prev)
4171 break;
4172
4173 if (prev->type != INSN_NOP)
4174 break;
4175
4176 offset = func->offset - prev->offset;
4177 if (offset >= opts.prefix) {
4178 if (offset == opts.prefix) {
4179 /*
4180 * Since the sec->symbol_list is ordered by
4181 * offset (see elf_add_symbol()) the added
4182 * symbol will not be seen by the iteration in
4183 * validate_section().
4184 *
4185 * Hence the lack of list_for_each_entry_safe()
4186 * there.
4187 *
4188	 * The direct consequence is that prefix symbols
4189 * don't get visited (because pointless), except
4190 * for the logic in ignore_unreachable_insn()
4191 * that needs the terminating insn to be visited
4192 * otherwise it will report the hole.
4193 *
4194 * Hence mark the first instruction of the
4195	 * prefix symbol as visited.
4196 */
4197 prev->visited |= VISITED_BRANCH;
4198 elf_create_prefix_symbol(file->elf, func, opts.prefix);
4199 }
4200 break;
4201 }
4202 insn = prev;
4203 }
4204
4205 return 0;
4206}
4207
4208static int validate_symbol(struct objtool_file *file, struct section *sec,
4209 struct symbol *sym, struct insn_state *state)
4210{
4211 struct instruction *insn;
4212 int ret;
4213
4214 if (!sym->len) {
4215 WARN("%s() is missing an ELF size annotation", sym->name);
4216 return 1;
4217 }
4218
4219 if (sym->pfunc != sym || sym->alias != sym)
4220 return 0;
4221
4222 insn = find_insn(file, sec, sym->offset);
4223 if (!insn || insn->ignore || insn->visited)
4224 return 0;
4225
4226 add_prefix_symbol(file, sym, insn);
4227
4228 state->uaccess = sym->uaccess_safe;
4229
4230 ret = validate_branch(file, insn_func(insn), insn, *state);
4231 if (ret && opts.backtrace)
4232 BT_FUNC("<=== (sym)", insn);
4233 return ret;
4234}
4235
4236static int validate_section(struct objtool_file *file, struct section *sec)
4237{
4238 struct insn_state state;
4239 struct symbol *func;
4240 int warnings = 0;
4241
4242 list_for_each_entry(func, &sec->symbol_list, list) {
4243 if (func->type != STT_FUNC)
4244 continue;
4245
4246 init_insn_state(file, &state, sec);
4247 set_func_state(&state.cfi);
4248
4249 warnings += validate_symbol(file, sec, func, &state);
4250 }
4251
4252 return warnings;
4253}
4254
4255static int validate_noinstr_sections(struct objtool_file *file)
4256{
4257 struct section *sec;
4258 int warnings = 0;
4259
4260 sec = find_section_by_name(file->elf, ".noinstr.text");
4261 if (sec) {
4262 warnings += validate_section(file, sec);
4263 warnings += validate_unwind_hints(file, sec);
4264 }
4265
4266 sec = find_section_by_name(file->elf, ".entry.text");
4267 if (sec) {
4268 warnings += validate_section(file, sec);
4269 warnings += validate_unwind_hints(file, sec);
4270 }
4271
4272 sec = find_section_by_name(file->elf, ".cpuidle.text");
4273 if (sec) {
4274 warnings += validate_section(file, sec);
4275 warnings += validate_unwind_hints(file, sec);
4276 }
4277
4278 return warnings;
4279}
4280
4281static int validate_functions(struct objtool_file *file)
4282{
4283 struct section *sec;
4284 int warnings = 0;
4285
4286 for_each_sec(file, sec) {
4287 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4288 continue;
4289
4290 warnings += validate_section(file, sec);
4291 }
4292
4293 return warnings;
4294}
4295
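/*
 * Take a referenced ENDBR off the seal list (see validate_ibt()) so it
 * won't be NOP'd out later.
 */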
4296static void mark_endbr_used(struct instruction *insn)
4297{
4298 if (!list_empty(&insn->call_node))
4299 list_del_init(&insn->call_node);
4300}
4301
4302static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4303{
4304 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4305 struct instruction *first;
4306
4307 if (!sym)
4308 return false;
4309
4310 first = find_insn(file, sym->sec, sym->offset);
4311 if (!first)
4312 return false;
4313
4314 if (first->type != INSN_ENDBR && !first->noendbr)
4315 return false;
4316
4317 return insn->offset == sym->offset + sym->len;
4318}
4319
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations. Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc->offset + 1,
					      (insn->offset + insn->len) - (reloc->offset + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR. Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		off = reloc->sym->offset;
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
			/*
			 * Anything from->to self is either _THIS_IP_ or
			 * IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		/*
		 * Accept anything ANNOTATE_NOENDBR.
		 */
		if (dest->noendbr)
			continue;

		/*
		 * Accept if this is the instruction after a symbol
		 * that is (no)endbr -- typical code-range usage.
		 */
		if (noendbr_range(file, dest))
			continue;

		WARN_FUNC("relocation to !ENDBR: %s",
			  insn->sec, insn->offset,
			  offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}

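/*
 * Same check for a relocation in a data section: if it resolves to an
 * instruction, that instruction must be ENDBR or annotated NOENDBR.
 */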
static int validate_ibt_data_reloc(struct objtool_file *file,
				   struct reloc *reloc)
{
	struct instruction *dest;

	dest = find_insn(file, reloc->sym->sec,
			 reloc->sym->offset + reloc->addend);
	if (!dest)
		return 0;

	if (dest->type == INSN_ENDBR) {
		mark_endbr_used(dest);
		return 0;
	}

	if (dest->noendbr)
		return 0;

	WARN_FUNC("data relocation to !ENDBR: %s",
		  reloc->sec->base, reloc->offset,
		  offstr(dest->sec, dest->offset));

	return 1;
}

/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		if (!sec->reloc)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".parainstructions") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}

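/*
 * SLS (straight-line speculation) mitigation: verify that every 'ret' and
 * indirect jump is directly followed by an INT3 trap so the CPU cannot
 * speculate past it, e.g. the pattern emitted by -mharden-sls=all:
 *
 *	ret
 *	int3
 */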
static int validate_sls(struct objtool_file *file)
{
	struct instruction *insn, *next_insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		next_insn = next_insn_same_sec(file, insn);

		if (insn->retpoline_safe)
			continue;

		switch (insn->type) {
		case INSN_RETURN:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
				warnings++;
			}

			break;
		case INSN_JUMP_DYNAMIC:
			if (!next_insn || next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
				warnings++;
			}
			break;
		default:
			break;
		}
	}

	return warnings;
}

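/*
 * After all control flow has been walked, warn about the first instruction
 * which was never visited and which ignore_unreachable_insn() cannot
 * justify.
 */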
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

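/*
 * objtool's main entry point: decode the object file, run whichever
 * validation passes were requested on the command line, then generate the
 * requested auxiliary sections (static call sites, retpoline sites, ORC
 * data, etc.).
 */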
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (!nr_insns)
		goto out;

	if (opts.retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.stackval || opts.orc || opts.uaccess) {
		ret = validate_functions(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_unwind_hints(file, NULL);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (!warnings) {
			ret = validate_reachable_instructions(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}

	} else if (opts.noinstr) {
		ret = validate_noinstr_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unret(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.sls) {
		ret = validate_sls(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings. These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}