// SPDX-License-Identifier: GPL-2.0-or-later
#define _GNU_SOURCE /* memmem() */
#include <subcmd/parse-options.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include <stdio.h>
#include <ctype.h>

#include <objtool/objtool.h>
#include <objtool/warn.h>
#include <objtool/arch.h>
#include <objtool/klp.h>
#include <objtool/util.h>
#include <arch/special.h>

#include <linux/objtool_types.h>
#include <linux/livepatch_external.h>
#include <linux/stringify.h>
#include <linux/string.h>
#include <linux/jhash.h>

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct elfs {
	struct elf *orig, *patched, *out;
	const char *modname;
};

struct export {
	struct hlist_node hash;
	char *mod, *sym;
};

static const char * const klp_diff_usage[] = {
	"objtool klp diff [<options>] <in1.o> <in2.o> <out.o>",
	NULL,
};

static const struct option klp_diff_options[] = {
	OPT_GROUP("Options:"),
	OPT_BOOLEAN('d', "debug", &debug, "enable debug output"),
	OPT_END(),
};

static DEFINE_HASHTABLE(exports, 15);

static inline u32 str_hash(const char *str)
{
	return jhash(str, strlen(str), 0);
}

static char *escape_str(const char *orig)
{
	size_t len = 0;
	const char *a;
	char *b, *new;

	for (a = orig; *a; a++) {
		switch (*a) {
		case '\001': len += 5; break;
		case '\n':
		case '\t': len += 2; break;
		default: len++;
		}
	}

	new = malloc(len + 1);
	if (!new)
		return NULL;

	for (a = orig, b = new; *a; a++) {
		switch (*a) {
		case '\001': memcpy(b, "<SOH>", 5); b += 5; break;
		case '\n': *b++ = '\\'; *b++ = 'n'; break;
		case '\t': *b++ = '\\'; *b++ = 't'; break;
		default: *b++ = *a;
		}
	}

	*b = '\0';
	return new;
}

static int read_exports(void)
{
	const char *symvers = "Module.symvers";
	char line[1024], *path = NULL;
	unsigned int line_num = 1;
	FILE *file;

	file = fopen(symvers, "r");
	if (!file) {
		path = top_level_dir(symvers);
		if (!path) {
			ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers);
			return -1;
		}

		file = fopen(path, "r");
		if (!file) {
			ERROR_GLIBC("fopen");
			return -1;
		}
	}

	while (fgets(line, 1024, file)) {
		char *sym, *mod, *type;
		struct export *export;

		sym = strchr(line, '\t');
		if (!sym) {
			ERROR("malformed Module.symvers (sym) at line %d", line_num);
			return -1;
		}

		*sym++ = '\0';

		mod = strchr(sym, '\t');
		if (!mod) {
			ERROR("malformed Module.symvers (mod) at line %d", line_num);
			return -1;
		}

		*mod++ = '\0';

		type = strchr(mod, '\t');
		if (!type) {
			ERROR("malformed Module.symvers (type) at line %d", line_num);
			return -1;
		}

		*type++ = '\0';

		if (*sym == '\0' || *mod == '\0') {
			ERROR("malformed Module.symvers at line %d", line_num);
			return -1;
		}

		export = calloc(1, sizeof(*export));
		if (!export) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		export->mod = strdup(mod);
		if (!export->mod) {
			ERROR_GLIBC("strdup");
			return -1;
		}

		export->sym = strdup(sym);
		if (!export->sym) {
			ERROR_GLIBC("strdup");
			return -1;
		}

		hash_add(exports, &export->hash, str_hash(sym));
	}

	free(path);
	fclose(file);

	return 0;
}

static int read_sym_checksums(struct elf *elf)
{
	struct section *sec;

	sec = find_section_by_name(elf, ".discard.sym_checksum");
	if (!sec) {
		ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?",
		      elf->name);
		return -1;
	}

	if (!sec->rsec) {
		ERROR("missing reloc section for .discard.sym_checksum");
		return -1;
	}

	if (sec_size(sec) % sizeof(struct sym_checksum)) {
		ERROR("struct sym_checksum size mismatch");
		return -1;
	}

	for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) {
		struct sym_checksum *sym_checksum;
		struct reloc *reloc;
		struct symbol *sym;

		sym_checksum = (struct sym_checksum *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum));
		if (!reloc) {
			ERROR("can't find reloc for sym_checksum[%d]", i);
			return -1;
		}

		sym = reloc->sym;

		if (is_sec_sym(sym)) {
			ERROR("not sure how to handle section %s", sym->name);
			return -1;
		}

		if (is_func_sym(sym))
			sym->csum.checksum = sym_checksum->checksum;
	}

	return 0;
}

static struct symbol *first_file_symbol(struct elf *elf)
{
	struct symbol *sym;

	for_each_sym(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}

static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym)
{
	for_each_sym_continue(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}

/*
 * Certain static local variables should never be correlated. They will be
 * used in place rather than referencing the originals.
 */
static bool is_uncorrelated_static_local(struct symbol *sym)
{
	static const char * const vars[] = {
		"__already_done.",
		"__func__.",
		"__key.",
		"__warned.",
		"_entry.",
		"_entry_ptr.",
		"_rs.",
		"descriptor.",
		"CSWTCH.",
	};

	if (!is_object_sym(sym) || !is_local_sym(sym))
		return false;

	if (!strcmp(sym->sec->name, ".data.once"))
		return true;

	for (int i = 0; i < ARRAY_SIZE(vars); i++) {
		if (strstarts(sym->name, vars[i]))
			return true;
	}

	return false;
}

/*
 * Clang emits several useless .Ltmp_* code labels.
 */
static bool is_clang_tmp_label(struct symbol *sym)
{
	return sym->type == STT_NOTYPE &&
	       is_text_sec(sym->sec) &&
	       strstarts(sym->name, ".Ltmp") &&
	       isdigit(sym->name[5]);
}

static bool is_special_section(struct section *sec)
{
	static const char * const specials[] = {
		".altinstructions",
		".smp_locks",
		"__bug_table",
		"__ex_table",
		"__jump_table",
		"__mcount_loc",

		/*
		 * Extract .static_call_sites here to inherit non-module
		 * preferential treatment. The later static call processing
		 * during klp module build will be skipped when it sees this
		 * section already exists.
		 */
		".static_call_sites",
	};

	static const char * const non_special_discards[] = {
		".discard.addressable",
		".discard.sym_checksum",
	};

	if (is_text_sec(sec))
		return false;

	for (int i = 0; i < ARRAY_SIZE(specials); i++) {
		if (!strcmp(sec->name, specials[i]))
			return true;
	}

	/* Most .discard data sections are special */
	for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) {
		if (!strcmp(sec->name, non_special_discards[i]))
			return false;
	}

	return strstarts(sec->name, ".discard.");
}

/*
 * These sections are referenced by special sections but aren't considered
 * special sections themselves.
 */
static bool is_special_section_aux(struct section *sec)
{
	static const char * const specials_aux[] = {
		".altinstr_replacement",
		".altinstr_aux",
	};

	for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) {
		if (!strcmp(sec->name, specials_aux[i]))
			return true;
	}

	return false;
}

/*
 * These symbols should never be correlated, so their local patched versions
 * are used instead of linking to the originals.
 */
static bool dont_correlate(struct symbol *sym)
{
	return is_file_sym(sym) ||
	       is_null_sym(sym) ||
	       is_sec_sym(sym) ||
	       is_prefix_func(sym) ||
	       is_uncorrelated_static_local(sym) ||
	       is_clang_tmp_label(sym) ||
	       is_string_sec(sym->sec) ||
	       is_special_section(sym->sec) ||
	       is_special_section_aux(sym->sec) ||
	       strstarts(sym->name, "__initcall__");
}

/*
 * For each symbol in the original kernel, find its corresponding "twin" in the
 * patched kernel.
 */
static int correlate_symbols(struct elfs *e)
{
	struct symbol *file1_sym, *file2_sym;
	struct symbol *sym1, *sym2;

	/* Correlate locals */
	for (file1_sym = first_file_symbol(e->orig),
	     file2_sym = first_file_symbol(e->patched); ;
	     file1_sym = next_file_symbol(e->orig, file1_sym),
	     file2_sym = next_file_symbol(e->patched, file2_sym)) {

		if (!file1_sym && file2_sym) {
			ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name);
			return -1;
		}

		if (file1_sym && !file2_sym) {
			ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name);
			return -1;
		}

		if (!file1_sym)
			break;

		if (strcmp(file1_sym->name, file2_sym->name)) {
			ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name);
			return -1;
		}

		file1_sym->twin = file2_sym;
		file2_sym->twin = file1_sym;

		sym1 = file1_sym;

		for_each_sym_continue(e->orig, sym1) {
			if (is_file_sym(sym1) || !is_local_sym(sym1))
				break;

			if (dont_correlate(sym1))
				continue;

			sym2 = file2_sym;
			for_each_sym_continue(e->patched, sym2) {
				if (is_file_sym(sym2) || !is_local_sym(sym2))
					break;

				if (sym2->twin || dont_correlate(sym2))
					continue;

				if (strcmp(sym1->demangled_name, sym2->demangled_name))
					continue;

				sym1->twin = sym2;
				sym2->twin = sym1;
				break;
			}
		}
	}

	/* Correlate globals */
	for_each_sym(e->orig, sym1) {
		if (sym1->bind == STB_LOCAL)
			continue;

		sym2 = find_global_symbol_by_name(e->patched, sym1->name);

		if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) {
			sym1->twin = sym2;
			sym2->twin = sym1;
		}
	}

	for_each_sym(e->orig, sym1) {
		if (sym1->twin || dont_correlate(sym1))
			continue;
		WARN("no correlation: %s", sym1->name);
	}

	return 0;
}

/* "sympos" is used by livepatch to disambiguate duplicate symbol names */
static unsigned long find_sympos(struct elf *elf, struct symbol *sym)
{
	bool vmlinux = str_ends_with(objname, "vmlinux.o");
	unsigned long sympos = 0, nr_matches = 0;
	bool has_dup = false;
	struct symbol *s;

	if (sym->bind != STB_LOCAL)
		return 0;

	if (vmlinux && sym->type == STT_FUNC) {
		/*
		 * HACK: Unfortunately, symbol ordering can differ between
		 * vmlinux.o and vmlinux due to the linker script emitting
		 * .text.unlikely* before .text*. Count .text.unlikely* first.
		 *
		 * TODO: Disambiguate symbols more reliably (checksums?)
		 */
		for_each_sym(elf, s) {
			if (strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
		for_each_sym(elf, s) {
			if (!strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	} else {
		for_each_sym(elf, s) {
			if (!strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	}

	if (!sympos) {
		ERROR("can't find sympos for %s", sym->name);
		return ULONG_MAX;
	}

	return has_dup ? sympos : 0;
}

static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym);

static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym,
				     bool data_too)
{
	struct section *out_sec = NULL;
	unsigned long offset = 0;
	struct symbol *out_sym;

	if (data_too && !is_undef_sym(patched_sym)) {
		struct section *patched_sec = patched_sym->sec;

		out_sec = find_section_by_name(elf, patched_sec->name);
		if (!out_sec) {
			out_sec = elf_create_section(elf, patched_sec->name, 0,
						     patched_sec->sh.sh_entsize,
						     patched_sec->sh.sh_type,
						     patched_sec->sh.sh_addralign,
						     patched_sec->sh.sh_flags);
			if (!out_sec)
				return NULL;
		}

		if (is_string_sec(patched_sym->sec)) {
			out_sym = elf_create_section_symbol(elf, out_sec);
			if (!out_sym)
				return NULL;

			goto sym_created;
		}

		if (!is_sec_sym(patched_sym))
			offset = sec_size(out_sec);

		if (patched_sym->len || is_sec_sym(patched_sym)) {
			void *data = NULL;
			size_t size;

			/* bss doesn't have data */
			if (patched_sym->sec->data->d_buf)
				data = patched_sym->sec->data->d_buf + patched_sym->offset;

			if (is_sec_sym(patched_sym))
				size = sec_size(patched_sym->sec);
			else
				size = patched_sym->len;

			if (!elf_add_data(elf, out_sec, data, size))
				return NULL;
		}
	}

	out_sym = elf_create_symbol(elf, patched_sym->name, out_sec,
				    patched_sym->bind, patched_sym->type,
				    offset, patched_sym->len);
	if (!out_sym)
		return NULL;

sym_created:
	patched_sym->clone = out_sym;
	out_sym->clone = patched_sym;

	return out_sym;
}

static const char *sym_type(struct symbol *sym)
{
	switch (sym->type) {
	case STT_NOTYPE: return "NOTYPE";
	case STT_OBJECT: return "OBJECT";
	case STT_FUNC: return "FUNC";
	case STT_SECTION: return "SECTION";
	case STT_FILE: return "FILE";
	default: return "UNKNOWN";
	}
}

static const char *sym_bind(struct symbol *sym)
{
	switch (sym->bind) {
	case STB_LOCAL: return "LOCAL";
	case STB_GLOBAL: return "GLOBAL";
	case STB_WEAK: return "WEAK";
	default: return "UNKNOWN";
	}
}

/*
 * Copy a symbol to the output object, optionally including its data and
 * relocations.
 */
static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym,
				   bool data_too)
{
	struct symbol *pfx;

	if (patched_sym->clone)
		return patched_sym->clone;

	dbg_indent("%s%s", patched_sym->name, data_too ? " [+DATA]" : "");

	/* Make sure the prefix gets cloned first */
	if (is_func_sym(patched_sym) && data_too) {
		pfx = get_func_prefix(patched_sym);
		if (pfx)
			clone_symbol(e, pfx, true);
	}

	if (!__clone_symbol(e->out, patched_sym, data_too))
		return NULL;

	if (data_too && clone_sym_relocs(e, patched_sym))
		return NULL;

	return patched_sym->clone;
}

static void mark_included_function(struct symbol *func)
{
	struct symbol *pfx;

	func->included = 1;

	/* Include prefix function */
	pfx = get_func_prefix(func);
	if (pfx)
		pfx->included = 1;

	/* Make sure .cold parent+child always stay together */
	if (func->cfunc && func->cfunc != func)
		func->cfunc->included = 1;
	if (func->pfunc && func->pfunc != func)
		func->pfunc->included = 1;
}

/*
 * Copy all changed functions (and their dependencies) from the patched object
 * to the output object.
 */
static int mark_changed_functions(struct elfs *e)
{
	struct symbol *sym_orig, *patched_sym;
	bool changed = false;

	/* Find changed functions */
	for_each_sym(e->orig, sym_orig) {
		if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig))
			continue;

		patched_sym = sym_orig->twin;
		if (!patched_sym)
			continue;

		if (sym_orig->csum.checksum != patched_sym->csum.checksum) {
			patched_sym->changed = 1;
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Find added functions and print them */
	for_each_sym(e->patched, patched_sym) {
		if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym))
			continue;

		if (!patched_sym->twin) {
			printf("%s: new function: %s\n", objname, patched_sym->name);
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Print changed functions */
	for_each_sym(e->patched, patched_sym) {
		if (patched_sym->changed)
			printf("%s: changed function: %s\n", objname, patched_sym->name);
	}

	return !changed ? -1 : 0;
}

static int clone_included_functions(struct elfs *e)
{
	struct symbol *patched_sym;

	for_each_sym(e->patched, patched_sym) {
		if (patched_sym->included) {
			if (!clone_symbol(e, patched_sym, true))
				return -1;
		}
	}

	return 0;
}

/*
 * Determine whether a relocation should reference the section rather than the
 * underlying symbol.
 */
static bool section_reference_needed(struct section *sec)
{
	/*
	 * String symbols are zero-length and uncorrelated. It's easier to
	 * deal with them as section symbols.
	 */
	if (is_string_sec(sec))
		return true;

	/*
	 * .rodata has mostly anonymous data so there's no way to determine the
	 * length of a needed reference. Just copy the whole section if needed.
	 */
	if (strstarts(sec->name, ".rodata"))
		return true;

	/* UBSAN anonymous data */
	if (strstarts(sec->name, ".data..Lubsan") || /* GCC */
	    strstarts(sec->name, ".data..L__unnamed_")) /* Clang */
		return true;

	return false;
}

static bool is_reloc_allowed(struct reloc *reloc)
{
	return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym);
}

static struct export *find_export(struct symbol *sym)
{
	struct export *export;

	hash_for_each_possible(exports, export, hash, str_hash(sym->name)) {
		if (!strcmp(export->sym, sym->name))
			return export;
	}

	return NULL;
}

static const char *__find_modname(struct elfs *e)
{
	struct section *sec;
	char *name;

	sec = find_section_by_name(e->orig, ".modinfo");
	if (!sec) {
		ERROR("missing .modinfo section");
		return NULL;
	}

	name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6);
	if (name)
		return name + 6;

	name = strdup(e->orig->name);
	if (!name) {
		ERROR_GLIBC("strdup");
		return NULL;
	}

	for (char *c = name; *c; c++) {
		if (*c == '/')
			name = c + 1;
		else if (*c == '-')
			*c = '_';
		else if (*c == '.') {
			*c = '\0';
			break;
		}
	}

	return name;
}

/* Get the object's module name as defined by the kernel (and klp_object) */
static const char *find_modname(struct elfs *e)
{
	const char *modname;

	if (e->modname)
		return e->modname;

	modname = __find_modname(e);
	e->modname = modname;
	return modname;
}

/*
 * Copying a function from its native compiled environment to a kernel module
 * removes its natural access to local functions/variables and unexported
 * globals. References to such symbols need to be converted to KLP relocs so
 * the kernel arch relocation code knows to apply them and where to find the
 * symbols. Particularly, duplicate static symbols need to be disambiguated.
 */
static bool klp_reloc_needed(struct reloc *patched_reloc)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export;

	/* no external symbol to reference */
	if (dont_correlate(patched_sym))
		return false;

	/* For included functions, a regular reloc will do. */
	if (patched_sym->included)
		return false;

	/*
	 * If exported by a module, it has to be a klp reloc. Thanks to the
	 * clusterfunk that is late module patching, the patch module is
	 * allowed to be loaded before any modules it depends on.
	 *
	 * If exported by vmlinux, a normal reloc will do.
	 */
	export = find_export(patched_sym);
	if (export)
		return strcmp(export->mod, "vmlinux");

	if (!patched_sym->twin) {
		/*
		 * Presumably the symbol and its reference were added by the
		 * patch. The symbol could be defined in this .o or in another
		 * .o in the patch module.
		 *
		 * This check needs to be *after* the export check due to the
		 * possibility of the patch adding a new UNDEF reference to an
		 * exported symbol.
		 */
		return false;
	}

	/* Unexported symbol which lives in the original vmlinux or module. */
	return true;
}

static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc)
{
	struct symbol *sym = reloc->sym;
	struct section *sec = sym->sec;

	if (!sec->sym && !elf_create_section_symbol(elf, sec))
		return -1;

	reloc->sym = sec->sym;
	set_reloc_sym(elf, reloc, sym->idx);
	set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc));
	return 0;
}

static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc)
{
	struct symbol *sym = reloc->sym;
	struct section *sec = sym->sec;

	/* If the symbol has a dedicated section, it's easy to find */
	sym = find_symbol_by_offset(sec, 0);
	if (sym && sym->len == sec_size(sec))
		goto found_sym;

	/* No dedicated section; find the symbol manually */
	sym = find_symbol_containing(sec, arch_adjusted_addend(reloc));
	if (!sym) {
		/*
		 * This can happen for special section references to weak code
		 * whose symbol has been stripped by the linker.
		 */
		return -1;
	}

found_sym:
	reloc->sym = sym;
	set_reloc_sym(elf, reloc, sym->idx);
	set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset);
	return 0;
}

/*
 * Convert a relocation symbol reference to the needed format: either a section
 * symbol or the underlying symbol itself.
 */
static int convert_reloc_sym(struct elf *elf, struct reloc *reloc)
{
	if (is_reloc_allowed(reloc))
		return 0;

	if (section_reference_needed(reloc->sym->sec))
		return convert_reloc_sym_to_secsym(elf, reloc);
	else
		return convert_reloc_secsym_to_sym(elf, reloc);
}

/*
 * Convert a regular relocation to a klp relocation (sort of).
 */
static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc,
			   struct section *sec, unsigned long offset,
			   struct export *export)
{
	struct symbol *patched_sym = patched_reloc->sym;
	s64 addend = reloc_addend(patched_reloc);
	const char *sym_modname, *sym_orig_name;
	static struct section *klp_relocs;
	struct symbol *sym, *klp_sym;
	unsigned long klp_reloc_off;
	char sym_name[SYM_NAME_LEN];
	struct klp_reloc klp_reloc;
	unsigned long sympos;

	if (!patched_sym->twin) {
		ERROR("unexpected klp reloc for new symbol %s", patched_sym->name);
		return -1;
	}

	/*
	 * Keep the original reloc intact for now to avoid breaking the objtool
	 * run, which relies on proper relocations for many of its features.
	 * This will be disabled later by "objtool klp post-link".
	 *
	 * Convert it to UNDEF (and WEAK to avoid modpost warnings).
	 */

	sym = patched_sym->clone;
	if (!sym) {
		/* STB_WEAK: avoid modpost undefined symbol warnings */
		sym = elf_create_symbol(e->out, patched_sym->name, NULL,
					STB_WEAK, patched_sym->type, 0, 0);
		if (!sym)
			return -1;

		patched_sym->clone = sym;
		sym->clone = patched_sym;
	}

	if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc)))
		return -1;

	/*
	 * Create the KLP symbol.
	 */

	if (export) {
		sym_modname = export->mod;
		sym_orig_name = export->sym;
		sympos = 0;
	} else {
		sym_modname = find_modname(e);
		if (!sym_modname)
			return -1;

		sym_orig_name = patched_sym->twin->name;
		sympos = find_sympos(e->orig, patched_sym->twin);
		if (sympos == ULONG_MAX)
			return -1;
	}

	/* symbol format: .klp.sym.modname.sym_name,sympos */
	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%ld",
			   sym_modname, sym_orig_name, sympos))
		return -1;

	klp_sym = find_symbol_by_name(e->out, sym_name);
	if (!klp_sym) {
		__dbg_indent("%s", sym_name);

		/* STB_WEAK: avoid modpost undefined symbol warnings */
		klp_sym = elf_create_symbol(e->out, sym_name, NULL,
					    STB_WEAK, patched_sym->type, 0, 0);
		if (!klp_sym)
			return -1;
	}

	/*
	 * Create the __klp_relocs entry. This will be converted to an actual
	 * KLP rela by "objtool klp post-link".
	 *
	 * This intermediate step is necessary to prevent corruption by the
	 * linker, which doesn't know how to properly handle two rela sections
	 * applying to the same base section.
	 */

	if (!klp_relocs) {
		klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0,
						0, SHT_PROGBITS, 8, SHF_ALLOC);
		if (!klp_relocs)
			return -1;
	}

	klp_reloc_off = sec_size(klp_relocs);
	memset(&klp_reloc, 0, sizeof(klp_reloc));

	klp_reloc.type = reloc_type(patched_reloc);
	if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc)))
		return -1;

	/* klp_reloc.offset */
	if (!sec->sym && !elf_create_section_symbol(e->out, sec))
		return -1;

	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, offset),
			      sec->sym, offset, R_ABS64))
		return -1;

	/* klp_reloc.sym */
	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, sym),
			      klp_sym, addend, R_ABS64))
		return -1;

	return 0;
}

#define dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp) \
	dbg_indent("%s+0x%lx: %s%s0x%lx [%s%s%s%s%s%s]", \
		   sec->name, offset, patched_sym->name, \
		   addend >= 0 ? "+" : "-", labs(addend), \
		   sym_type(patched_sym), \
		   patched_sym->type == STT_SECTION ? "" : " ", \
		   patched_sym->type == STT_SECTION ? "" : sym_bind(patched_sym), \
		   is_undef_sym(patched_sym) ? " UNDEF" : "", \
		   export ? " EXPORTED" : "", \
		   klp ? " KLP" : "")

/* Copy a reloc and its symbol to the output object */
static int clone_reloc(struct elfs *e, struct reloc *patched_reloc,
		       struct section *sec, unsigned long offset)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export = find_export(patched_sym);
	long addend = reloc_addend(patched_reloc);
	struct symbol *out_sym;
	bool klp;

	if (!is_reloc_allowed(patched_reloc)) {
		ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc),
			   "missing symbol for reference to %s+%ld",
			   patched_sym->name, addend);
		return -1;
	}

	klp = klp_reloc_needed(patched_reloc);

	dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp);

	if (klp) {
		if (clone_reloc_klp(e, patched_reloc, sec, offset, export))
			return -1;

		return 0;
	}

	/*
	 * Why !export sets 'data_too':
	 *
	 * Unexported non-klp symbols need to live in the patch module,
	 * otherwise there will be unresolved symbols. Notably, this includes:
	 *
	 * - New functions/data
	 * - String sections
	 * - Special section entries
	 * - Uncorrelated static local variables
	 * - UBSAN sections
	 */
	out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export);
	if (!out_sym)
		return -1;

	/*
	 * For strings, all references use section symbols, thanks to
	 * section_reference_needed(). clone_symbol() has cloned an empty
	 * version of the string section. Now copy the string itself.
	 */
	if (is_string_sec(patched_sym->sec)) {
		const char *str = patched_sym->sec->data->d_buf + addend;

		__dbg_indent("\"%s\"", escape_str(str));

		addend = elf_add_string(e->out, out_sym->sec, str);
		if (addend == -1)
			return -1;
	}

	if (!elf_create_reloc(e->out, sec, offset, out_sym, addend,
			      reloc_type(patched_reloc)))
		return -1;

	return 0;
}

/* Copy all relocs needed for a symbol's contents */
static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym)
{
	struct section *patched_rsec = patched_sym->sec->rsec;
	struct reloc *patched_reloc;
	unsigned long start, end;
	struct symbol *out_sym;

	out_sym = patched_sym->clone;
	if (!out_sym) {
		ERROR("no clone for %s", patched_sym->name);
		return -1;
	}

	if (!patched_rsec)
		return 0;

	if (!is_sec_sym(patched_sym) && !patched_sym->len)
		return 0;

	if (is_string_sec(patched_sym->sec))
		return 0;

	if (is_sec_sym(patched_sym)) {
		start = 0;
		end = sec_size(patched_sym->sec);
	} else {
		start = patched_sym->offset;
		end = start + patched_sym->len;
	}

	for_each_reloc(patched_rsec, patched_reloc) {
		unsigned long offset;

		if (reloc_offset(patched_reloc) < start ||
		    reloc_offset(patched_reloc) >= end)
			continue;

		/*
		 * Skip any reloc referencing .altinstr_aux. Its code is
		 * always patched by alternatives. See ALTERNATIVE_TERNARY().
		 */
		if (patched_reloc->sym->sec &&
		    !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux"))
			continue;

		if (convert_reloc_sym(e->patched, patched_reloc)) {
			ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc),
				   "failed to convert reloc sym '%s' to its proper format",
				   patched_reloc->sym->name);
			return -1;
		}

		offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset);

		if (clone_reloc(e, patched_reloc, out_sym->sec, offset))
			return -1;
	}

	return 0;
}

static int create_fake_symbol(struct elf *elf, struct section *sec,
			      unsigned long offset, size_t size)
{
	char name[SYM_NAME_LEN];
	unsigned int type;
	static int ctr;
	char *c;

	if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++))
		return -1;

	for (c = name; *c; c++)
		if (*c == '.')
			*c = '_';

	/*
	 * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement
	 * while still allowing objdump to disassemble it.
	 */
	type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT;
	return elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size) ? 0 : -1;
}

/*
 * Special sections (alternatives, etc) are basically arrays of structs.
 * For all the special sections, create a symbol for each struct entry. This
 * is a bit cumbersome, but it makes the extracting of the individual entries
 * much more straightforward.
 *
 * There are three ways to identify the entry sizes for a special section:
 *
 * 1) ELF section header sh_entsize: Ideally this would be used almost
 *    everywhere. But unfortunately the toolchains make it difficult. The
 *    assembler .[push]section directive syntax only takes entsize when
 *    combined with SHF_MERGE. But Clang disallows combining SHF_MERGE with
 *    SHF_WRITE. And some special sections do need to be writable.
 *
 *    Another place this wouldn't work is .altinstr_replacement, whose entries
 *    don't have a fixed size.
 *
 * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which
 *    points to the beginning of each entry. The size of the entry is then
 *    inferred by the location of the subsequent annotation (or end of
 *    section).
 *
 * 3) Simple array of pointers: If the special section is just a basic array of
 *    pointers, the entry size can be inferred by the number of relocations.
 *    No annotations needed.
 *
 * Note I also tried to create per-entry symbols at the time of creation, in
 * the original [inline] asm. Unfortunately, creating uniquely named symbols
 * is trickier than one might think, especially with Clang inline asm. I
 * eventually just gave up trying to make that work, in favor of using
 * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact.
 */
static int create_fake_symbols(struct elf *elf)
{
	struct section *sec;
	struct reloc *reloc;

	/*
	 * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries:
	 */

	sec = find_section_by_name(elf, ".discard.annotate_data");
	if (!sec || !sec->rsec)
		return 0;

	for_each_reloc(sec->rsec, reloc) {
		unsigned long offset, size;
		struct reloc *next_reloc;

		if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL)
			continue;

		offset = reloc_addend(reloc);

		size = 0;
		next_reloc = reloc;
		for_each_reloc_continue(sec->rsec, next_reloc) {
			if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL ||
			    next_reloc->sym->sec != reloc->sym->sec)
				continue;

			size = reloc_addend(next_reloc) - offset;
			break;
		}

		if (!size)
			size = sec_size(reloc->sym->sec) - offset;

		if (create_fake_symbol(elf, reloc->sym->sec, offset, size))
			return -1;
	}

	/*
	 * 2) Make symbols for sh_entsize, and simple arrays of pointers:
	 */

	for_each_sec(elf, sec) {
		unsigned int entry_size;
		unsigned long offset;

		if (!is_special_section(sec) || find_symbol_by_offset(sec, 0))
			continue;

		if (!sec->rsec) {
			ERROR("%s: missing special section relocations", sec->name);
			return -1;
		}

		entry_size = sec->sh.sh_entsize;
		if (!entry_size) {
			entry_size = arch_reloc_size(sec->rsec->relocs);
			if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) {
				ERROR("%s: missing special section entsize or annotations", sec->name);
				return -1;
			}
		}

		for (offset = 0; offset < sec_size(sec); offset += entry_size) {
			if (create_fake_symbol(elf, sec, offset, entry_size))
				return -1;
		}
	}

	return 0;
}

/* Keep a special section entry if it references an included function */
static bool should_keep_special_sym(struct elf *elf, struct symbol *sym)
{
	struct reloc *reloc;

	if (is_sec_sym(sym) || !sym->sec->rsec)
		return false;

	sym_for_each_reloc(elf, sym, reloc) {
		if (convert_reloc_sym(elf, reloc))
			continue;

		if (is_func_sym(reloc->sym) && reloc->sym->included)
			return true;
	}

	return false;
}

/*
 * Klp relocations aren't allowed for __jump_table and .static_call_sites if
 * the referenced symbol lives in a kernel module, because such klp relocs may
 * be applied after static branch/call init, resulting in code corruption.
 *
 * Validate a special section entry to avoid that. Note that an inert
 * tracepoint is harmless enough, in that case just skip the entry and print a
 * warning. Otherwise, return an error.
 *
 * This is only a temporary limitation which will be fixed when livepatch adds
 * support for submodules: fully self-contained modules which are embedded in
 * the top-level livepatch module's data and which can be loaded on demand when
 * their corresponding to-be-patched module gets loaded. Then klp relocs can
 * be retired.
 *
 * Return:
 *   -1: error: validation failed
 *    1: warning: tracepoint skipped
 *    0: success
 */
static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
{
	bool static_branch = !strcmp(sym->sec->name, "__jump_table");
	bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
	struct symbol *code_sym = NULL;
	unsigned long code_offset = 0;
	struct reloc *reloc;
	int ret = 0;

	if (!static_branch && !static_call)
		return 0;

	sym_for_each_reloc(e->patched, sym, reloc) {
		const char *sym_modname;
		struct export *export;

		/* Static branch/call keys are always STT_OBJECT */
		if (reloc->sym->type != STT_OBJECT) {

			/* Save code location which can be printed below */
			if (reloc->sym->type == STT_FUNC && !code_sym) {
				code_sym = reloc->sym;
				code_offset = reloc_addend(reloc);
			}

			continue;
		}

		if (!klp_reloc_needed(reloc))
			continue;

		export = find_export(reloc->sym);
		if (export) {
			sym_modname = export->mod;
		} else {
			sym_modname = find_modname(e);
			if (!sym_modname)
				return -1;
		}

		/* vmlinux keys are ok */
		if (!strcmp(sym_modname, "vmlinux"))
			continue;

		if (static_branch) {
			if (strstarts(reloc->sym->name, "__tracepoint_")) {
				WARN("%s: disabling unsupported tracepoint %s",
				     code_sym->name, reloc->sym->name + 13);
				ret = 1;
				continue;
			}

			ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
			      code_sym->name, code_offset, reloc->sym->name);
			return -1;
		}

		/* static call */
		if (strstarts(reloc->sym->name, "__SCK__tp_func_")) {
			ret = 1;
			continue;
		}

		ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
		      code_sym->name, code_offset, reloc->sym->name);
		return -1;
	}

	return ret;
}

static int clone_special_section(struct elfs *e, struct section *patched_sec)
{
	struct symbol *patched_sym;

	/*
	 * Extract all special section symbols (and their dependencies) which
	 * reference included functions.
	 */
	sec_for_each_sym(patched_sec, patched_sym) {
		int ret;

		if (!is_object_sym(patched_sym))
			continue;

		if (!should_keep_special_sym(e->patched, patched_sym))
			continue;

		ret = validate_special_section_klp_reloc(e, patched_sym);
		if (ret < 0)
			return -1;
		if (ret > 0)
			continue;

		if (!clone_symbol(e, patched_sym, true))
			return -1;
	}

	return 0;
}

/* Extract only the needed bits from special sections */
static int clone_special_sections(struct elfs *e)
{
	struct section *patched_sec;

	for_each_sec(e->patched, patched_sec) {
		if (is_special_section(patched_sec)) {
			if (clone_special_section(e, patched_sec))
				return -1;
		}
	}

	return 0;
}

/*
 * Create __klp_objects and __klp_funcs sections which are intermediate
 * sections provided as input to the patch module's init code for building the
 * klp_patch, klp_object and klp_func structs for the livepatch API.
 */
static int create_klp_sections(struct elfs *e)
{
	size_t obj_size = sizeof(struct klp_object_ext);
	size_t func_size = sizeof(struct klp_func_ext);
	struct section *obj_sec, *funcs_sec, *str_sec;
	struct symbol *funcs_sym, *str_sym, *sym;
	char sym_name[SYM_NAME_LEN];
	unsigned int nr_funcs = 0;
	const char *modname;
	void *obj_data;
	s64 addend;

	obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0);
	if (!obj_sec)
		return -1;

	funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0);
	if (!funcs_sec)
		return -1;

	funcs_sym = elf_create_section_symbol(e->out, funcs_sec);
	if (!funcs_sym)
		return -1;

	str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0,
				     SHT_PROGBITS, 1,
				     SHF_ALLOC | SHF_STRINGS | SHF_MERGE);
	if (!str_sec)
		return -1;

	if (elf_add_string(e->out, str_sec, "") == -1)
		return -1;

	str_sym = elf_create_section_symbol(e->out, str_sec);
	if (!str_sym)
		return -1;

	/* allocate klp_object_ext */
	obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size);
	if (!obj_data)
		return -1;

	modname = find_modname(e);
	if (!modname)
		return -1;

	/* klp_object_ext.name */
	if (strcmp(modname, "vmlinux")) {
		addend = elf_add_string(e->out, str_sec, modname);
		if (addend == -1)
			return -1;

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, name),
				      str_sym, addend, R_ABS64))
			return -1;
	}

	/* klp_object_ext.funcs */
	if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs),
			      funcs_sym, 0, R_ABS64))
		return -1;

	for_each_sym(e->out, sym) {
		unsigned long offset = nr_funcs * func_size;
		unsigned long sympos;
		void *func_data;

		if (!is_func_sym(sym) || sym->cold || !sym->clone || !sym->clone->changed)
			continue;

		/* allocate klp_func_ext */
		func_data = elf_add_data(e->out, funcs_sec, NULL, func_size);
		if (!func_data)
			return -1;

		/* klp_func_ext.old_name */
		addend = elf_add_string(e->out, str_sec, sym->clone->twin->name);
		if (addend == -1)
			return -1;

		if (!elf_create_reloc(e->out, funcs_sec,
				      offset + offsetof(struct klp_func_ext, old_name),
				      str_sym, addend, R_ABS64))
			return -1;

		/* klp_func_ext.new_func */
		if (!elf_create_reloc(e->out, funcs_sec,
				      offset + offsetof(struct klp_func_ext, new_func),
				      sym, 0, R_ABS64))
			return -1;

		/* klp_func_ext.sympos */
		BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos));
		sympos = find_sympos(e->orig, sym->clone->twin);
		if (sympos == ULONG_MAX)
			return -1;
		memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos,
		       sizeof_field(struct klp_func_ext, sympos));

		nr_funcs++;
	}

	/* klp_object_ext.nr_funcs */
	BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs));
	memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs,
	       sizeof_field(struct klp_object_ext, nr_funcs));

	/*
	 * Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and
	 * friends, and add them to the klp object.
	 */

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, pre_patch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, post_patch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, pre_unpatch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname))
		return -1;

	sym = find_symbol_by_name(e->out, sym_name);
	if (sym) {
		struct reloc *reloc;

		reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);

		if (!elf_create_reloc(e->out, obj_sec,
				      offsetof(struct klp_object_ext, callbacks) +
				      offsetof(struct klp_callbacks, post_unpatch),
				      reloc->sym, reloc_addend(reloc), R_ABS64))
			return -1;
	}

	return 0;
}

/*
 * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols
 * can be accessed via normal relocs.
 */
static int copy_import_ns(struct elfs *e)
{
	struct section *patched_sec, *out_sec = NULL;
	char *import_ns, *data_end;

	patched_sec = find_section_by_name(e->patched, ".modinfo");
	if (!patched_sec)
		return 0;

	import_ns = patched_sec->data->d_buf;
	if (!import_ns)
		return 0;

	for (data_end = import_ns + sec_size(patched_sec);
	     import_ns < data_end;
	     import_ns += strlen(import_ns) + 1) {

		import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10);
		if (!import_ns)
			return 0;

		if (!out_sec) {
			out_sec = find_section_by_name(e->out, ".modinfo");
			if (!out_sec) {
				out_sec = elf_create_section(e->out, ".modinfo", 0,
							     patched_sec->sh.sh_entsize,
							     patched_sec->sh.sh_type,
							     patched_sec->sh.sh_addralign,
							     patched_sec->sh.sh_flags);
				if (!out_sec)
					return -1;
			}
		}

		if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1))
			return -1;
	}

	return 0;
}

int cmd_klp_diff(int argc, const char **argv)
{
	struct elfs e = {0};

	argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0);
	if (argc != 3)
		usage_with_options(klp_diff_usage, klp_diff_options);

	objname = argv[0];

	e.orig = elf_open_read(argv[0], O_RDONLY);
	e.patched = elf_open_read(argv[1], O_RDONLY);
	e.out = NULL;

	if (!e.orig || !e.patched)
		return -1;

	if (read_exports())
		return -1;

	if (read_sym_checksums(e.orig))
		return -1;

	if (read_sym_checksums(e.patched))
		return -1;

	if (correlate_symbols(&e))
		return -1;

	if (mark_changed_functions(&e))
		return 0;

	e.out = elf_create_file(&e.orig->ehdr, argv[2]);
	if (!e.out)
		return -1;

	/*
	 * Special section fake symbols are needed so that individual special
	 * section entries can be extracted by clone_special_sections().
	 *
	 * Note the fake symbols are also needed by clone_included_functions()
	 * because __WARN_printf() call sites add references to bug table
	 * entries in the calling functions.
	 */
	if (create_fake_symbols(e.patched))
		return -1;

	if (clone_included_functions(&e))
		return -1;

	if (clone_special_sections(&e))
		return -1;

	if (create_klp_sections(&e))
		return -1;

	if (copy_import_ns(&e))
		return -1;

	if (elf_write(e.out))
		return -1;

	return elf_close(e.out);
}