Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching fixes from Jiri Kosina:

- symbol lookup locking fix, from Miroslav Benes

- error handling improvements in case of failure of the module coming
notifier, from Minfei Huang

- we were too pessimistic when kASLR has been enabled on x86 and were
  unnecessarily dropping address hints on the floor in such a case. Fix
  from Jiri Kosina

- a few other small fixes and cleanups

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
livepatch: add module locking around kallsyms calls
livepatch: annotate klp_init() with __init
livepatch: introduce patch/func-walking helpers
livepatch: make kobject in klp_object statically allocated
livepatch: Prevent patch inconsistencies if the coming module notifier fails
livepatch: match return value to function signature
x86: kaslr: fix build due to missing ALIGN definition
livepatch: x86: make kASLR logic more accurate
x86: introduce kaslr_offset()

+79 -38
+1
arch/x86/include/asm/livepatch.h
··· 21 21 #ifndef _ASM_X86_LIVEPATCH_H 22 22 #define _ASM_X86_LIVEPATCH_H 23 23 24 + #include <asm/setup.h> 24 25 #include <linux/module.h> 25 26 #include <linux/ftrace.h> 26 27
+7
arch/x86/include/asm/setup.h
··· 60 60 #ifndef _SETUP 61 61 62 62 #include <asm/espfix.h> 63 + #include <linux/kernel.h> 63 64 64 65 /* 65 66 * This is set up by the setup-routine at boot-time 66 67 */ 67 68 extern struct boot_params boot_params; 69 + extern char _text[]; 68 70 69 71 static inline bool kaslr_enabled(void) 70 72 { 71 73 return !!(boot_params.hdr.loadflags & KASLR_FLAG); 74 + } 75 + 76 + static inline unsigned long kaslr_offset(void) 77 + { 78 + return (unsigned long)&_text - __START_KERNEL; 72 79 } 73 80 74 81 /*
+2 -1
arch/x86/kernel/machine_kexec_64.c
··· 26 26 #include <asm/io_apic.h> 27 27 #include <asm/debugreg.h> 28 28 #include <asm/kexec-bzimage64.h> 29 + #include <asm/setup.h> 29 30 30 31 #ifdef CONFIG_KEXEC_FILE 31 32 static struct kexec_file_ops *kexec_file_loaders[] = { ··· 336 335 VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); 337 336 #endif 338 337 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", 339 - (unsigned long)&_text - __START_KERNEL); 338 + kaslr_offset()); 340 339 } 341 340 342 341 /* arch-dependent functionality related to kexec file-based syscall */
+1 -1
arch/x86/kernel/setup.c
··· 836 836 { 837 837 if (kaslr_enabled()) { 838 838 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", 839 - (unsigned long)&_text - __START_KERNEL, 839 + kaslr_offset(), 840 840 __START_KERNEL, 841 841 __START_KERNEL_map, 842 842 MODULES_VADDR-1);
+7 -1
include/linux/livepatch.h
··· 99 99 struct klp_func *funcs; 100 100 101 101 /* internal */ 102 - struct kobject *kobj; 102 + struct kobject kobj; 103 103 struct module *mod; 104 104 enum klp_state state; 105 105 }; ··· 122 122 struct kobject kobj; 123 123 enum klp_state state; 124 124 }; 125 + 126 + #define klp_for_each_object(patch, obj) \ 127 + for (obj = patch->objs; obj->funcs; obj++) 128 + 129 + #define klp_for_each_func(obj, func) \ 130 + for (func = obj->funcs; func->old_name; func++) 125 131 126 132 int klp_register_patch(struct klp_patch *); 127 133 int klp_unregister_patch(struct klp_patch *);
+61 -35
kernel/livepatch/core.c
··· 128 128 129 129 static bool klp_initialized(void) 130 130 { 131 - return klp_root_kobj; 131 + return !!klp_root_kobj; 132 132 } 133 133 134 134 struct klp_find_arg { ··· 179 179 .count = 0 180 180 }; 181 181 182 + mutex_lock(&module_mutex); 182 183 kallsyms_on_each_symbol(klp_find_callback, &args); 184 + mutex_unlock(&module_mutex); 183 185 184 186 if (args.count == 0) 185 187 pr_err("symbol '%s' not found in symbol table\n", name); ··· 221 219 .name = name, 222 220 .addr = addr, 223 221 }; 222 + int ret; 224 223 225 - if (kallsyms_on_each_symbol(klp_verify_callback, &args)) 226 - return 0; 224 + mutex_lock(&module_mutex); 225 + ret = kallsyms_on_each_symbol(klp_verify_callback, &args); 226 + mutex_unlock(&module_mutex); 227 227 228 - pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n", 229 - name, addr); 230 - return -EINVAL; 228 + if (!ret) { 229 + pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n", 230 + name, addr); 231 + return -EINVAL; 232 + } 233 + 234 + return 0; 231 235 } 232 236 233 237 static int klp_find_verify_func_addr(struct klp_object *obj, ··· 242 234 int ret; 243 235 244 236 #if defined(CONFIG_RANDOMIZE_BASE) 245 - /* KASLR is enabled, disregard old_addr from user */ 246 - func->old_addr = 0; 237 + /* If KASLR has been enabled, adjust old_addr accordingly */ 238 + if (kaslr_enabled() && func->old_addr) 239 + func->old_addr += kaslr_offset(); 247 240 #endif 248 241 249 242 if (!func->old_addr || klp_is_module(obj)) ··· 431 422 { 432 423 struct klp_func *func; 433 424 434 - for (func = obj->funcs; func->old_name; func++) 425 + klp_for_each_func(obj, func) 435 426 if (func->state == KLP_ENABLED) 436 427 klp_disable_func(func); 437 428 ··· 449 440 if (WARN_ON(!klp_is_object_loaded(obj))) 450 441 return -EINVAL; 451 442 452 - for (func = obj->funcs; func->old_name; func++) { 443 + klp_for_each_func(obj, func) { 453 444 ret = klp_enable_func(func); 454 445 if (ret) { 455 446 
klp_disable_object(obj); ··· 472 463 473 464 pr_notice("disabling patch '%s'\n", patch->mod->name); 474 465 475 - for (obj = patch->objs; obj->funcs; obj++) { 466 + klp_for_each_object(patch, obj) { 476 467 if (obj->state == KLP_ENABLED) 477 468 klp_disable_object(obj); 478 469 } ··· 532 523 533 524 pr_notice("enabling patch '%s'\n", patch->mod->name); 534 525 535 - for (obj = patch->objs; obj->funcs; obj++) { 526 + klp_for_each_object(patch, obj) { 536 527 if (!klp_is_object_loaded(obj)) 537 528 continue; 538 529 ··· 660 651 .default_attrs = klp_patch_attrs, 661 652 }; 662 653 654 + static void klp_kobj_release_object(struct kobject *kobj) 655 + { 656 + } 657 + 658 + static struct kobj_type klp_ktype_object = { 659 + .release = klp_kobj_release_object, 660 + .sysfs_ops = &kobj_sysfs_ops, 661 + }; 662 + 663 663 static void klp_kobj_release_func(struct kobject *kobj) 664 664 { 665 665 } ··· 698 680 699 681 obj->mod = NULL; 700 682 701 - for (func = obj->funcs; func->old_name; func++) 683 + klp_for_each_func(obj, func) 702 684 func->old_addr = 0; 703 685 } 704 686 ··· 713 695 714 696 for (obj = patch->objs; obj->funcs && obj != limit; obj++) { 715 697 klp_free_funcs_limited(obj, NULL); 716 - kobject_put(obj->kobj); 698 + kobject_put(&obj->kobj); 717 699 } 718 700 } 719 701 ··· 731 713 func->state = KLP_DISABLED; 732 714 733 715 return kobject_init_and_add(&func->kobj, &klp_ktype_func, 734 - obj->kobj, "%s", func->old_name); 716 + &obj->kobj, "%s", func->old_name); 735 717 } 736 718 737 719 /* parts of the initialization that is done only when the object is loaded */ ··· 747 729 return ret; 748 730 } 749 731 750 - for (func = obj->funcs; func->old_name; func++) { 732 + klp_for_each_func(obj, func) { 751 733 ret = klp_find_verify_func_addr(obj, func); 752 734 if (ret) 753 735 return ret; ··· 771 753 klp_find_object_module(obj); 772 754 773 755 name = klp_is_module(obj) ? 
obj->name : "vmlinux"; 774 - obj->kobj = kobject_create_and_add(name, &patch->kobj); 775 - if (!obj->kobj) 776 - return -ENOMEM; 756 + ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object, 757 + &patch->kobj, "%s", name); 758 + if (ret) 759 + return ret; 777 760 778 - for (func = obj->funcs; func->old_name; func++) { 761 + klp_for_each_func(obj, func) { 779 762 ret = klp_init_func(obj, func); 780 763 if (ret) 781 764 goto free; ··· 792 773 793 774 free: 794 775 klp_free_funcs_limited(obj, func); 795 - kobject_put(obj->kobj); 776 + kobject_put(&obj->kobj); 796 777 return ret; 797 778 } 798 779 ··· 813 794 if (ret) 814 795 goto unlock; 815 796 816 - for (obj = patch->objs; obj->funcs; obj++) { 797 + klp_for_each_object(patch, obj) { 817 798 ret = klp_init_object(patch, obj); 818 799 if (ret) 819 800 goto free; ··· 902 883 } 903 884 EXPORT_SYMBOL_GPL(klp_register_patch); 904 885 905 - static void klp_module_notify_coming(struct klp_patch *patch, 886 + static int klp_module_notify_coming(struct klp_patch *patch, 906 887 struct klp_object *obj) 907 888 { 908 889 struct module *pmod = patch->mod; ··· 910 891 int ret; 911 892 912 893 ret = klp_init_object_loaded(patch, obj); 913 - if (ret) 914 - goto err; 894 + if (ret) { 895 + pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n", 896 + pmod->name, mod->name, ret); 897 + return ret; 898 + } 915 899 916 900 if (patch->state == KLP_DISABLED) 917 - return; 901 + return 0; 918 902 919 903 pr_notice("applying patch '%s' to loading module '%s'\n", 920 904 pmod->name, mod->name); 921 905 922 906 ret = klp_enable_object(obj); 923 - if (!ret) 924 - return; 925 - 926 - err: 927 - pr_warn("failed to apply patch '%s' to module '%s' (%d)\n", 928 - pmod->name, mod->name, ret); 907 + if (ret) 908 + pr_warn("failed to apply patch '%s' to module '%s' (%d)\n", 909 + pmod->name, mod->name, ret); 910 + return ret; 929 911 } 930 912 931 913 static void klp_module_notify_going(struct klp_patch *patch, ··· 950 930 static int 
klp_module_notify(struct notifier_block *nb, unsigned long action, 951 931 void *data) 952 932 { 933 + int ret; 953 934 struct module *mod = data; 954 935 struct klp_patch *patch; 955 936 struct klp_object *obj; ··· 970 949 mod->klp_alive = false; 971 950 972 951 list_for_each_entry(patch, &klp_patches, list) { 973 - for (obj = patch->objs; obj->funcs; obj++) { 952 + klp_for_each_object(patch, obj) { 974 953 if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) 975 954 continue; 976 955 977 956 if (action == MODULE_STATE_COMING) { 978 957 obj->mod = mod; 979 - klp_module_notify_coming(patch, obj); 958 + ret = klp_module_notify_coming(patch, obj); 959 + if (ret) { 960 + obj->mod = NULL; 961 + pr_warn("patch '%s' is in an inconsistent state!\n", 962 + patch->mod->name); 963 + } 980 964 } else /* MODULE_STATE_GOING */ 981 965 klp_module_notify_going(patch, obj); 982 966 ··· 999 973 .priority = INT_MIN+1, /* called late but before ftrace notifier */ 1000 974 }; 1001 975 1002 - static int klp_init(void) 976 + static int __init klp_init(void) 1003 977 { 1004 978 int ret; 1005 979