/* include/linux/livepatch.h, at kernel tag v4.15-rc4 */
/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _LINUX_LIVEPATCH_H_
#define _LINUX_LIVEPATCH_H_

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

#include <asm/livepatch.h>

/* task patch states */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1

/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos:	a hint indicating at which symbol position the old function
 *		can be found (optional)
 * @immediate:	patch the func immediately, bypassing safety mechanisms
 * @old_addr:	the address of the function being patched
 * @kobj:	kobject for sysfs resources
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state.  When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * the symbol is expected to be unique, otherwise patching fails. If
	 * this value is greater than zero, then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;
	bool immediate;

	/* internal */
	unsigned long old_addr;
	struct kobject kobj;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool patched;
	bool transition;
};

struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:		executed before code patching
 * @post_patch:		executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional.  Only the pre-patch callback, if provided,
 * will be unconditionally executed.  If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};
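
/*
 * A minimal callbacks sketch (editorial illustration, not part of the
 * original header): the callback names and bodies below are hypothetical,
 * and 'funcs' is assumed to be a klp_func array like the one in the
 * registration example further down.  See
 * samples/livepatch/livepatch-callbacks-demo.c in the same tree for a
 * complete demo.  Note that post_unpatch_enabled is internal state managed
 * by the livepatch core, not something a patch module sets.
 *
 *	static int demo_pre_patch(struct klp_object *obj)
 *	{
 *		pr_info("pre-patching object '%s'\n",
 *			obj->name ? obj->name : "vmlinux");
 *		return 0;	// non-zero aborts patching this object
 *	}
 *
 *	static void demo_post_unpatch(struct klp_object *obj)
 *	{
 *		pr_info("finished unpatching object '%s'\n",
 *			obj->name ? obj->name : "vmlinux");
 *	}
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = "some_module",	// hypothetical target module
 *			.funcs = funcs,
 *			.callbacks = {
 *				.pre_patch = demo_pre_patch,
 *				.post_unpatch = demo_post_unpatch,
 *			},
 *		}, { }
 *	};
 */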
/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct module *mod;
	bool patched;
};

/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @immediate:	patch all funcs immediately, bypassing safety mechanisms
 * @list:	list node for global list of registered patches
 * @kobj:	kobject for sysfs resources
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @finish:	for waiting until it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	bool immediate;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	bool enabled;
	struct completion finish;
};

#define klp_for_each_object(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_func(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
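
/*
 * A minimal usage sketch, adapted from samples/livepatch/livepatch-sample.c
 * in the same tree: a patch module fills in klp_func/klp_object/klp_patch
 * and then registers and enables the patch from its init function.  The
 * empty { } entries are the terminators that the klp_for_each_object() and
 * klp_for_each_func() iterators above stop on; a NULL klp_object name
 * targets vmlinux.
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/seq_file.h>
 *	#include <linux/livepatch.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,		// .name == NULL => vmlinux
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		int ret;
 *
 *		ret = klp_register_patch(&patch);
 *		if (ret)
 *			return ret;
 *		ret = klp_enable_patch(&patch);
 *		if (ret) {
 *			WARN_ON(klp_unregister_patch(&patch));
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *		WARN_ON(klp_unregister_patch(&patch));
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */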
void arch_klp_init_object_loaded(struct klp_patch *patch,
				 struct klp_object *obj);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);

static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}

static inline bool klp_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
		       size_t size, gfp_t gfp_flags);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
			      size_t size, gfp_t gfp_flags);
void klp_shadow_free(void *obj, unsigned long id);
void klp_shadow_free_all(unsigned long id);

#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */
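
/*
 * Shadow variables, a usage sketch (editorial illustration with hypothetical
 * type and function names): a livepatch can attach new per-object state to
 * an existing structure without changing its layout by keying shadow data on
 * the <obj, id> pair.  With the signatures declared above, the 'data' buffer
 * is copied into newly allocated shadow memory.  See the
 * samples/livepatch/livepatch-shadow-* modules in the same tree for complete
 * demos.
 *
 *	#include <linux/livepatch.h>
 *
 *	#define SHADOW_REFCNT_ID	1	// patch-chosen shadow <id>
 *
 *	struct dummy;				// hypothetical patched object
 *
 *	static int livepatch_dummy_open(struct dummy *d)
 *	{
 *		int init = 0;
 *		int *refcnt;
 *
 *		// attach (or look up) a shadow int for d; 'init' is copied
 *		// into the shadow area on first allocation
 *		refcnt = klp_shadow_get_or_alloc(d, SHADOW_REFCNT_ID, &init,
 *						 sizeof(init), GFP_KERNEL);
 *		if (!refcnt)
 *			return -ENOMEM;
 *		(*refcnt)++;
 *		return 0;
 *	}
 *
 *	static void livepatch_dummy_release(struct dummy *d)
 *	{
 *		// drop the shadow pairing when the real object goes away
 *		klp_shadow_free(d, SHADOW_REFCNT_ID);
 *	}
 */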