Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] kprobes: Allow multiple kprobes at the same address

Allow registration of multiple kprobes at an address in an architecture
agnostic way. Corresponding handlers will be invoked in a sequence. But,
a kprobe and a jprobe can't (yet) co-exist at the same address.

Signed-off-by: Ananth N Mavinakayanahalli <amavin@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ananth N Mavinakayanahalli and committed by Linus Torvalds.
64f562c6 04dea5f9

+134 -13
+3
include/linux/kprobes.h
··· 43 43 struct kprobe { 44 44 struct hlist_node hlist; 45 45 46 + /* list of kprobes for multi-handler support */ 47 + struct list_head list; 48 + 46 49 /* location of the probe point */ 47 50 kprobe_opcode_t *addr; 48 51
+131 -13
kernel/kprobes.c
··· 44 44 45 45 unsigned int kprobe_cpu = NR_CPUS; 46 46 static DEFINE_SPINLOCK(kprobe_lock); 47 + static struct kprobe *curr_kprobe; 47 48 48 49 /* Locks kprobe: irqs must be disabled */ 49 50 void lock_kprobes(void) ··· 74 73 return NULL; 75 74 } 76 75 76 + /* 77 + * Aggregate handlers for multiple kprobes support - these handlers 78 + * take care of invoking the individual kprobe handlers on p->list 79 + */ 80 + int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 81 + { 82 + struct kprobe *kp; 83 + 84 + list_for_each_entry(kp, &p->list, list) { 85 + if (kp->pre_handler) { 86 + curr_kprobe = kp; 87 + kp->pre_handler(kp, regs); 88 + curr_kprobe = NULL; 89 + } 90 + } 91 + return 0; 92 + } 93 + 94 + void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 95 + unsigned long flags) 96 + { 97 + struct kprobe *kp; 98 + 99 + list_for_each_entry(kp, &p->list, list) { 100 + if (kp->post_handler) { 101 + curr_kprobe = kp; 102 + kp->post_handler(kp, regs, flags); 103 + curr_kprobe = NULL; 104 + } 105 + } 106 + return; 107 + } 108 + 109 + int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) 110 + { 111 + /* 112 + * if we faulted "during" the execution of a user specified 113 + * probe handler, invoke just that probe's fault handler 114 + */ 115 + if (curr_kprobe && curr_kprobe->fault_handler) { 116 + if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr)) 117 + return 1; 118 + } 119 + return 0; 120 + } 121 + 122 + /* 123 + * Fill in the required fields of the "manager kprobe". 
Replace the 124 + * earlier kprobe in the hlist with the manager kprobe 125 + */ 126 + static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 127 + { 128 + ap->addr = p->addr; 129 + ap->opcode = p->opcode; 130 + memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn)); 131 + 132 + ap->pre_handler = aggr_pre_handler; 133 + ap->post_handler = aggr_post_handler; 134 + ap->fault_handler = aggr_fault_handler; 135 + 136 + INIT_LIST_HEAD(&ap->list); 137 + list_add(&p->list, &ap->list); 138 + 139 + INIT_HLIST_NODE(&ap->hlist); 140 + hlist_del(&p->hlist); 141 + hlist_add_head(&ap->hlist, 142 + &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]); 143 + } 144 + 145 + /* 146 + * This is the second or subsequent kprobe at the address - handle 147 + * the intricacies 148 + * TODO: Move kcalloc outside the spinlock 149 + */ 150 + static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) 151 + { 152 + int ret = 0; 153 + struct kprobe *ap; 154 + 155 + if (old_p->break_handler || p->break_handler) { 156 + ret = -EEXIST; /* kprobe and jprobe can't (yet) coexist */ 157 + } else if (old_p->pre_handler == aggr_pre_handler) { 158 + list_add(&p->list, &old_p->list); 159 + } else { 160 + ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC); 161 + if (!ap) 162 + return -ENOMEM; 163 + add_aggr_kprobe(ap, old_p); 164 + list_add(&p->list, &ap->list); 165 + } 166 + return ret; 167 + } 168 + 169 + /* kprobe removal house-keeping routines */ 170 + static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags) 171 + { 172 + *p->addr = p->opcode; 173 + hlist_del(&p->hlist); 174 + flush_icache_range((unsigned long) p->addr, 175 + (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 176 + spin_unlock_irqrestore(&kprobe_lock, flags); 177 + arch_remove_kprobe(p); 178 + } 179 + 180 + static inline void cleanup_aggr_kprobe(struct kprobe *old_p, 181 + struct kprobe *p, unsigned long flags) 182 + { 183 + list_del(&p->list); 184 + if (list_empty(&old_p->list)) 
{ 185 + cleanup_kprobe(old_p, flags); 186 + kfree(old_p); 187 + } else 188 + spin_unlock_irqrestore(&kprobe_lock, flags); 189 + } 190 + 77 191 int register_kprobe(struct kprobe *p) 78 192 { 79 193 int ret = 0; 80 194 unsigned long flags = 0; 195 + struct kprobe *old_p; 81 196 82 197 if ((ret = arch_prepare_kprobe(p)) != 0) { 83 198 goto rm_kprobe; 84 199 } 85 200 spin_lock_irqsave(&kprobe_lock, flags); 86 - INIT_HLIST_NODE(&p->hlist); 87 - if (get_kprobe(p->addr)) { 88 - ret = -EEXIST; 201 + old_p = get_kprobe(p->addr); 202 + if (old_p) { 203 + ret = register_aggr_kprobe(old_p, p); 89 204 goto out; 90 205 } 91 - arch_copy_kprobe(p); 92 206 207 + arch_copy_kprobe(p); 208 + INIT_HLIST_NODE(&p->hlist); 93 209 hlist_add_head(&p->hlist, 94 210 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 95 211 ··· 225 107 void unregister_kprobe(struct kprobe *p) 226 108 { 227 109 unsigned long flags; 110 + struct kprobe *old_p; 111 + 228 112 spin_lock_irqsave(&kprobe_lock, flags); 229 - if (!get_kprobe(p->addr)) { 113 + old_p = get_kprobe(p->addr); 114 + if (old_p) { 115 + if (old_p->pre_handler == aggr_pre_handler) 116 + cleanup_aggr_kprobe(old_p, p, flags); 117 + else 118 + cleanup_kprobe(p, flags); 119 + } else 230 120 spin_unlock_irqrestore(&kprobe_lock, flags); 231 - return; 232 - } 233 - *p->addr = p->opcode; 234 - hlist_del(&p->hlist); 235 - flush_icache_range((unsigned long) p->addr, 236 - (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 237 - spin_unlock_irqrestore(&kprobe_lock, flags); 238 - arch_remove_kprobe(p); 239 121 } 240 122 241 123 static struct notifier_block kprobe_exceptions_nb = {