/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

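/*
 * kprobe_table is keyed by the probed address; kretprobe_inst_table is
 * keyed by the probed task, so that kprobe_flush_task() can recycle
 * instances when a task exits.
 */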
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
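/* The kprobe whose handler is currently running on this CPU, if any */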
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
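/*
 * For example, if MAX_INSN_SIZE * sizeof(kprobe_opcode_t) works out to
 * 16 bytes on an architecture with 4096-byte pages, each page holds
 * 4096 / 16 = 256 instruction slots.
 */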

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise! No unused slots. Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space. Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

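/**
 * free_insn_slot() - Release the slot at @slot back to its page.
 * A page whose slots are all unused is freed, except for the last
 * remaining page, which is kept so the next probe insertion does not
 * have to allocate it again.
 */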
void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use. Free it unless
				 * it's the last one. We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

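/*
 * Dispatch a breakpoint to the break_handler of whichever probe's
 * handler is currently running on this CPU, e.g. a jprobe returning
 * through longjmp_break_handler().
 */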
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
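
	/* Return the first free instance, if any */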
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							     *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
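
	/* Return the first used instance, if any */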
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry_rcu(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail_rcu(&p->list, &old_p->list);
	} else
		list_add_rcu(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

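/*
 * Reject addresses inside the .kprobes.text section; probing the
 * kprobes infrastructure itself would recurse on the breakpoint.
 */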
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check whether we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and, in that
		 * case, avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
			(unsigned long)__builtin_return_address(0));
}
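
/*
 * Typical usage from a module (a sketch only; the handler name, the
 * probed symbol and the address lookup are illustrative, not part of
 * this API):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;	(returning 0 lets the probed insn execute)
 *	}
 *
 *	static struct kprobe kp = {
 *		.pre_handler = handler_pre,
 *	};
 *
 *	kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
 *	if (!kp.addr || register_kprobe(&kp) < 0)
 *		... bail out ...
 */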

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
			(unsigned long)__builtin_return_address(0));
}
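
/*
 * Example (a sketch only; jdo_fork stands in for a handler whose
 * signature must match the probed function exactly):
 *
 *	static long jdo_fork(unsigned long clone_flags, ...)
 *	{
 *		... examine the arguments ...
 *		jprobe_return();	(mandatory; never falls through)
 *		return 0;
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry = JPROBE_ENTRY(jdo_fork),
 *	};
 *
 *	jp.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
 *	register_jprobe(&jp);
 */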

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When
 * the probe is hit, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/*
	 * TODO: consider swapping the return address only after the
	 * last pre_handler has fired.
	 */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/*
	 * Pre-allocate memory for max kretprobe instances. Under
	 * CONFIG_PREEMPT a task can be preempted inside the probed
	 * function, so more instances may be outstanding at once.
	 */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
			(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
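
/*
 * Example (a sketch only; the handler and the probed address are
 * illustrative):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		... regs holds the probed function's return state ...
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler = ret_handler,
 *		.maxactive = 20,
 *	};
 *
 *	my_kretprobe.kp.addr =
 *		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
 *	register_kretprobe(&my_kretprobe);
 */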

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);