/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct   Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *            Probes initial implementation (includes suggestions from
 *            Rusty Russell).
 * 2004-Aug   Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *            hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July  Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *            interface to access function arguments.
 * 2004-Sep   Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *            exceptions notifier to be first on the priority list.
 * 2005-May   Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *            <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *            <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);             /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
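/*
 * Illustrative arithmetic (a sketch; the actual values are arch-defined):
 * with PAGE_SIZE = 4096 and a slot size of
 * MAX_INSN_SIZE * sizeof(kprobe_opcode_t) = 16 bytes, INSNS_PER_PAGE
 * works out to 256 instruction slots per page.
 */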

struct kprobe_insn_page {
        struct hlist_node hlist;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk("Check failed: %s is running\n", p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

 retry:
        hlist_for_each(pos, &kprobe_insn_pages) {
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
                goto retry;
        }
        /* All out of space.  Need to allocate a new page.  Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip) {
                return NULL;
        }

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_HLIST_NODE(&kip->hlist);
        hlist_add_head(&kip->hlist, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}
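
/*
 * Illustrative caller (a sketch, not code from this file): an arch's
 * arch_prepare_kprobe() typically takes a slot and copies the original
 * instruction into it for later single-stepping, e.g.:
 *
 *      p->ainsn.insn = get_insn_slot();
 *      if (!p->ainsn.insn)
 *              return -ENOMEM;
 *      memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 */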

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                hlist_del(&kip->hlist);
                if (hlist_empty(&kprobe_insn_pages)) {
                        INIT_HLIST_NODE(&kip->hlist);
                        hlist_add_head(&kip->hlist,
                                       &kprobe_insn_pages);
                } else {
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos, *next;

        /* Ensure no one is preempted while we touch the garbage slots */
        if (check_safety() != 0)
                return -EAGAIN;

        hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
                int i;
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each(pos, &kprobe_insn_pages) {
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else {
                                collect_one_slot(kip, i);
                        }
                        break;
                }
        }
        if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
                collect_garbage_slots();
        }
}
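
/*
 * Illustrative caller (a sketch): an arch's arch_remove_kprobe() hands the
 * slot back, passing dirty != 0 when the slot may still be executing and
 * should be reclaimed lazily by collect_garbage_slots() above, e.g.:
 *
 *      free_insn_slot(p->ainsn.insn, 1);
 */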
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
        return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;

        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
        return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
                return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
                                                             *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
                return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist,
                       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                               struct hlist_head *head)
{
        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        if (ri->rp) {
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;

        while ((ri = get_free_rp_inst(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_handler) {
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
                copy_kprobe(ap, p);
                ret = add_new_kprobe(ap, p);
        }
        return ret;
}

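/*
 * Illustrative scenario (a sketch; kp1/kp2, h1/h2 and the probed symbol
 * are assumptions, not definitions in this file): registering two probes
 * at one address leaves a single "manager" kprobe on the hash list whose
 * aggr_* handlers walk ->list and fire each user handler in turn:
 *
 *      static struct kprobe kp1 = { .symbol_name = "do_fork", .pre_handler = h1 };
 *      static struct kprobe kp2 = { .symbol_name = "do_fork", .pre_handler = h2 };
 *
 *      register_kprobe(&kp1);  (plain kprobe on the hash list)
 *      register_kprobe(&kp2);  (takes the register_aggr_kprobe() path)
 */
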
static int __kprobes in_kprobes_functions(unsigned long addr)
{
        if (addr >= (unsigned long)__kprobes_text_start
            && addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
                                       unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;

        /*
         * If we have a symbol_name argument look it up,
         * and add it to the address. That way the addr
         * field can either be global or relative to a symbol.
         */
        if (p->symbol_name) {
                if (p->addr)
                        return -EINVAL;
                kprobe_lookup_name(p->symbol_name, p->addr);
        }

        if (!p->addr)
                return -EINVAL;
        p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

        if ((!kernel_text_address((unsigned long) p->addr)) ||
            in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;

        p->mod_refcounted = 0;
        /* Check if we are probing a module */
        if ((probed_mod = module_text_address((unsigned long) p->addr))) {
                struct module *calling_mod = module_text_address(called_from);
                /*
                 * We must allow modules to probe themselves and in this
                 * case avoid incrementing the module refcount, so as to
                 * allow unloading of self-probing modules.
                 */
                if (calling_mod && (calling_mod != probed_mod)) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }

        p->nmissed = 0;
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                if (!ret)
                        atomic_inc(&kprobe_count);
                goto out;
        }

        if ((ret = arch_prepare_kprobe(p)) != 0)
                goto out;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (atomic_add_return(1, &kprobe_count) ==
            (ARCH_INACTIVE_KPROBE_COUNT + 1))
                register_page_fault_notifier(&kprobe_page_fault_nb);

        arch_arm_kprobe(p);

out:
        mutex_unlock(&kprobe_mutex);

        if (ret && probed_mod)
                module_put(probed_mod);
        return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
        return __register_kprobe(p,
                                 (unsigned long)__builtin_return_address(0));
}

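/*
 * Minimal usage sketch (module context assumed; my_pre and the probed
 * symbol are illustrative, not part of this file):
 *
 *      static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              printk("kprobe hit at %p\n", kp->addr);
 *              return 0;
 *      }
 *
 *      static struct kprobe my_kp = {
 *              .symbol_name = "do_fork",
 *              .pre_handler = my_pre,
 *      };
 *
 * Register with register_kprobe(&my_kp), checking the return value, and
 * pair it with unregister_kprobe(&my_kp) on module exit.
 */
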
void __kprobes unregister_kprobe(struct kprobe *p)
{
        struct module *mod;
        struct kprobe *old_p, *list_p;
        int cleanup_p;

        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p)) {
                mutex_unlock(&kprobe_mutex);
                return;
        }
        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid_p;
                mutex_unlock(&kprobe_mutex);
                return;
        }
valid_p:
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
            (p->list.next == &old_p->list) &&
            (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                arch_disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        if (p->mod_refcounted &&
            (mod = module_text_address((unsigned long)p->addr)))
                module_put(mod);

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }
                arch_remove_kprobe(p);
        } else {
                mutex_lock(&kprobe_mutex);
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }
                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
                mutex_unlock(&kprobe_mutex);
        }

        /*
         * Call unregister_page_fault_notifier()
         * if no probes are active
         */
        mutex_lock(&kprobe_mutex);
        if (atomic_add_return(-1, &kprobe_count) ==
            ARCH_INACTIVE_KPROBE_COUNT)
                unregister_page_fault_notifier(&kprobe_page_fault_nb);
        mutex_unlock(&kprobe_mutex);
        return;
}

static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return __register_kprobe(&jp->kp,
                                 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_kprobe(&jp->kp);
}

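/*
 * Minimal jprobe sketch (illustrative; jdo_fork and its signature are
 * assumptions and must mirror the probed function exactly):
 *
 *      static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *                           struct pt_regs *regs, unsigned long stack_size,
 *                           int __user *parent_tidptr, int __user *child_tidptr)
 *      {
 *              printk("do_fork: clone_flags=0x%lx\n", clone_flags);
 *              jprobe_return();        (control never passes this call)
 *              return 0;
 *      }
 *
 *      static struct jprobe my_jp = {
 *              .entry          = JPROBE_ENTRY(jdo_fork),
 *              .kp.symbol_name = "do_fork",
 *      };
 */
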
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long flags = 0;

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        spin_lock_irqsave(&kretprobe_lock, flags);
        arch_prepare_kretprobe(rp, regs);
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        if ((ret = __register_kprobe(&rp->kp,
                                     (unsigned long)__builtin_return_address(0))) != 0)
                free_rp_inst(rp);
        return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unsigned long flags;
        struct kretprobe_instance *ri;

        unregister_kprobe(&rp->kp);
        /* No race here */
        spin_lock_irqsave(&kretprobe_lock, flags);
        while ((ri = get_used_rp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        free_rp_inst(rp);
}

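/*
 * Minimal kretprobe sketch (illustrative; ret_handler and the probed
 * symbol are assumptions, not part of this file):
 *
 *      static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *      {
 *              printk("probed function returned to %p\n", ri->ret_addr);
 *              return 0;
 *      }
 *
 *      static struct kretprobe my_rp = {
 *              .handler        = ret_handler,
 *              .maxactive      = 20,
 *              .kp.symbol_name = "do_fork",
 *      };
 *
 * register_kretprobe(&my_rp) arms the entry probe; unregister_kretprobe()
 * must be called before the registering module unloads.
 */
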
static int __init init_kprobes(void)
{
        int i, err = 0;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }
        atomic_set(&kprobe_count, 0);

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);

        return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                                   const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
                           sym, offset, (modname ? modname : " "));
        else
                seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}

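/*
 * Example lines as emitted above into /sys/kernel/debug/kprobes/list
 * (addresses, symbols and the module name are illustrative):
 *
 *      c0123456 k do_fork+0x0
 *      d0802000 r some_func+0x0 some_module
 */
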
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long size, offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, &size,
                                      &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open    = kprobes_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, 0,
                                   &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);