/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
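
/*
 * For example (illustrative only; "do_fork" is just a sample symbol),
 * a caller resolves a probe address by name like this, exactly as
 * __register_kprobe() does below:
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 */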

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
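
/*
 * For instance, assuming a 4096-byte page, a one-byte kprobe_opcode_t
 * and a MAX_INSN_SIZE of 16 (roughly the x86 case), this works out to
 * 4096 / 16 = 256 slots per page; the exact value is arch-dependent.
 */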

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
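
/*
 * Slot life cycle, as implemented below: get_insn_slot() takes a slot
 * from SLOT_CLEAN to SLOT_USED; free_insn_slot() marks it SLOT_DIRTY
 * when some CPU may still be single-stepping out of it; and
 * collect_one_slot() returns it to SLOT_CLEAN once check_safety()
 * says that can no longer happen.
 */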

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

/* Return 1 if the whole page was collected (freed or recycled), otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}
	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE))
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These leftover instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address. That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check whether we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
				 (unsigned long)__builtin_return_address(0));
}
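
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * the handler name "my_pre" and the probed symbol are hypothetical, and
 * error handling is elided.  The pre_handler signature matches the one
 * aggr_pre_handler() dispatches to above; returning 0 lets the probed
 * instruction execute normally.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", kp->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	// module init:  register_kprobe(&my_kp);
 *	// module exit:  unregister_kprobe(&my_kp);
 */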

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier()
	 * if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
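
/*
 * A minimal jprobe sketch (illustrative only): the probed symbol and
 * its argument list are hypothetical.  A jprobe entry function mirrors
 * the signature of the probed function and must finish by calling
 * jprobe_return():
 *
 *	static long my_entry(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk("args: %lu, %lu\n", arg0, arg1);
 *		jprobe_return();	// mandatory; control never reaches...
 *		return 0;		// ...this return statement
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= (kprobe_opcode_t *)my_entry,
 *		.kp.symbol_name	= "some_traced_function",
 *	};
 *
 *	// register_jprobe(&my_jp); ... unregister_jprobe(&my_jp);
 */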
715
b94cce92
HN
716#ifdef ARCH_SUPPORTS_KRETPROBES
717
e65cefe8
AB
718/*
719 * This kprobe pre_handler is registered with every kretprobe. When probe
720 * hits it will set up the return probe.
721 */
722static int __kprobes pre_handler_kretprobe(struct kprobe *p,
723 struct pt_regs *regs)
724{
725 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
726 unsigned long flags = 0;
727
728 /*TODO: consider to only swap the RA after the last pre_handler fired */
729 spin_lock_irqsave(&kretprobe_lock, flags);
730 arch_prepare_kretprobe(rp, regs);
731 spin_unlock_irqrestore(&kretprobe_lock, flags);
732 return 0;
733}
734
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
			(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
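
/*
 * A minimal kretprobe sketch (illustrative only): the handler name and
 * probed symbol are hypothetical.  The handler fires when the probed
 * function returns; maxactive bounds how many returns may be tracked
 * concurrently (see the pre-allocation loop in register_kretprobe()):
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk("probed function returned to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_krp = {
 *		.handler	= my_ret,
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	// register_kretprobe(&my_krp); ... unregister_kretprobe(&my_krp);
 */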

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);