kprobes/x86-64: Fix to move common_interrupt to .kprobes.text
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / kprobes.c
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/* Return 1 if the slot's page has become fully unused, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif

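/*
 * Illustrative sketch only (not part of this file): architecture code is
 * the consumer of the slot allocator above. The ainsn field layout below
 * follows x86 and is an assumption -- each arch defines its own.
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void __kprobes arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn)
 *			free_insn_slot(p->ainsn.insn, 0);
 *	}
 */
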
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

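/*
 * Illustrative sketch (assumption: mirrors arch kretprobe trampoline
 * handlers such as x86's): the hash-lock pair above brackets a walk over
 * the current task's kretprobe instances.
 *
 *	struct kretprobe_instance *ri;
 *	struct hlist_node *node, *tmp;
 *	struct hlist_head *head;
 *	unsigned long flags;
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;	// an instance of another task
 *		...			// run ri->rp->handler, recycle ri
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */
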
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location as
		 * a probe in a module vaddr area that has already been
		 * freed. The instruction slot has therefore been released;
		 * we need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

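/*
 * Example (illustrative): a kprobe with .symbol_name = "do_fork" and
 * .offset = 0x10 resolves to do_fork+0x10 here, while setting both
 * .addr and .symbol_name makes kprobe_addr() return NULL, so the
 * registration below fails with -EINVAL.
 */
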
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

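/*
 * A minimal usage sketch (not part of this file), in the style of
 * Documentation/kprobes.txt: a module hooks do_fork with a pre_handler.
 * The handler body and message are illustrative only.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = 0x%p\n", p->addr);
 *		return 0;	// 0 lets the probed instruction execute
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init kprobe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit kprobe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 */
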
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

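/*
 * Usage note (illustrative): batch registration is all-or-nothing; if
 * probe i fails, probes 0..i-1 are unregistered before the error returns.
 *
 *	static struct kprobe *my_kps[2] = { &kp1, &kp2 };	// assumed kprobes
 *	...
 *	ret = register_kprobes(my_kps, 2);
 */
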
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

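/*
 * A minimal jprobe sketch (illustrative, after Documentation/kprobes.txt):
 * the mirror function must have the probed function's signature and must
 * end in jprobe_return(). do_fork's signature matches this kernel era.
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns to the caller
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = jdo_fork,
 *		.kp = { .symbol_name = "do_fork", },
 *	};
 *	...
 *	ret = register_jprobe(&my_jprobe);
 */
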
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

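/*
 * A minimal kretprobe sketch (illustrative, after Documentation/kprobes.txt):
 * the handler fires when the probed function returns; the return value is
 * available through regs_return_value().
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "do_fork returned %ld\n",
 *		       (long)regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp		= { .symbol_name = "do_fork", },
 *		.maxactive	= 20,	// instances pre-allocated above
 *	};
 *	...
 *	ret = register_kretprobe(&my_kretprobe);
 */
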
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p %s %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

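/*
 * Usage note (illustrative): disable_kprobe()/enable_kprobe() mute and
 * re-arm a probe without the full unregister/register round trip. "kp"
 * is assumed to be an already-registered kprobe.
 *
 *	disable_kprobe(&kp);	// handler stops firing; probe stays registered
 *	...
 *	enable_kprobe(&kp);	// breakpoint re-armed
 */
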
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static struct file_operations fops_kp = {
	.read =  read_enabled_file_bool,
	.write = write_enabled_file_bool,
};

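/*
 * Usage note: the files created below live under debugfs (commonly
 * mounted at /sys/kernel/debug). Writing '0' to kprobes/enabled runs
 * disarm_all_kprobes(); writing '1' runs arm_all_kprobes(). The
 * kprobes/list file reports each registered probe with its type
 * (k/r/j) and [GONE]/[DISABLED] flags.
 */
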
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);