/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					    unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
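
/*
 * Worked example (illustrative only; the arch values are assumptions,
 * not taken from this file): on x86, kprobe_opcode_t is one byte and
 * MAX_INSN_SIZE is 16, so with 4KB pages slots_per_page() yields
 * 4096 / (16 * 1) = 256 instruction slots per executable page.
 */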

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
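
/*
 * Typical use from arch code (a sketch, assuming the get_insn_slot()/
 * free_insn_slot() wrappers from <linux/kprobes.h>, which operate on
 * the kprobe_insn_slots cache above):
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);
 */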

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check for double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction
 * slots. This is used to check whether an address found on a stack is
 * in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
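
/*
 * Usage sketch: callers that do not hold kprobe_mutex must be in an
 * RCU read-side critical section (or have preemption disabled) so the
 * returned kprobe cannot be freed under them:
 *
 *	rcu_read_lock();
 *	p = get_kprobe(addr);
 *	if (p)
 *		...use p...
 *	rcu_read_unlock();
 */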

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex, but stop_machine() cannot proceed because
	 * online_cpus has changed). To avoid this deadlock, the caller
	 * must have locked cpu hotplug, preventing cpu-hotplug from
	 * running outside of the text_mutex critical section.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization, unlike optimization, must always be carried out */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY jiffies have passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that a task
	 * was preempted on the Nth instruction. In that case, such tasks can
	 * return into the 2nd-Nth byte of the jump instruction. This wait
	 * avoids that. Note that on a non-preemptive kernel, this is
	 * transparently converted to synchronize_sched() to wait for all
	 * interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes within the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This probe is being unoptimized. Just dequeue it. */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Shortcut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is neither an optprobe nor an optimized probe */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this kprobe is being unoptimized and force
			 * is set, forcibly unoptimize it. (There is no need
			 * to unoptimize an already-unoptimized kprobe again.)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reuse */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused aggrprobe@%p\n",
		       ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to a plain kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
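
/*
 * Usage sketch for the handler above: the sysctl table wiring lives
 * outside this file, but the knob is conventionally exposed as
 * debug.kprobes-optimization (the exact path is an assumption based on
 * the standard kernel sysctl layout, not shown in this excerpt):
 *
 *	echo 0 > /proc/sys/debug/kprobes-optimization	# unoptimize all
 *	echo 1 > /proc/sys/debug/kprobes-optimization	# allow optimization
 */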

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If optimized kprobes are NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* The caller must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
static void arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/* Disarm a kprobe with text_mutex */
static void disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);

/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area that has
		 * already been freed. The instruction slot has therefore
		 * already been released, and we need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}
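
/*
 * Sketch of how entries land on kprobe_blacklist (the population code is
 * outside this excerpt): functions annotated with NOKPROBE_SYMBOL() are
 * collected into a dedicated section and registered as
 * kprobe_blacklist_entry ranges, so registration at those addresses
 * fails with -EINVAL:
 *
 *	static void critical_helper(void) { ... }
 *	NOKPROBE_SYMBOL(critical_helper);
 */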

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns an encoded error if it fails to look up the symbol or if
 * an invalid combination of parameters is given.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
				     const char *symbol_name, unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
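
/*
 * Example of the symbol + offset convention _kprobe_addr() implements
 * (the offset value here is hypothetical): probing 0x10 bytes past the
 * entry of vfs_read resolves to &vfs_read + 0x10.
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,
 *	};
 */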

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, text_mutex is locked inside. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
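
/*
 * Minimal usage sketch (modeled on samples/kprobes; the handler body and
 * the probed symbol are placeholders):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: addr=%p\n", p->addr);
 *		return 0;	// 0 lets normal single-step handling proceed
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "_do_fork",	// placeholder symbol
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	// negative errno on failure
 *	...
 *	unregister_kprobe(&kp);
 */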

/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: must be called with kprobe_mutex held */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed)
				disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabled probe has special handlers, update the aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

int __weak kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int register_jprobes(struct jprobe **jps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		ret = register_jprobe(jps[i]);

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int register_jprobe(struct jprobe *jp)
{
	unsigned long addr, offset;
	struct kprobe *kp = &jp->kp;

	/*
	 * Verify probepoint as well as the jprobe handler are
	 * valid function entry points.
	 */
	addr = arch_deref_entry_point(jp->entry);

	if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
	    kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
		kp->pre_handler = setjmp_pre_handler;
		kp->break_handler = longjmp_break_handler;
		return register_kprobe(kp);
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(register_jprobe);
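
/*
 * Jprobe usage sketch (modeled on samples/kprobes/jprobe_example.c; the
 * probed function and the handler signature are placeholders and must
 * mirror the real probed function exactly). The handler must end with
 * jprobe_return():
 *
 *	static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
 *			      unsigned long stack_size, int __user *parent_tidptr,
 *			      int __user *child_tidptr)
 *	{
 *		pr_info("clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns to the caller from here
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = j_do_fork,
 *		.kp = {
 *			.symbol_name = "_do_fork",	// placeholder symbol
 *		},
 *	};
 *
 *	ret = register_jprobe(&my_jprobe);
 */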

void unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
	    !arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
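
/*
 * Usage sketch (illustrative; mirrors the pattern in
 * Documentation/kprobes.txt, with a hypothetical handler name and an
 * example target symbol):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
 *			(long)regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "_do_fork",	// example target
 *		.maxactive	= 20,	// <= 0 picks the default computed above
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);	// in module init
 *	unregister_kretprobe(&my_kretprobe);		// in module exit
 */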

int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove the insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
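
/*
 * Usage sketch (illustrative; "my_probe" stands for a kprobe registered
 * earlier): a probe can be paused and resumed without the full cost of
 * unregistering and re-registering it.
 *
 *	disable_kprobe(&my_probe);	// handlers stop firing; stays registered
 *	...
 *	ret = enable_kprobe(&my_probe);	// re-arm; -EINVAL if invalid or gone
 */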

void dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
NOKPROBE_SYMBOL(dump_kprobe);

/*
 * Look up and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we need to determine the range of
 * addresses that belongs to each listed function, since a kprobe need
 * not necessarily be at the beginning of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find blacklist entry at %p\n",
			       (void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}

/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We need to
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 *
				 * Note, this also moves any optimized probes
				 * that are pending removal from their
				 * corresponding lists to the freeing_list,
				 * so they will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of the _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care when using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kretprobe_blacklist[i].addr =
				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
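
/*
 * The resulting lines in /sys/kernel/debug/kprobes/list look like the
 * following (addresses illustrative only; %p output may be hashed
 * depending on kernel configuration):
 *
 *	c015d71a k vfs_read+0x0
 *	c03dedc5 r tcp_v4_rcv+0x0 [OPTIMIZED]
 *
 * i.e. address, probe type (k: kprobe, r: kretprobe, j: jprobe),
 * symbol+offset, optional module name, then any state flags from above.
 */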

static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
		   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* reuse the empty stop callback */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open    = kprobe_blacklist_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming a kprobe doesn't optimize the kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read   = read_enabled_file_bool,
	.write  = write_enabled_file_bool,
	.llseek = default_llseek,
};
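
/*
 * Usage note: the "enabled" file created below under
 * /sys/kernel/debug/kprobes/ arms or disarms all kprobes globally,
 * e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all probes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm non-disabled probes
 */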

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0400, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0400, dir, NULL,
				   &debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);