From: Anil S Keshavamurthy
Date: Mon, 26 Jun 2006 07:25:29 +0000 (-0700)
Subject: [PATCH] Notify page fault call chain
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e6f47f978bcd5413fff610613b18e9e0eab9bc1b;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

With this patch, kprobes registers for page fault notifications only while
there is an active probe registered.  Once all active probes have been
unregistered, there is no longer any need to be notified of page faults, so
kprobes unregisters itself from the page fault notifier.  Hence we will have
ZERO side effects when no probes are active.

Signed-off-by: Anil S Keshavamurthy
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
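As a note for reviewers, the counting scheme is easy to see in isolation. The
fragment below is a minimal userspace sketch of the pattern, not kernel code:
register_page_fault_notifier()/unregister_page_fault_notifier() are reduced to
stubs that only print, the fake_register_kprobe()/fake_unregister_kprobe()
helpers are invented for the example, a plain int stands in for the kernel's
atomic_t, and ARCH_INACTIVE_KPROBE_COUNT is arbitrarily set to 1 here.

/*
 * Userspace sketch only: the two notifier functions below are stubs, and
 * the fake_*() helpers mimic what __register_kprobe()/unregister_kprobe()
 * do with the probe count in the real patch.
 */
#include <stdio.h>

#define ARCH_INACTIVE_KPROBE_COUNT 1	/* value chosen for the example */

static int kprobe_count;		/* atomic_t kprobe_count in the kernel */

static void register_page_fault_notifier(void)		/* stub */
{
	printf("page fault notifier registered\n");
}

static void unregister_page_fault_notifier(void)	/* stub */
{
	printf("page fault notifier unregistered\n");
}

/* invented helper: the accounting done on probe registration */
static void fake_register_kprobe(void)
{
	if (++kprobe_count == ARCH_INACTIVE_KPROBE_COUNT + 1)
		register_page_fault_notifier();
}

/* invented helper: the accounting done on probe removal */
static void fake_unregister_kprobe(void)
{
	if (--kprobe_count == ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier();
}

int main(void)
{
	fake_register_kprobe();		/* count 1: at the threshold, nothing */
	fake_register_kprobe();		/* count 2: crosses it, notifier on */
	fake_register_kprobe();		/* count 3: no extra work */
	fake_unregister_kprobe();	/* count 2: nothing */
	fake_unregister_kprobe();	/* count 1: back at threshold, notifier off */
	fake_unregister_kprobe();	/* count 0: nothing */
	return 0;
}

Built with any C compiler, this prints one "registered" line when the count
first rises past ARCH_INACTIVE_KPROBE_COUNT and one "unregistered" line when
it drops back to it, mirroring the calls added in __register_kprobe() and
unregister_kprobe() in the diff below.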
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 57d157c5cf89..0730a20f6db8 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -44,6 +44,7 @@ typedef u8 kprobe_opcode_t;
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 0
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 8c0fc227f0fb..2418a787c405 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -82,6 +82,7 @@ struct kprobe_ctlblk {
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
 
 #define SLOT0_OPCODE_SHIFT	(37)
 #define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index f466bc804f41..2d0af52c823d 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -50,6 +50,8 @@ typedef unsigned int kprobe_opcode_t;
 			IS_TWI(instr) || IS_TDI(instr))
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
+
 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
 
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index e9bb26f770ed..15065af566c2 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define arch_remove_kprobe(p)	do {} while (0)
+#define ARCH_INACTIVE_KPROBE_COUNT 0
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 98a1e95ddb98..d36febd9bb18 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -43,6 +43,7 @@ typedef u8 kprobe_opcode_t;
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 
 #define ARCH_SUPPORTS_KRETPROBES
+#define ARCH_INACTIVE_KPROBE_COUNT 1
 
 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 507f26e7ae7c..64aab081153b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,11 +47,17 @@
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
+static atomic_t kprobe_count;
 
 DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+static struct notifier_block kprobe_page_fault_nb = {
+	.notifier_call = kprobe_exceptions_notify,
+	.priority = 0x7fffffff /* we need to be notified first */
+};
+
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -465,6 +471,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
+		if (!ret)
+			atomic_inc(&kprobe_count);
 		goto out;
 	}
 
@@ -475,6 +483,10 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
+	if (atomic_add_return(1, &kprobe_count) == \
+				(ARCH_INACTIVE_KPROBE_COUNT + 1))
+		register_page_fault_notifier(&kprobe_page_fault_nb);
+
 	arch_arm_kprobe(p);
 
 out:
@@ -553,6 +565,16 @@ valid_p:
 		}
 		mutex_unlock(&kprobe_mutex);
 	}
+
+	/* Call unregister_page_fault_notifier()
+	 * if no probes are active
+	 */
+	mutex_lock(&kprobe_mutex);
+	if (atomic_add_return(-1, &kprobe_count) == \
+				ARCH_INACTIVE_KPROBE_COUNT)
+		unregister_page_fault_notifier(&kprobe_page_fault_nb);
+	mutex_unlock(&kprobe_mutex);
+	return;
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -560,10 +582,6 @@ static struct notifier_block kprobe_exceptions_nb = {
 	.priority = 0x7fffffff /* we need to be notified first */
 };
 
-static struct notifier_block kprobe_page_fault_nb = {
-	.notifier_call = kprobe_exceptions_notify,
-	.priority = 0x7fffffff /* we need to notified first */
-};
 
 int __kprobes register_jprobe(struct jprobe *jp)
 {
@@ -673,14 +691,12 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
+	atomic_set(&kprobe_count, 0);
 
 	err = arch_init_kprobes();
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
-	if (!err)
-		err = register_page_fault_notifier(&kprobe_page_fault_nb);
-
 	return err;
 }