tracing/kprobes: Use global event perf buffers in kprobe tracer
author Masami Hiramatsu <mhiramat@redhat.com>
Fri, 25 Sep 2009 18:20:12 +0000 (11:20 -0700)
committer Frederic Weisbecker <fweisbec@gmail.com>
Sat, 3 Oct 2009 00:21:39 +0000 (02:21 +0200)
Use the new per-cpu global event buffer instead of the stack in the kprobe
tracer while tracing through perf.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Frank Ch. Eigler <fche@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <20090925182011.10157.60140.stgit@omoto>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
kernel/trace/trace_kprobe.c

index 09cba270392df3a2fed08da801541af7d9686bfe..97309d4714f79b112a152da22a9cad840c2bd75b 100644 (file)
@@ -1149,35 +1149,49 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry *entry;
-       int size, __size, i, pc;
+       struct trace_entry *ent;
+       int size, __size, i, pc, __cpu;
        unsigned long irq_flags;
+       char *raw_data;
 
-       local_save_flags(irq_flags);
        pc = preempt_count();
-
        __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
+       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+                    "profile buffer not large enough"))
+               return 0;
 
-       do {
-               char raw_data[size];
-               struct trace_entry *ent;
-               /*
-                * Zero dead bytes from alignment to avoid stack leak
-                * to userspace
-                */
-               *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-               entry = (struct kprobe_trace_entry *)raw_data;
-               ent = &entry->ent;
-
-               tracing_generic_entry_update(ent, irq_flags, pc);
-               ent->type = call->id;
-               entry->nargs = tp->nr_args;
-               entry->ip = (unsigned long)kp->addr;
-               for (i = 0; i < tp->nr_args; i++)
-                       entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-               perf_tp_event(call->id, entry->ip, 1, entry, size);
-       } while (0);
+       /*
+        * Protect the non nmi buffer
+        * This also protects the rcu read side
+        */
+       local_irq_save(irq_flags);
+       __cpu = smp_processor_id();
+
+       if (in_nmi())
+               raw_data = rcu_dereference(trace_profile_buf_nmi);
+       else
+               raw_data = rcu_dereference(trace_profile_buf);
+
+       if (!raw_data)
+               goto end;
+
+       raw_data = per_cpu_ptr(raw_data, __cpu);
+       /* Zero dead bytes from alignment to avoid buffer leak to userspace */
+       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+       entry = (struct kprobe_trace_entry *)raw_data;
+       ent = &entry->ent;
+
+       tracing_generic_entry_update(ent, irq_flags, pc);
+       ent->type = call->id;
+       entry->nargs = tp->nr_args;
+       entry->ip = (unsigned long)kp->addr;
+       for (i = 0; i < tp->nr_args; i++)
+               entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+       perf_tp_event(call->id, entry->ip, 1, entry, size);
+end:
+       local_irq_restore(irq_flags);
        return 0;
 }
 
@@ -1188,33 +1202,50 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry *entry;
-       int size, __size, i, pc;
+       struct trace_entry *ent;
+       int size, __size, i, pc, __cpu;
        unsigned long irq_flags;
+       char *raw_data;
 
-       local_save_flags(irq_flags);
        pc = preempt_count();
-
        __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
+       if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+                    "profile buffer not large enough"))
+               return 0;
+
+       /*
+        * Protect the non nmi buffer
+        * This also protects the rcu read side
+        */
+       local_irq_save(irq_flags);
+       __cpu = smp_processor_id();
+
+       if (in_nmi())
+               raw_data = rcu_dereference(trace_profile_buf_nmi);
+       else
+               raw_data = rcu_dereference(trace_profile_buf);
+
+       if (!raw_data)
+               goto end;
+
+       raw_data = per_cpu_ptr(raw_data, __cpu);
+       /* Zero dead bytes from alignment to avoid buffer leak to userspace */
+       *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+       entry = (struct kretprobe_trace_entry *)raw_data;
+       ent = &entry->ent;
 
-       do {
-               char raw_data[size];
-               struct trace_entry *ent;
-
-               *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-               entry = (struct kretprobe_trace_entry *)raw_data;
-               ent = &entry->ent;
-
-               tracing_generic_entry_update(ent, irq_flags, pc);
-               ent->type = call->id;
-               entry->nargs = tp->nr_args;
-               entry->func = (unsigned long)tp->rp.kp.addr;
-               entry->ret_ip = (unsigned long)ri->ret_addr;
-               for (i = 0; i < tp->nr_args; i++)
-                       entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-               perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-       } while (0);
+       tracing_generic_entry_update(ent, irq_flags, pc);
+       ent->type = call->id;
+       entry->nargs = tp->nr_args;
+       entry->func = (unsigned long)tp->rp.kp.addr;
+       entry->ret_ip = (unsigned long)ri->ret_addr;
+       for (i = 0; i < tp->nr_args; i++)
+               entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+       perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
+end:
+       local_irq_restore(irq_flags);
        return 0;
 }