From: Linus Torvalds
Date: Thu, 18 Mar 2010 23:52:46 +0000 (-0700)
Subject: Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

Merge branch 'perf-fixes-for-linus' of git://git./linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  perf: Fix unexported generic perf_arch_fetch_caller_regs
  perf record: Don't try to find buildids in a zero sized file
  perf: export perf_trace_regs and perf_arch_fetch_caller_regs
  perf, x86: Fix hw_perf_enable() event assignment
  perf, ppc: Fix compile error due to new cpu notifiers
  perf: Make the install relative to DESTDIR if specified
  kprobes: Calculate the index correctly when freeing the out-of-line execution slot
  perf tools: Fix sparse CPU numbering related bugs
  perf_event: Fix oops triggered by cpu offline/online
  perf: Drop the obsolete profile naming for trace events
  perf: Take a hot regs snapshot for trace events
  perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
  perf/x86-64: Use frame pointer to walk on irq and process stacks
  lockdep: Move lock events under lockdep recursion protection
  perf report: Print the map table just after samples for which no map was found
  perf report: Add multiple event support
  perf session: Change perf_session post processing functions to take histogram tree
  perf session: Add storage for seperating event types in report
  perf session: Change add_hist_entry to take the tree root instead of session
  perf record: Add ID and to recorded event data when recording multiple events
  ...

---

f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240
diff --cc kernel/trace/trace_event_perf.c
index 000000000000,7d79a10c3cde..81f691eb3a30
mode 000000,100644..100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@@ -1,0 -1,168 +1,168 @@@
+ /*
+  * trace event based perf event profiling/tracing
+  *
+  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
+  * Copyright (C) 2009-2010 Frederic Weisbecker
+  */
+
+ #include <linux/module.h>
+ #include <linux/kprobes.h>
+ #include "trace.h"
+
+ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
+ EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
+
+ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
+
+ static char *perf_trace_buf;
+ static char *perf_trace_buf_nmi;
+
+ typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;
+
+ /* Count the events in use (per event id, not per instance) */
+ static int total_ref_count;
+
+ static int perf_trace_event_enable(struct ftrace_event_call *event)
+ {
+         char *buf;
+         int ret = -ENOMEM;
+
+         if (event->perf_refcount++ > 0)
+                 return 0;
+
+         if (!total_ref_count) {
+                 buf = (char *)alloc_percpu(perf_trace_t);
+                 if (!buf)
+                         goto fail_buf;
+
+                 rcu_assign_pointer(perf_trace_buf, buf);
+
+                 buf = (char *)alloc_percpu(perf_trace_t);
+                 if (!buf)
+                         goto fail_buf_nmi;
+
+                 rcu_assign_pointer(perf_trace_buf_nmi, buf);
+         }
+
+         ret = event->perf_event_enable(event);
+         if (!ret) {
+                 total_ref_count++;
+                 return 0;
+         }
+
+ fail_buf_nmi:
+         if (!total_ref_count) {
+                 free_percpu(perf_trace_buf_nmi);
+                 free_percpu(perf_trace_buf);
+                 perf_trace_buf_nmi = NULL;
+                 perf_trace_buf = NULL;
+         }
+ fail_buf:
+         event->perf_refcount--;
+
+         return ret;
+ }
+
+ int perf_trace_enable(int event_id)
+ {
+         struct ftrace_event_call *event;
+         int ret = -EINVAL;
+
+         mutex_lock(&event_mutex);
+         list_for_each_entry(event, &ftrace_events, list) {
+                 if (event->id == event_id && event->perf_event_enable &&
+                     try_module_get(event->mod)) {
+                         ret = perf_trace_event_enable(event);
+                         break;
+                 }
+         }
+         mutex_unlock(&event_mutex);
+
+         return ret;
+ }
+
+ static void perf_trace_event_disable(struct ftrace_event_call *event)
+ {
+         char *buf, *nmi_buf;
+
+         if (--event->perf_refcount > 0)
+                 return;
+
+         event->perf_event_disable(event);
+
+         if (!--total_ref_count) {
+                 buf = perf_trace_buf;
+                 rcu_assign_pointer(perf_trace_buf, NULL);
+
+                 nmi_buf = perf_trace_buf_nmi;
+                 rcu_assign_pointer(perf_trace_buf_nmi, NULL);
+
+                 /*
+                  * Ensure all events in profiling have finished before
+                  * releasing the buffers
+                  */
+                 synchronize_sched();
+
+                 free_percpu(buf);
+                 free_percpu(nmi_buf);
+         }
+ }
+
+ void perf_trace_disable(int event_id)
+ {
+         struct ftrace_event_call *event;
+
+         mutex_lock(&event_mutex);
+         list_for_each_entry(event, &ftrace_events, list) {
+                 if (event->id == event_id) {
+                         perf_trace_event_disable(event);
+                         module_put(event->mod);
+                         break;
+                 }
+         }
+         mutex_unlock(&event_mutex);
+ }
+
+ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
+                                        int *rctxp, unsigned long *irq_flags)
+ {
+         struct trace_entry *entry;
+         char *trace_buf, *raw_data;
+         int pc, cpu;
+
+         pc = preempt_count();
+
+         /* Protect the per cpu buffer, begin the rcu read side */
+         local_irq_save(*irq_flags);
+
+         *rctxp = perf_swevent_get_recursion_context();
+         if (*rctxp < 0)
+                 goto err_recursion;
+
+         cpu = smp_processor_id();
+
+         if (in_nmi())
 -                trace_buf = rcu_dereference(perf_trace_buf_nmi);
++                trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
+         else
 -                trace_buf = rcu_dereference(perf_trace_buf);
++                trace_buf = rcu_dereference_sched(perf_trace_buf);
+
+         if (!trace_buf)
+                 goto err;
+
+         raw_data = per_cpu_ptr(trace_buf, cpu);
+
+         /* zero the dead bytes from align to not leak stack to user */
+         *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+         entry = (struct trace_entry *)raw_data;
+         tracing_generic_entry_update(entry, *irq_flags, pc);
+         entry->type = type;
+
+         return raw_data;
+ err:
+         perf_swevent_put_recursion_context(*rctxp);
+ err_recursion:
+         local_irq_restore(*irq_flags);
+         return NULL;
+ }
+ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
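
For readers of this diff: a trace-event handler is expected to pair perf_trace_buf_prepare()
with an explicit unwind, because only the error paths above (the err/err_recursion labels)
release the recursion context and restore IRQs themselves. The sketch below is a minimal
illustration of that pairing, not code from this merge: the handler, the my_trace_entry
layout, and the perf_tp_event() call signature are assumptions based on the 2.6.34-era API.

/*
 * Illustrative only -- not part of this merge. Minimal sketch of a perf
 * trace-event handler built on the helpers above. my_trace_entry,
 * my_perf_trace_handler and the perf_tp_event() signature are assumed.
 */
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>

struct my_trace_entry {
        struct trace_entry ent;         /* header filled by perf_trace_buf_prepare() */
        unsigned long      value;       /* hypothetical payload */
};

static void my_perf_trace_handler(int event_id, unsigned long value)
{
        struct my_trace_entry *entry;
        unsigned long irq_flags;
        int rctx, size;

        /* Round up to a u64 multiple: prepare() zeroes the trailing 8 bytes */
        size = ALIGN(sizeof(*entry), sizeof(u64));

        /*
         * Reserve space in the per-cpu buffer; this also disables IRQs
         * and enters the swevent recursion context, both undone below.
         */
        entry = perf_trace_buf_prepare(size, event_id, &rctx, &irq_flags);
        if (!entry)
                return;         /* prepare()'s error paths already unwound */

        entry->value = value;   /* payload goes after the generic header */

        /* Hand the record to perf, then undo what prepare() set up */
        perf_tp_event(event_id, 0, 1, entry, size,
                      &__get_cpu_var(perf_trace_regs));
        perf_swevent_put_recursion_context(rctx);
        local_irq_restore(irq_flags);
}

Note the asymmetry this shows: on a NULL return, perf_trace_buf_prepare() has already
dropped the recursion context and restored IRQs, so the caller unwinds only on success.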