return ret;
}
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+			     struct record_opts *opts)
+{
+	bool function = perf_evsel__is_function_event(evsel);
+	struct perf_event_attr *attr = &evsel->attr;
+
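+	/* Every call-graph mode needs callchain samples in each record. */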
+	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
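+	/*
+	 * DWARF unwind needs the user registers and a user stack dump in
+	 * each sample; that is not usable for function trace events, so
+	 * fall back to frame pointers for those.
+	 */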
+	if (opts->call_graph == CALLCHAIN_DWARF) {
+		if (!function) {
+			perf_evsel__set_sample_bit(evsel, REGS_USER);
+			perf_evsel__set_sample_bit(evsel, STACK_USER);
+			attr->sample_regs_user = PERF_REGS_MASK;
+			attr->sample_stack_user = opts->stack_dump_size;
+			attr->exclude_callchain_user = 1;
+		} else {
+			pr_info("Cannot use DWARF unwind for function trace event,"
+				" falling back to framepointers.\n");
+		}
+	}
+
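+	/*
+	 * User space callchains are disabled for function trace events,
+	 * leaving only the kernel part of the callchain.
+	 */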
+	if (function) {
+		pr_info("Disabling user space callchains for function trace event.\n");
+		attr->exclude_callchain_user = 1;
+	}
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
attr->mmap_data = track;
}
-	if (opts->call_graph_enabled) {
-		perf_evsel__set_sample_bit(evsel, CALLCHAIN);
-
-		if (opts->call_graph == CALLCHAIN_DWARF) {
-			perf_evsel__set_sample_bit(evsel, REGS_USER);
-			perf_evsel__set_sample_bit(evsel, STACK_USER);
-			attr->sample_regs_user = PERF_REGS_MASK;
-			attr->sample_stack_user = opts->stack_dump_size;
-			attr->exclude_callchain_user = 1;
-		}
-	}
+	if (opts->call_graph_enabled)
+		perf_evsel__config_callgraph(evsel, opts);
if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel: evsel selector to be tested
+ *
+ * Return %true if the event is a function trace event
+ */
+static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
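+	/* sizeof() includes the trailing NUL, so this is an exact match. */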
+	return evsel->name &&
+	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
struct perf_attr_details {
bool freq;
bool verbose;