tracing: Make __buffer_unlock_commit() always_inline
Author: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Thu, 24 Nov 2016 01:28:38 +0000 (20:28 -0500)
Committer: Steven Rostedt <rostedt@goodmis.org>
Thu, 24 Nov 2016 01:30:51 +0000 (20:30 -0500)
The function __buffer_unlock_commit() is called in a few places outside of
trace.c. But for the most part, it should really be inlined, as it is in the
hot path of the trace_events. For the callers outside of trace.c, create a
new function trace_buffer_unlock_commit_nostack(), as the reason it was used
was to avoid the stack tracing that trace_buffer_unlock_commit() could do.

Link: http://lkml.kernel.org/r/20161121183700.GW26852@two.firstfloor.org
Reported-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_branch.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hwlat.c

index 725e8b2c453f855b0481f4b34a0df424a79068c2..60416bf7c591c522494f32081a15e710bdeaf025 100644 (file)
@@ -794,6 +794,22 @@ void tracing_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
+
+static __always_inline void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+       __this_cpu_write(trace_cmdline_save, true);
+
+       /* If this is the temp buffer, we need to commit fully */
+       if (this_cpu_read(trace_buffered_event) == event) {
+               /* Length is in event->array[0] */
+               ring_buffer_write(buffer, event->array[0], &event->array[1]);
+               /* Release the temp buffer */
+               this_cpu_dec(trace_buffered_event_cnt);
+       } else
+               ring_buffer_unlock_commit(buffer, event);
+}
+
 /**
  * __trace_puts - write a constant string into the trace buffer.
  * @ip:           The address of the caller
@@ -2059,21 +2075,6 @@ void trace_buffered_event_disable(void)
        preempt_enable();
 }
 
-void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
-{
-       __this_cpu_write(trace_cmdline_save, true);
-
-       /* If this is the temp buffer, we need to commit fully */
-       if (this_cpu_read(trace_buffered_event) == event) {
-               /* Length is in event->array[0] */
-               ring_buffer_write(buffer, event->array[0], &event->array[1]);
-               /* Release the temp buffer */
-               this_cpu_dec(trace_buffered_event_cnt);
-       } else
-               ring_buffer_unlock_commit(buffer, event);
-}
-
 static struct ring_buffer *temp_buffer;
 
 struct ring_buffer_event *
@@ -2214,6 +2215,16 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
        ftrace_trace_userstack(buffer, flags, pc);
 }
 
+/*
+ * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
+ */
+void
+trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+                                  struct ring_buffer_event *event)
+{
+       __buffer_unlock_commit(buffer, event);
+}
+
 static void
 trace_process_export(struct trace_export *export,
               struct ring_buffer_event *event)
index 9294f8606adeb70e44925c10a6976f0a9707d4ed..37602e7223360c3ce26794cb2fc27ba76ff34150 100644 (file)
@@ -602,8 +602,8 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);
 
-void __buffer_unlock_commit(struct ring_buffer *buffer,
-                           struct ring_buffer_event *event);
+void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+                                       struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
 
index 3a2a73716a5bd9ef56bec370f1eef4df3a7e0a0d..75489de546b6092c2485d150480f83ab313df615 100644 (file)
@@ -81,7 +81,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        entry->correct = val == expect;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               __buffer_unlock_commit(buffer, event);
+               trace_buffer_unlock_commit_nostack(buffer, event);
 
  out:
        current->trace_recursion &= ~TRACE_BRANCH_BIT;
index 4e480e8704746ec85a48de6f9990a8c6ff060cd2..8e1a115439fa8e668e371efc65288b3231db7754 100644 (file)
@@ -358,7 +358,7 @@ int __trace_graph_entry(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
-               __buffer_unlock_commit(buffer, event);
+               trace_buffer_unlock_commit_nostack(buffer, event);
 
        return 1;
 }
@@ -469,7 +469,7 @@ void __trace_graph_return(struct trace_array *tr,
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
-               __buffer_unlock_commit(buffer, event);
+               trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
index b97286c48735b898957d7951e0b536f0209cdad2..775569ec50d03fbf0ca4f755f79d106a63acaf1a 100644 (file)
@@ -127,7 +127,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
        entry->nmi_count                = sample->nmi_count;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
-               __buffer_unlock_commit(buffer, event);
+               trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /* Macros to encapsulate the time capturing infrastructure */