ring-buffer: Micro-optimize with some strategic inlining
author Steven Rostedt <srostedt@redhat.com>
Tue, 19 Oct 2010 17:17:08 +0000 (13:17 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Wed, 20 Oct 2010 19:17:57 +0000 (15:17 -0400)
By using inline and noinline, we are able to make the fast path of
recording an event 4% faster.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
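
The change follows a common hot/cold split: the fast path (increment, compare, return) is forced inline into its caller, while the rarely taken failure handling, with its WARN machinery and string constants, is pushed into a separate noinline function so it stays out of the fast path's instruction-cache footprint. Below is a minimal standalone sketch of the same inline/noinline pattern; names and the simplified failure handler are illustrative, not the kernel's actual code.

#include <stdio.h>

#define RECURSIVE_DEPTH_LIMIT 16	/* mirrors TRACE_RECURSIVE_DEPTH */

/* Per-task recursion counter; the kernel keeps this on task_struct. */
static int trace_recursion;

/*
 * Cold path: only reached when the recursion limit is exceeded.
 * Marking it noinline keeps its body and string constants out of
 * the hot function's cache footprint.
 */
static __attribute__((noinline)) void recursive_fail(void)
{
	fprintf(stderr, "recursion limit (%d) exceeded\n",
		RECURSIVE_DEPTH_LIMIT);
}

/*
 * Hot path: an increment, a compare, and (rarely) a call to the
 * cold helper.  Inlining it avoids a function call on the common,
 * non-recursive case.
 */
static inline int recursive_lock(void)
{
	if (++trace_recursion < RECURSIVE_DEPTH_LIMIT)
		return 0;

	recursive_fail();
	return -1;
}

static inline void recursive_unlock(void)
{
	trace_recursion--;
}

int main(void)
{
	if (recursive_lock() == 0) {
		/* ... record the event here ... */
		recursive_unlock();
	}
	return 0;
}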
kernel/trace/ring_buffer.c

index d9f3e7a82137ef135a4638137b6ed901323e855d..f5007d0d932d28aa32e10df15a9c9d879112ddb4 100644
@@ -2078,7 +2078,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
        local_inc(&cpu_buffer->commits);
 }
 
-static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned long commits;
 
@@ -2193,13 +2193,9 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #define TRACE_RECURSIVE_DEPTH 16
 
-static int trace_recursive_lock(void)
+/* Keep this code out of the fast path cache */
+static noinline void trace_recursive_fail(void)
 {
-       current->trace_recursion++;
-
-       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
-               return 0;
-
        /* Disable all tracing before we do anything else */
        tracing_off_permanent();
 
@@ -2211,10 +2207,21 @@ static int trace_recursive_lock(void)
                    in_nmi());
 
        WARN_ON_ONCE(1);
+}
+
+static inline int trace_recursive_lock(void)
+{
+       current->trace_recursion++;
+
+       if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+               return 0;
+
+       trace_recursive_fail();
+
        return -1;
 }
 
-static void trace_recursive_unlock(void)
+static inline void trace_recursive_unlock(void)
 {
        WARN_ON_ONCE(!current->trace_recursion);