tracing/ring-buffer: Only have tracing_on disable tracing buffers
authorSteven Rostedt <srostedt@redhat.com>
Wed, 22 Feb 2012 20:50:28 +0000 (15:50 -0500)
committerSteven Rostedt <rostedt@goodmis.org>
Wed, 22 Feb 2012 20:50:28 +0000 (15:50 -0500)
As the ring-buffer code is being used by other facilities in the
kernel, having tracing_on file disable *all* buffers is not a desired
effect. It should only disable the ftrace buffers that are being used.

Move the code into the trace.c file and use the buffer disabling
for tracing_on() and tracing_off(). This way only the ftrace buffers
will be affected by them and other kernel utilities will not be
confused to why their output suddenly stopped.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ring_buffer.h
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h

index 67be0376d8e35e0593945dd9df507ed8528d02ce..7be2e88f23fdae28f322f484c36a409e00c39eb3 100644 (file)
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
 
 void ring_buffer_record_disable(struct ring_buffer *buffer);
 void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
index f5b7b5c1195beaf806f19d59e0ce7114c0e36add..cf8d11e91efdf92d95dad58d6fa771d2ac998786 100644 (file)
@@ -154,33 +154,10 @@ enum {
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
-       set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF          (1 << 20)
 
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
-       clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
-       return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT           4U
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2618,6 +2586,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
+/**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different than ring_buffer_record_disable() as
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+       unsigned int rd;
+       unsigned int new_rd;
+
+       do {
+               rd = atomic_read(&buffer->record_disabled);
+               new_rd = rd | RB_BUFFER_OFF;
+       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different than ring_buffer_record_enable() as
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+       unsigned int rd;
+       unsigned int new_rd;
+
+       do {
+               rd = atomic_read(&buffer->record_disabled);
+               new_rd = rd & ~RB_BUFFER_OFF;
+       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state where it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+       return !atomic_read(&buffer->record_disabled);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
-              size_t cnt, loff_t *ppos)
-{
-       unsigned long *p = filp->private_data;
-       char buf[64];
-       int r;
-
-       if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
-               r = sprintf(buf, "permanently disabled\n");
-       else
-               r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
-               size_t cnt, loff_t *ppos)
-{
-       unsigned long *p = filp->private_data;
-       unsigned long val;
-       int ret;
-
-       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-       if (ret)
-               return ret;
-
-       if (val)
-               set_bit(RB_BUFFERS_ON_BIT, p);
-       else
-               clear_bit(RB_BUFFERS_ON_BIT, p);
-
-       (*ppos)++;
-
-       return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
-       .open           = tracing_open_generic,
-       .read           = rb_simple_read,
-       .write          = rb_simple_write,
-       .llseek         = default_llseek,
-};
-
-
-static __init int rb_init_debugfs(void)
-{
-       struct dentry *d_tracer;
-
-       d_tracer = tracing_init_dentry();
-
-       trace_create_file("tracing_on", 0644, d_tracer,
-                           &ring_buffer_flags, &rb_simple_fops);
-
-       return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu)
index 10d5503f0d04d1782a98888ba8e31e05b096516c..f3c13d63d06480ba0675f1ce0f0898a7e7023e1e 100644 (file)
@@ -351,6 +351,59 @@ static void wakeup_work_handler(struct work_struct *work)
 
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
+/**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+       if (global_trace.buffer)
+               ring_buffer_record_on(global_trace.buffer);
+       /*
+        * This flag is only looked at when buffers haven't been
+        * allocated yet. We don't really care about the race
+        * between setting this flag and actually turning
+        * on the buffer.
+        */
+       global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+       if (global_trace.buffer)
+               ring_buffer_record_off(global_trace.buffer);
+       /*
+        * This flag is only looked at when buffers haven't been
+        * allocated yet. We don't really care about the race
+        * between setting this flag and actually turning
+        * off the buffer.
+        */
+       global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+       if (global_trace.buffer)
+               return ring_buffer_record_is_on(global_trace.buffer);
+       return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -4567,6 +4620,55 @@ static __init void create_trace_options_dir(void)
                create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+              size_t cnt, loff_t *ppos)
+{
+       struct ring_buffer *buffer = filp->private_data;
+       char buf[64];
+       int r;
+
+       if (buffer)
+               r = ring_buffer_record_is_on(buffer);
+       else
+               r = 0;
+
+       r = sprintf(buf, "%d\n", r);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+       struct ring_buffer *buffer = filp->private_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
+               return ret;
+
+       if (buffer) {
+               if (val)
+                       ring_buffer_record_on(buffer);
+               else
+                       ring_buffer_record_off(buffer);
+       }
+
+       (*ppos)++;
+
+       return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+       .open           = tracing_open_generic,
+       .read           = rb_simple_read,
+       .write          = rb_simple_write,
+       .llseek         = default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
        struct dentry *d_tracer;
@@ -4626,6 +4728,9 @@ static __init int tracer_init_debugfs(void)
        trace_create_file("trace_clock", 0644, d_tracer, NULL,
                          &trace_clock_fops);
 
+       trace_create_file("tracing_on", 0644, d_tracer,
+                           global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4863,6 +4968,8 @@ __init static int tracer_alloc_buffers(void)
                goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
+       if (global_trace.buffer_disabled)
+               tracing_off();
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
index 54faec790bc18c53ddcc40df65640d340d2f01d3..ce887c0eca56065075d7e4c01188deeac0f64e36 100644 (file)
@@ -154,6 +154,7 @@ struct trace_array {
        struct ring_buffer      *buffer;
        unsigned long           entries;
        int                     cpu;
+       int                     buffer_disabled;
        cycle_t                 time_start;
        struct task_struct      *waiter;
        struct trace_array_cpu  *data[NR_CPUS];