ring-buffer: make reentrant
kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;

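/*
 * Probe attached to the sched_switch tracepoint.  Command lines are
 * recorded for both tasks whenever the reference count is live, but a
 * trace entry is written only while the tracer itself is enabled.  The
 * per-cpu ->disabled counter keeps the probe from reentering the
 * buffer on the same CPU.
 */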
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

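/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints;
 * mirrors probe_sched_switch() but logs the wakee against the
 * currently running task.
 */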
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, pc;

	if (!likely(tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

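/* Restart the trace clock and clear every online CPU's buffer. */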
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

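/*
 * Attach the probes to the scheduler tracepoints.  Registration is
 * unwound in reverse order if any step fails.
 */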
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

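/* Detach all three scheduler probes. */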
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

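/*
 * Reference-counted start: only the first user actually registers the
 * tracepoint probes.
 */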
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

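/* The last user to drop its reference unregisters the probes. */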
static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}

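/*
 * Command-line recording piggybacks on the sched_switch probes, so
 * starting and stopping it is just taking and dropping a reference.
 */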
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

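/* Reset the buffers, hook the probes, then allow entries to be written. */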
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

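/* Disable writing first, then drop the probe reference. */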
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

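/*
 * Tracer callbacks invoked by the tracing core; tr->ctrl reflects
 * whether tracing is currently switched on.
 */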
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

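/*
 * The sched_switch tracer definition.  Typically selected at run time
 * by writing "sched_switch" to the current_tracer file in the tracing
 * debugfs directory (commonly /sys/kernel/debug/tracing).
 */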
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

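/*
 * If cmdline recording took a reference before this initcall ran, the
 * tracepoint probes still need to be registered here.
 */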
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);