#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *	bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char type;
	unsigned char cpu;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry ent;
	unsigned long ip;
	unsigned long parent_ip;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry ent;
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
	unsigned char next_state;
	unsigned int next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry ent;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry ent;
	unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry ent;
	unsigned long ip;
	char buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char type;
	/* Temporary till we get rid of this completely */
	char buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry ent;
	struct mmiotrace_rw rw;
};

struct trace_mmiotrace_map {
	struct trace_entry ent;
	struct mmiotrace_map map;
};

struct trace_boot {
	struct trace_entry ent;
	struct boot_trace initcall;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
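
/*
 * Illustrative sketch, not part of the original header: one way output
 * code might decode the trace_flag_type bits stored in
 * trace_entry::flags.  The helper name and the annotation characters
 * are assumptions made for this example.
 */
#if 0
static char example_irq_annotation(struct trace_entry *entry)
{
	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (hardirq && softirq)
		return 'H';	/* hard IRQ arrived while in a softirq */
	if (hardirq)
		return 'h';
	if (softirq)
		return 's';
	return '.';		/* normal process context */
}
#endif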

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	atomic_t disabled;

	/* these fields get copied into max-trace: */
	unsigned long trace_idx;
	unsigned long overrun;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer *buffer;
	unsigned long entries;
	int cpu;
	cycle_t time_start;
	struct task_struct *waiter;
	struct trace_array_cpu *data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		__ftrace_bad_type();					\
	} while (0)

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
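
/*
 * Illustrative sketch, not part of the original header: a minimal
 * print_line-style callback tying together trace_assign_type() above,
 * trace_seq_printf() (declared later in this header) and the
 * print_line_t return values.  The function name, the output format
 * and the "0 means the seq buffer was full" convention are assumptions
 * made for this example.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	if (entry->type != TRACE_FN)
		return TRACE_TYPE_UNHANDLED;	/* let other printers handle it */

	/* Verified, typed view of the generic entry: */
	trace_assign_type(field, entry);

	if (!trace_seq_printf(&iter->seq, "%lx <- %lx\n",
			      field->ip, field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;	/* retry after flushing the seq */

	return TRACE_TYPE_HANDLED;
}
#endif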

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	struct tracer *next;
	int print_max;
};
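
/*
 * Illustrative sketch, not part of the original header: roughly what a
 * minimal tracer plugin built on struct tracer could look like.  The
 * "example_*" names and the initcall used for registration are
 * assumptions; register_tracer() and tracing_reset() are declared
 * further down in this header.
 */
#if 0
static void example_trace_init(struct trace_array *tr)
{
	int cpu;

	/* Start every per-CPU buffer from a clean slate. */
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

static void example_trace_reset(struct trace_array *tr)
{
	/* Nothing to tear down in this sketch. */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_trace_init,
	.reset	= example_trace_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);
#endif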

struct trace_seq {
	unsigned char buffer[PAGE_SIZE];
	unsigned int len;
	unsigned int readpos;
};

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	void *private;
	struct ring_buffer_iter *buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq seq;
	struct trace_entry *ent;
	int cpu;
	u64 ts;

	unsigned long iter_flags;
	loff_t pos;
	long idx;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace() do { } while (0)
# define tracing_stop_function_trace() do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_HEX		= 0x20,
	TRACE_ITER_BIN		= 0x40,
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
	TRACE_ITER_PRINTK	= 0x400,
	TRACE_ITER_PREEMPTONLY	= 0x800,
};
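
/*
 * Illustrative sketch, not part of the original header: the
 * trace_iterator_flags bits are tested against the global trace_flags
 * word by the output code.  The helper below is an assumption made for
 * this example, loosely modelled on how the address/symbol option bits
 * could be honoured when printing an address.
 */
#if 0
static void example_print_addr(struct trace_seq *s, unsigned long addr)
{
	/* Print the raw address when the sym-addr option bit is set. */
	if (trace_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <%08lx>", addr);
	else
		trace_seq_printf(s, " %lx", addr);
}
#endif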

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the preemption state.
 * If resched is set, then we were either inside an atomic section or
 * inside the scheduler (we would have already scheduled otherwise).
 * In this case, we do not want to call the normal preempt_enable,
 * but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
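
/*
 * Illustrative sketch, not part of the original header: the intended
 * pairing of the two helpers above inside a tracing callback.  The
 * callback name and body are assumptions made for this example.
 */
#if 0
static void example_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* Scheduler-safe preempt_disable(): remember need_resched() state. */
	resched = ftrace_preempt_disable();

	/* ... record the trace entry here ... */

	/* Re-enable without letting the tracer itself trigger a reschedule. */
	ftrace_preempt_enable(resched);
}
#endif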

#endif /* _LINUX_KERNEL_TRACE_H */