include/trace/ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *        struct trace_entry      ent;
 *        <type>                  <item>;
 *        <type2>                 <item2>[<len>];
 *        [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
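
/*
 * Purely illustrative sketch (the event and field names below are made up
 * and are not part of this header): a user of this infrastructure might
 * declare
 *
 *        TRACE_EVENT(foo_bar,
 *                TP_PROTO(int cpu, const char *name),
 *                TP_ARGS(cpu, name),
 *                TP_STRUCT__entry(
 *                        __field(int, cpu)
 *                        __string(name, name)
 *                ),
 *                TP_fast_assign(
 *                        __entry->cpu = cpu;
 *                        __assign_str(name, name);
 *                ),
 *                TP_printk("cpu=%d name=%s", __entry->cpu, __get_str(name))
 *        );
 *
 * and Stage 1 would then generate roughly:
 *
 *        struct ftrace_raw_foo_bar {
 *                struct trace_entry      ent;
 *                int                     cpu;
 *                u32                     __data_loc_name;
 *                char                    __data[0];
 *        };
 */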

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
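
/*
 * For illustration only (loosely modeled on the scheduler events, bodies
 * abbreviated): several events can share one class,
 *
 *        DECLARE_EVENT_CLASS(sched_wakeup_template, ...);
 *        DEFINE_EVENT(sched_wakeup_template, sched_wakeup, ...);
 *        DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, ...);
 *
 * while TRACE_EVENT(foo_bar, ...) as sketched above expands to a class
 * plus a single event of the same name.
 */
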
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name, \
                            PARAMS(proto), \
                            PARAMS(args), \
                            PARAMS(tstruct), \
                            PARAMS(assign), \
                            PARAMS(print)); \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry ent; \
                tstruct \
                char __data[0]; \
        }; \
        \
        static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
        static struct ftrace_event_call __used \
        __attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
                assign, print, reg, unreg) \
        TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
        __TRACE_EVENT_FLAGS(name, value)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *        u32     <item1>;
 *        u32     <item2>;
 *        [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
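
/*
 * Continuing the hypothetical foo_bar sketch from Stage 1, this stage
 * would produce roughly:
 *
 *        struct ftrace_data_offsets_foo_bar {
 *                u32     name;
 *        };
 *
 * where the low 16 bits of "name" hold the string's offset from the start
 * of the entry and the high 16 bits hold its length.
 */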

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
        struct ftrace_data_offsets_##call { \
                tstruct; \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *        struct trace_seq *s = &iter->seq;
 *        struct ftrace_raw_<call> *field;        <-- defined in stage 1
 *        struct trace_entry *entry;
 *        struct trace_seq *p = &iter->tmp_seq;
 *        int ret;
 *
 *        entry = iter->ent;
 *
 *        if (entry->type != event_<call>->event.type) {
 *                WARN_ON_ONCE(1);
 *                return TRACE_TYPE_UNHANDLED;
 *        }
 *
 *        field = (typeof(field))entry;
 *
 *        trace_seq_init(p);
 *        ret = trace_seq_printf(s, "%s: ", <call>);
 *        if (ret)
 *                ret = trace_seq_printf(s, <TP_printk> "\n");
 *        if (!ret)
 *                return TRACE_TYPE_PARTIAL_LINE;
 *
 *        return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) \
        ({ \
                void *__bitmask = __get_dynamic_array(field); \
                unsigned int __bitmask_size; \
                __bitmask_size = (__entry->__data_loc_##field >> 16) & 0xffff; \
                ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
        })
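
/*
 * With the hypothetical foo_bar entry from Stage 1, __get_str(name) thus
 * resolves to
 *
 *        (char *)((void *)field + (field->__data_loc_name & 0xffff))
 *
 * i.e. a pointer into the __data[] area at the recorded offset.
 */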

#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
        ({ \
                static const struct trace_print_flags __flags[] = \
                        { flag_array, { -1, NULL }}; \
                ftrace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags symbols[] = \
                        { symbol_array, { -1, NULL }}; \
                ftrace_print_symbols_seq(p, value, symbols); \
        })

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...) \
        ({ \
                static const struct trace_print_flags_u64 symbols[] = \
                        { symbol_array, { -1, NULL } }; \
                ftrace_print_symbols_seq_u64(p, value, symbols); \
        })
#else
#define __print_symbolic_u64(value, symbol_array...) \
                __print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

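/*
 * These helpers are meant to be used inside TP_printk(). A hypothetical
 * use (values and names invented for illustration):
 *
 *        TP_printk("state=%s flags=%s",
 *                __print_symbolic(__entry->state,
 *                        { 0, "RUNNING" }, { 1, "SLEEPING" }),
 *                __print_flags(__entry->flags, "|",
 *                        { 0x1, "URGENT" }, { 0x2, "RETRY" }))
 */
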
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
                         struct trace_event *trace_event) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
        struct ftrace_raw_##call *field; \
        int ret; \
        \
        field = (typeof(field))iter->ent; \
        \
        ret = ftrace_raw_output_prep(iter, trace_event); \
        if (ret) \
                return ret; \
        \
        ret = trace_seq_printf(s, print); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
        \
        return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
        .trace = ftrace_raw_output_##call, \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
                         struct trace_event *event) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##template *field; \
        struct trace_entry *entry; \
        struct trace_seq *p = &iter->tmp_seq; \
        int ret; \
        \
        entry = iter->ent; \
        \
        if (entry->type != event_##call.event.type) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
        \
        field = (typeof(field))entry; \
        \
        trace_seq_init(p); \
        ret = trace_seq_printf(s, "%s: ", #call); \
        if (ret) \
                ret = trace_seq_printf(s, print); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
        \
        return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
        .trace = ftrace_raw_output_##call, \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), filter_type); \
        if (ret) \
                return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len) \
        do { \
                char *type_str = #type"["__stringify(len)"]"; \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
                ret = trace_define_field(event_call, type_str, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item), \
                                 is_signed_type(type), FILTER_OTHER); \
                if (ret) \
                        return ret; \
        } while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
                                 offsetof(typeof(field), __data_loc_##item), \
                                 sizeof(field.__data_loc_##item), \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
        \
        tstruct; \
        \
        return ret; \
}
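
/*
 * These field definitions are what back the per-event "format" file under
 * the tracing debugfs directory (events/<system>/<event>/format). For the
 * hypothetical foo_bar event the generated lines would look roughly like
 *
 *        field:int cpu;  offset:8;  size:4;  signed:1;
 *        field:__data_loc char[] name;  offset:12;  size:4;  signed:0;
 *
 * with the exact offsets, sizes and signedness depending on the
 * architecture and on struct trace_entry.
 */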

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __data_offsets->item = __data_size + \
                               offsetof(typeof(*entry), __data); \
        __data_offsets->item |= (len * sizeof(type)) << 16; \
        __data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits) \
        (((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits) \
        ((__bitmask_size_in_bytes_raw(nr_bits) + \
          ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits) \
        (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
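
/*
 * Worked example, assuming BITS_PER_LONG == 64: nr_bits = 8 needs
 * __bitmask_size_in_bytes_raw(8) = 1 byte, which rounds up to
 * __bitmask_size_in_longs(8) = 1 long, so __bitmask_size_in_bytes(8) = 8
 * bytes are reserved in the buffer.
 */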

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
                                         __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
        struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
        int __data_size = 0; \
        struct ftrace_raw_##call __maybe_unused *entry; \
        \
        tstruct; \
        \
        return __data_size; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *        struct ftrace_event_file *ftrace_file = __data;
 *        struct ftrace_event_call *event_call = ftrace_file->event_call;
 *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *        struct ring_buffer_event *event;
 *        struct ftrace_raw_<call> *entry;  <-- defined in stage 1
 *        struct ring_buffer *buffer;
 *        unsigned long irq_flags;
 *        int __data_size;
 *        int pc;
 *
 *        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *                     &ftrace_file->flags))
 *                return;
 *
 *        local_save_flags(irq_flags);
 *        pc = preempt_count();
 *
 *        __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *                                event_<call>->event.type,
 *                                sizeof(*entry) + __data_size,
 *                                irq_flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *
 *        { <assign>; }  <-- Here we assign the entries by the __field and
 *                           __array macros.
 *
 *        if (!filter_current_check_discard(buffer, event_call, entry, event))
 *                trace_nowake_buffer_unlock_commit(buffer,
 *                                                  event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *        .trace                 = ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *        .system                = "<system>",
 *        .define_fields         = ftrace_define_fields_<call>,
 *        .fields                = LIST_HEAD_INIT(event_class_##call.fields),
 *        .raw_init              = trace_event_raw_init,
 *        .probe                 = ftrace_raw_event_##call,
 *        .reg                   = ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *        .name                  = "<call>",
 *        .class                 = event_class_<template>,
 *        .event                 = &ftrace_event_type_<call>,
 *        .print_fmt             = print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto) \
        static notrace void \
        perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call) \
        .perf_probe = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits) \
        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
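
/*
 * Usage sketch (the field and variable names are hypothetical): an event
 * pairs these helpers up across its sections roughly as
 *
 *        TP_STRUCT__entry(
 *                __string(name, dev->name)
 *                __bitmask(cpus, num_possible_cpus())
 *        ),
 *        TP_fast_assign(
 *                __assign_str(name, dev->name);
 *                __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
 *        ),
 *        TP_printk("name=%s cpus=%s", __get_str(name), __get_bitmask(cpus))
 */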

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
        \
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
        struct ftrace_event_file *ftrace_file = __data; \
        struct ftrace_event_call *event_call = ftrace_file->event_call; \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        struct ring_buffer *buffer; \
        unsigned long irq_flags; \
        int __data_size; \
        int pc; \
        \
        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
                     &ftrace_file->flags)) \
                return; \
        \
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
        \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        \
        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
                                event_call->event.type, \
                                sizeof(*entry) + __data_size, \
                                irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
        \
        tstruct \
        \
        { assign; } \
        \
        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
                trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void ftrace_test_probe_##call(void) \
{ \
        check_trace_callback_type_##call(ftrace_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str
#undef __get_bitmask

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
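
/*
 * With __entry redefined to REC and TP_printk stringified this way, the
 * hypothetical foo_bar event from Stage 1 would end up with
 *
 *        print_fmt_foo_bar[] = "\"cpu=%d name=%s\", REC->cpu, __get_str(name)"
 *
 * which is what shows up in the "print fmt:" line of the event's format
 * file.
 */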

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
static struct ftrace_event_class __used __refdata event_class_##call = { \
        .system = __stringify(TRACE_SYSTEM), \
        .define_fields = ftrace_define_fields_##call, \
        .fields = LIST_HEAD_INIT(event_class_##call.fields), \
        .raw_init = trace_event_raw_init, \
        .probe = ftrace_raw_event_##call, \
        .reg = ftrace_event_reg, \
        _TRACE_PERF_INIT(call) \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
        \
static struct ftrace_event_call __used event_##call = { \
        .name = #call, \
        .class = &event_class_##template, \
        .event.funcs = &ftrace_event_type_funcs_##template, \
        .print_fmt = print_fmt_##template, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
        \
static const char print_fmt_##call[] = print; \
        \
static struct ftrace_event_call __used event_##call = { \
        .name = #call, \
        .class = &event_class_##template, \
        .event.funcs = &ftrace_event_type_funcs_##call, \
        .print_fmt = print_fmt_##call, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef __perf_task
#define __perf_task(t) __task = (t)

#undef TP_perf_assign
#define TP_perf_assign(args...) args
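
/*
 * Sketch of how an event can influence the perf sample (modeled on the
 * scheduler's stat events; the "delay" field is illustrative):
 *
 *        TP_perf_assign(
 *                __perf_count(delay);
 *        )
 *
 * which feeds "delay" in as the count for the sample instead of the
 * default of 1.
 */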

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
        struct ftrace_event_call *event_call = __data; \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets; \
        struct ftrace_raw_##call *entry; \
        struct pt_regs __regs; \
        u64 __addr = 0, __count = 1; \
        struct task_struct *__task = NULL; \
        struct hlist_head *head; \
        int __entry_size; \
        int __data_size; \
        int rctx; \
        \
        perf_fetch_caller_regs(&__regs); \
        \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
                             sizeof(u64)); \
        __entry_size -= sizeof(u32); \
        \
        if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
                      "profile buffer not large enough")) \
                return; \
        \
        entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
                __entry_size, event_call->event.type, &__regs, &rctx); \
        if (!entry) \
                return; \
        \
        tstruct \
        \
        { assign; } \
        \
        head = this_cpu_ptr(event_call->perf_events); \
        perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
                __count, &__regs, head, __task); \
}

/*
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
        check_trace_callback_type_##call(perf_trace_##template); \
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */