return tsk->trace & TSK_TRACE_FL_GRAPH;
}
+extern int ftrace_dump_on_oops;
+
#endif /* CONFIG_TRACING */
- * @syscall_nr: syscall number
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
+/*
+ * A syscall entry in the ftrace syscalls array.
+ *
- struct syscall_trace_entry {
- int syscall_nr;
+ * @name: name of the syscall
+ * @nb_args: number of parameters it takes
+ * @types: list of types as strings
+ * @args: list of args as strings (args[i] matches types[i])
+ */
+struct syscall_metadata {
+ const char *name;
+ int nb_args;
+ const char **types;
+ const char **args;
+};
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern void arch_init_ftrace_syscalls(void);
+extern struct syscall_metadata *syscall_nr_to_meta(int nr);
+extern void start_ftrace_syscalls(void);
+extern void stop_ftrace_syscalls(void);
+extern void ftrace_syscall_enter(struct pt_regs *regs);
+extern void ftrace_syscall_exit(struct pt_regs *regs);
+#else
+static inline void start_ftrace_syscalls(void) { }
+static inline void stop_ftrace_syscalls(void) { }
+static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
+static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
+#endif
+
#endif /* _LINUX_FTRACE_H */
TRACE_FN,
TRACE_CTX,
TRACE_WAKE,
- TRACE_CONT,
TRACE_STACK,
TRACE_PRINT,
+ TRACE_BPRINT,
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
};
/*
- * ftrace_printk entry:
+ * trace_printk entry:
*/
- struct print_entry {
+struct bprint_entry {
+ struct trace_entry ent;
+ unsigned long ip;
+ int depth;
+ const char *fmt;
+ u32 buf[];
+};
+
+ struct print_entry {
+ struct trace_entry ent;
+ unsigned long ip;
+ int depth;
+ char buf[];
+ };
+
#define TRACE_OLD_SIZE 88
struct trace_field_cont {
struct power_trace state_data;
};
+struct kmemtrace_alloc_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+ size_t bytes_req;
+ size_t bytes_alloc;
+ gfp_t gfp_flags;
+ int node;
+};
+
+struct kmemtrace_free_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+};
+
+struct syscall_trace_enter {
+ struct trace_entry ent;
+ int nr;
+ unsigned long args[];
+};
+
+struct syscall_trace_exit {
+ struct trace_entry ent;
+ int nr;
+ unsigned long ret;
+};
+
+
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
- IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
+ TRACE_KMEM_ALLOC); \
+ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
+ TRACE_KMEM_FREE); \
+ IF_ASSIGN(var, ent, struct syscall_trace_enter, \
+ TRACE_SYSCALL_ENTER); \
+ IF_ASSIGN(var, ent, struct syscall_trace_exit, \
+ TRACE_SYSCALL_EXIT); \
__ftrace_bad_type(); \
} while (0)
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
- struct trace_iterator *iter);
-
-extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
- unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
- size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int
+trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern unsigned long trace_flags;
}
#endif /* CONFIG_BRANCH_TRACER */
+/* set ring buffers to default size if not already done so */
+int tracing_update_buffers(void);
+
+/* trace event type bit fields, not numeric */
+enum {
+ TRACE_EVENT_TYPE_PRINTF = 1,
+ TRACE_EVENT_TYPE_RAW = 2,
+};
+
+struct ftrace_event_call {
+ char *name;
+ char *system;
+ struct dentry *dir;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+};
+
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
+extern const char *__start___trace_bprintk_fmt[];
+extern const char *__stop___trace_bprintk_fmt[];
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ tracing_record_cmdline(current); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(ip, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(ip, fmt, ##args); \
+} while (0)
+
#endif /* _LINUX_KERNEL_TRACE_H */