unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+/* For tracers that don't implement custom flags */
+static struct tracer_opt dummy_tracer_opt[] = {
+{ }
+};
+
+static struct tracer_flags dummy_tracer_flags = {
+.val = 0,
+.opts = dummy_tracer_opt
+};
+
+static int dummy_set_flag(u32 old_flags, u32 bit, int set)
+{
+return 0;
+}
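For contrast, a tracer that does implement custom flags would provide a populated option table and a real set_flag() callback along these lines. This is a minimal sketch: the "foo" tracer, its "verbose" option and the bit value are invented for illustration, and only the tracer_opt/tracer_flags fields actually used by the set_tracer_option() helper added later in this patch are assumed.

#define FOO_OPT_VERBOSE 0x1	/* hypothetical flag bit */

static struct tracer_opt foo_tracer_opts[] = {
	{ .name = "verbose", .bit = FOO_OPT_VERBOSE },
	{ }	/* terminator, as in dummy_tracer_opt above */
};

static struct tracer_flags foo_tracer_flags = {
	.val = 0,		/* every option starts cleared */
	.opts = foo_tracer_opts,
};

static int foo_set_flag(u32 old_flags, u32 bit, int set)
{
	/* return nonzero to refuse a particular flag change */
	return 0;
}

Such a tracer would point its .flags at foo_tracer_flags and its .set_flag at foo_set_flag when it registers; the dummy definitions above are what stand in when a tracer provides none.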
+
+/*
+ * Kill all tracing for good (never come back).
+ * It is initialized to 1 and is set back to zero only if the tracer
+ * initialization succeeds; nothing else clears it.
+ */
+int tracing_disabled = 1;
+
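In other words the flag acts as a one-way gate: user-visible entry points are expected to bail out while it is set, and only a successful core initialization clears it. A rough sketch of both sides, with illustrative function names rather than quotes of this file:

/* Illustrative only: how tracing_disabled is meant to be consulted and cleared. */
static int example_tracing_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;	/* tracing was killed or never came up */
	/* ... normal open path ... */
	return 0;
}

static __init int example_tracer_init(void)
{
	/* ... allocate ring buffers, register the default tracer, ... */
	tracing_disabled = 0;	/* the only place the flag is cleared */
	return 0;
}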
static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
-for_each_tracing_cpu(i) {
+for_each_tracing_cpu(i)
tracing_reset(tr, i);
-}
+
current_trace = type;
-tr->ctrl = 0;
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
+#ifdef CONFIG_FUNCTION_RET_TRACER
+static void __trace_function_return(struct trace_array *tr,
+struct trace_array_cpu *data,
+struct ftrace_retfunc *trace,
+unsigned long flags,
+int pc)
+{
+struct ring_buffer_event *event;
+struct ftrace_ret_entry *entry;
+unsigned long irq_flags;
+
+if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+return;
+
+event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+&irq_flags);
+if (!event)
+return;
+entry = ring_buffer_event_data(event);
+tracing_generic_entry_update(&entry->ent, flags, pc);
+entry->ent.type = TRACE_FN_RET;
+entry->ip = trace->func;
+entry->parent_ip = trace->ret;
+entry->rettime = trace->rettime;
+entry->calltime = trace->calltime;
+entry->overrun = trace->overrun;
+ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+#endif
+
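For readers following the assignments above, the record being filled in has roughly this shape, reconstructed from the fields written here; the authoritative definition lives in the tracing headers and its exact field types may differ:

/* Reconstructed sketch of the TRACE_FN_RET ring-buffer record. */
struct ftrace_ret_entry {
	struct trace_entry ent;		/* common header: type, flags, preempt count, ... */
	unsigned long ip;		/* traced function, from trace->func */
	unsigned long parent_ip;	/* return address, from trace->ret */
	unsigned long long calltime;	/* timestamp taken at function entry */
	unsigned long long rettime;	/* timestamp taken at function return */
	unsigned long overrun;		/* return-stack overruns reported by the tracer */
};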
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
};
static ssize_t
-tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
+tracing_trace_options_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
+int i;
char *buf;
int r = 0;
int len = 0;
return r;
}
+/* Try to assign a tracer-specific option */
+static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+{
+struct tracer_flags *trace_flags = trace->flags;
+struct tracer_opt *opts = NULL;
+int ret = 0, i = 0;
+int len;
+
+for (i = 0; trace_flags->opts[i].name; i++) {
+opts = &trace_flags->opts[i];
+len = strlen(opts->name);
+
+if (strncmp(cmp, opts->name, len) == 0) {
+ret = trace->set_flag(trace_flags->val,
+opts->bit, !neg);
+break;
+}
+}
+/* Not found */
+if (!trace_flags->opts[i].name)
+return -EINVAL;
+
+/* The tracer refused to handle the change */
+if (ret)
+return ret;
+
+if (neg)
+trace_flags->val &= ~opts->bit;
+else
+trace_flags->val |= opts->bit;
+
+return 0;
+}
+
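As a usage note, set_tracer_option() is meant to be the fallback in the options write path once the global option strings have failed to match. A simplified sketch of such a caller follows; the helper name is invented, error handling is trimmed, and it assumes the file's existing trace_options[] string table and current_trace pointer:

/* Hypothetical helper showing the intended call pattern. */
static int example_apply_option(char *cmp)
{
	int neg = 0;
	int i;

	/* a leading "no" negates the option, e.g. "noverbose" */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	/* the global options (trace_options[] strings) are tried first */
	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			/* toggle the matching TRACE_ITER_* bit here */
			return 0;
		}
	}

	/* no global match: hand the string to the current tracer's flags */
	return set_tracer_option(current_trace, cmp, neg);
}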
static ssize_t
-tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];