LIST_HEAD(ftrace_trace_arrays);
-static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
-
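/*
 * Annotation (not part of the patch): the hunks below convert the tracer's
 * per-CPU data from static DEFINE_PER_CPU() storage to a dynamically
 * allocated __percpu pointer, so every tr->data[cpu] access becomes
 * per_cpu_ptr(tr->data, cpu). A minimal sketch of the two idioms, using a
 * hypothetical struct foo and an arbitrary valid cpu id:
 *
 *	#include <linux/percpu.h>
 *
 *	struct foo { unsigned long hits; };
 *
 *	// static: storage for every possible CPU exists at build time
 *	static DEFINE_PER_CPU(struct foo, foo_stats);
 *	per_cpu(foo_stats, cpu).hits++;
 *
 *	// dynamic: allocate at runtime, dereference with per_cpu_ptr()
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *	if (p) {
 *		per_cpu_ptr(p, cpu)->hits++;
 *		free_percpu(p);
 *	}
 */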
int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call, void *rec,
struct ring_buffer_event *event)
*/
static struct trace_array max_tr;
-static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-
int tracing_is_enabled(void)
{
return tracing_is_on();
}
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct trace_array_cpu *data = tr->data[cpu];
+ struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
struct trace_array_cpu *max_data;
max_tr.cpu = cpu;
max_tr.time_start = data->preempt_timestamp;
- max_data = max_tr.data[cpu];
+ max_data = per_cpu_ptr(max_tr.data, cpu);
max_data->saved_latency = tracing_max_latency;
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
unsigned long entries = 0;
u64 ts;
- tr->data[cpu]->skipped_entries = 0;
+ per_cpu_ptr(tr->data, cpu)->skipped_entries = 0;
buf_iter = trace_buffer_iter(iter, cpu);
if (!buf_iter)
	return;
ring_buffer_read(buf_iter, NULL);
}
- tr->data[cpu]->skipped_entries = entries;
+ per_cpu_ptr(tr->data, cpu)->skipped_entries = entries;
}
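/*
 * Annotation (not part of the patch): the iterator-reset hunk above counts
 * the events logged before the buffer's time_start and stashes that count in
 * the CPU's skipped_entries; the statistics hunk below subtracts it so the
 * reported entry totals cover only the current trace window.
 */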
/*
* entries for the trace and we need to ignore the
* ones before the time stamp.
*/
- if (tr->data[cpu]->skipped_entries) {
- count -= tr->data[cpu]->skipped_entries;
+ if (per_cpu_ptr(tr->data, cpu)->skipped_entries) {
+ count -= per_cpu_ptr(tr->data, cpu)->skipped_entries;
/* total is the same as the entries */
*total += count;
} else
static void print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_array *tr = iter->tr;
- struct trace_array_cpu *data = tr->data[tr->cpu];
+ struct trace_array_cpu *data = per_cpu_ptr(tr->data, tr->cpu);
struct tracer *type = iter->trace;
unsigned long entries;
unsigned long total;
if (cpumask_test_cpu(iter->cpu, iter->started))
return;
- if (iter->tr->data[iter->cpu]->skipped_entries)
+ if (per_cpu_ptr(iter->tr->data, iter->cpu)->skipped_entries)
return;
cpumask_set_cpu(iter->cpu, iter->started);
*/
if (cpumask_test_cpu(cpu, tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_inc(&tr->data[cpu]->disabled);
+ atomic_inc(&per_cpu_ptr(tr->data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->buffer, cpu);
}
if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_dec(&tr->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(tr->data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->buffer, cpu);
}
}
static void set_buffer_entries(struct trace_array *tr, unsigned long val)
{
int cpu;
for_each_tracing_cpu(cpu)
- tr->data[cpu]->entries = val;
+ per_cpu_ptr(tr->data, cpu)->entries = val;
}
/* resize @tr's buffer to the size of @size_tr's entries */
if (cpu_id == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
ret = ring_buffer_resize(tr->buffer,
- size_tr->data[cpu]->entries, cpu);
+ per_cpu_ptr(size_tr->data, cpu)->entries, cpu);
if (ret < 0)
break;
- tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+ per_cpu_ptr(tr->data, cpu)->entries =
+ per_cpu_ptr(size_tr->data, cpu)->entries;
}
} else {
ret = ring_buffer_resize(tr->buffer,
- size_tr->data[cpu_id]->entries, cpu_id);
+ per_cpu_ptr(size_tr->data, cpu_id)->entries, cpu_id);
if (ret == 0)
- tr->data[cpu_id]->entries =
- size_tr->data[cpu_id]->entries;
+ per_cpu_ptr(tr->data, cpu_id)->entries =
+ per_cpu_ptr(size_tr->data, cpu_id)->entries;
}
return ret;
if (cpu == RING_BUFFER_ALL_CPUS)
set_buffer_entries(&max_tr, size);
else
- max_tr.data[cpu]->entries = size;
+ per_cpu_ptr(max_tr.data, cpu)->entries = size;
out:
if (cpu == RING_BUFFER_ALL_CPUS)
set_buffer_entries(tr, size);
else
- tr->data[cpu]->entries = size;
+ per_cpu_ptr(tr->data, cpu)->entries = size;
return ret;
}
for_each_tracing_cpu(cpu) {
/* fill in the size from first enabled cpu */
if (size == 0)
- size = tr->data[cpu]->entries;
- if (size != tr->data[cpu]->entries) {
+ size = per_cpu_ptr(tr->data, cpu)->entries;
+ if (size != per_cpu_ptr(tr->data, cpu)->entries) {
buf_size_same = 0;
break;
}
} else
r = sprintf(buf, "X\n");
} else
- r = sprintf(buf, "%lu\n", tr->data[tc->cpu]->entries >> 10);
+ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->data, tc->cpu)->entries >> 10);
mutex_unlock(&trace_types_lock);
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
- size += tr->data[cpu]->entries >> 10;
+ size += per_cpu_ptr(tr->data, cpu)->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
- struct trace_array_cpu *data = tr->data[cpu];
+ struct trace_array_cpu *data = per_cpu_ptr(tr->data, cpu);
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
- atomic_inc(&iter.tr->data[cpu]->disabled);
+ atomic_inc(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
}
old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(iter.tr->data, cpu)->disabled);
}
tracing_on();
}
WARN_ON(1);
goto out_free_cpumask;
}
+
+ global_trace.data = alloc_percpu(struct trace_array_cpu);
+
+ if (!global_trace.data) {
+ printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+ WARN_ON(1);
+ goto out_free_cpumask;
+ }
+
+ for_each_tracing_cpu(i) {
+ memset(per_cpu_ptr(global_trace.data, i), 0, sizeof(struct trace_array_cpu));
+ per_cpu_ptr(global_trace.data, i)->trace_cpu.cpu = i;
+ per_cpu_ptr(global_trace.data, i)->trace_cpu.tr = &global_trace;
+ }
+
if (global_trace.buffer_disabled)
tracing_off();
-
#ifdef CONFIG_TRACER_MAX_TRACE
+ max_tr.data = alloc_percpu(struct trace_array_cpu);
+ if (!max_tr.data) {
+ printk(KERN_ERR "tracer: failed to allocate percpu memory!\n");
+ WARN_ON(1);
+ goto out_free_cpumask;
+ }
max_tr.buffer = ring_buffer_alloc(1, rb_flags);
raw_spin_lock_init(&max_tr.start_lock);
if (!max_tr.buffer) {
ring_buffer_free(global_trace.buffer);
goto out_free_cpumask;
}
-#endif
- /* Allocate the first page for all buffers */
for_each_tracing_cpu(i) {
- global_trace.data[i] = &per_cpu(global_trace_cpu, i);
- global_trace.data[i]->trace_cpu.cpu = i;
- global_trace.data[i]->trace_cpu.tr = &global_trace;
- max_tr.data[i] = &per_cpu(max_tr_data, i);
- max_tr.data[i]->trace_cpu.cpu = i;
- max_tr.data[i]->trace_cpu.tr = &max_tr;
+ memset(per_cpu_ptr(max_tr.data, i), 0, sizeof(struct trace_array_cpu));
+ per_cpu_ptr(max_tr.data, i)->trace_cpu.cpu = i;
+ per_cpu_ptr(max_tr.data, i)->trace_cpu.tr = &max_tr;
}
+#endif
+ /* Allocate the first page for all buffers */
set_buffer_entries(&global_trace,
ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
return 0;
out_free_cpumask:
+ free_percpu(global_trace.data);
+ free_percpu(max_tr.data);
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
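/*
 * Annotation (not part of the patch): both alloc_percpu() calls above funnel
 * failures to out_free_cpumask, where global_trace.data and max_tr.data are
 * released unconditionally; that is safe because free_percpu() is a no-op on
 * a NULL pointer. Note also that the #endif moves below the per-CPU loop, so
 * the remaining loop body, which now only initializes max_tr's back-pointers,
 * is compiled only when CONFIG_TRACER_MAX_TRACE is set.
 */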
if (cpu != wakeup_current_cpu)
goto out_enable;
- *data = tr->data[cpu];
+ *data = per_cpu_ptr(tr->data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
goto out_unlock;
/* The task we are waiting for is waking up */
- data = wakeup_trace->data[wakeup_cpu];
+ data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
- atomic_dec(&wakeup_trace->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
}
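/*
 * Annotation (not part of the patch): the wakeup-tracer hunks above and below
 * all follow the same guard pattern around the per-CPU data. A condensed
 * sketch, with tr standing in for wakeup_trace:
 *
 *	struct trace_array_cpu *data;
 *	long disabled;
 *
 *	data = per_cpu_ptr(tr->data, cpu);
 *	disabled = atomic_inc_return(&data->disabled);
 *	if (likely(disabled == 1)) {
 *		// sole user of this CPU's data; safe to record the event
 *	}
 *	atomic_dec(&data->disabled);
 */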
static void __wakeup_reset(struct trace_array *tr)
return;
pc = preempt_count();
- disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
local_save_flags(flags);
- data = wakeup_trace->data[wakeup_cpu];
+ data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
- atomic_dec(&wakeup_trace->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)