.llseek = seq_lseek,
};
-/*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
/*
* The tracer itself will not take this lock, but still we want
* to provide a consistent cpumask to user-space:
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
+ struct trace_array *tr = file_inode(filp)->i_private;
int len;
mutex_lock(&tracing_cpumask_update_lock);
- len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+ len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct trace_array *tr = filp->private_data;
+ struct trace_array *tr = file_inode(filp)->i_private;
cpumask_var_t tracing_cpumask_new;
int err, cpu;
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
- if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
}
- if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
- cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+ cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
free_cpumask_var(tracing_cpumask_new);
}
static const struct file_operations tracing_cpumask_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
+ .release = tracing_release_generic_tr,
.llseek = generic_file_llseek,
};
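
Switching .open/.release to the _tr variants ties the open file to the trace_array with a reference count, so a tracing instance cannot be torn down while its tracing_cpumask file is still held open. A minimal sketch of what those helpers are assumed to do (a simplified reconstruction, not the bodies from this patch; trace_array_get()/trace_array_put() are taken to be the existing instance ref-count helpers in kernel/trace/trace.c):

	int tracing_open_generic_tr(struct inode *inode, struct file *filp)
	{
		struct trace_array *tr = inode->i_private;

		/* Pin the instance for as long as the file stays open. */
		if (trace_array_get(tr) < 0)
			return -ENODEV;

		filp->private_data = inode->i_private;
		return 0;
	}

	int tracing_release_generic_tr(struct inode *inode, struct file *file)
	{
		/* Drop the reference taken at open time. */
		trace_array_put(inode->i_private);
		return 0;
	}
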
if (!tr->name)
goto out_free_tr;
+ if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+ goto out_free_tr;
+
+ cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
raw_spin_lock_init(&tr->start_lock);
tr->current_trace = &nop_trace;
out_free_tr:
if (tr->trace_buffer.buffer)
ring_buffer_free(tr->trace_buffer.buffer);
+ free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
{
int cpu;
+ trace_create_file("tracing_cpumask", 0644, d_tracer,
+ tr, &tracing_cpumask_fops);
+
trace_create_file("trace_options", 0644, d_tracer,
tr, &tracing_iter_fops);
init_tracer_debugfs(&global_trace, d_tracer);
- trace_create_file("tracing_cpumask", 0644, d_tracer,
- &global_trace, &tracing_cpumask_fops);
-
trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops);
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
ring_buf_size = 1;
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
- cpumask_copy(tracing_cpumask, cpu_all_mask);
+ cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
raw_spin_lock_init(&global_trace.start_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
free_percpu(global_trace.max_buffer.data);
#endif
- free_cpumask_var(tracing_cpumask);
+ free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
out:
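
With the cpumask moved into struct trace_array, every instance created under instances/ now gets its own tracing_cpumask file (created above with mode 0644), while the top-level file keeps operating on global_trace. A small illustrative user-space snippet, assuming the usual debugfs mount point and a hypothetical instance named "foo":

	/* Illustrative only: restrict the "foo" instance to CPUs 0-1 by
	 * writing a hex mask to its private tracing_cpumask file. The
	 * debugfs mount point and instance name are assumptions.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/tracing/instances/foo/tracing_cpumask";
		const char *mask = "3";		/* bits 0 and 1 => CPUs 0-1 */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, mask, strlen(mask)) < 0)
			perror("write");
		close(fd);
		return 0;
	}
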