Impact: Reduce future memory usage, use new cpumask API.
(Eventually, cpumask_var_t will be allocated based on nr_cpu_ids, not NR_CPUS).
Convert kernel trace functions to use the struct cpumask API (a minimal usage sketch follows the sign-offs):
1) Use cpumask_copy/cpumask_test_cpu/for_each_cpu.
2) Use cpumask_var_t and alloc_cpumask_var/free_cpumask_var everywhere.
3) Use on_each_cpu instead of playing with current->cpus_allowed.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
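
For reference, below is a minimal, illustrative sketch of the pattern this conversion applies. It is not part of the patch; demo_mask, demo_init, demo_per_cpu and demo_exit are hypothetical names. With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is allocated from the heap, so it can eventually be sized for nr_cpu_ids rather than NR_CPUS:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/smp.h>

static cpumask_var_t demo_mask;		/* hypothetical example mask */

static void demo_per_cpu(void *unused)
{
	/* invoked on every online CPU by on_each_cpu() below */
}

static int demo_init(void)
{
	int cpu;

	/* 2) cpumask_var_t must be explicitly allocated and can fail */
	if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
		return -ENOMEM;

	/* 1) copy instead of struct assignment; the mask may live off-stack */
	cpumask_copy(demo_mask, cpu_possible_mask);

	/* 1) pointer-based helpers replace cpu_isset()/for_each_cpu_mask() */
	for_each_cpu(cpu, demo_mask)
		if (cpumask_test_cpu(cpu, demo_mask))
			printk(KERN_DEBUG "cpu %d set in demo_mask\n", cpu);

	/* 3) run a function on each online CPU instead of rewriting
	 * current->cpus_allowed and migrating the current task around */
	on_each_cpu(demo_per_cpu, NULL, 1);

	return 0;
}

static void demo_exit(void)
{
	free_cpumask_var(demo_mask);
}
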
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu) \
- for_each_cpu_mask(cpu, buffer->cpumask)
+ for_each_cpu(cpu, buffer->cpumask)
#define TS_SHIFT 27
#define TS_MASK ((1ULL << TS_SHIFT) - 1)
unsigned pages;
unsigned flags;
int cpus;
- cpumask_t cpumask;
+ cpumask_var_t cpumask;
atomic_t record_disabled;
struct mutex mutex;
if (!buffer)
return NULL;
+ if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+ goto fail_free_buffer;
+
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
if (buffer->pages == 1)
buffer->pages++;
- buffer->cpumask = cpu_possible_map;
+ cpumask_copy(buffer->cpumask, cpu_possible_mask);
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
GFP_KERNEL);
if (!buffer->buffers)
- goto fail_free_buffer;
+ goto fail_free_cpumask;
for_each_buffer_cpu(buffer, cpu) {
buffer->buffers[cpu] =
	rb_allocate_cpu_buffer(buffer, cpu);
}
kfree(buffer->buffers);
+ fail_free_cpumask:
+ free_cpumask_var(buffer->cpumask);
+
fail_free_buffer:
kfree(buffer);
return NULL;
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
+ free_cpumask_var(buffer->cpumask);
+
kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
cpu = raw_smp_processor_id();
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
goto out;
cpu_buffer = buffer->buffers[cpu];
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
cpu_buffer = buffer->buffers[cpu];
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
struct buffer_page *reader;
int nr_loops = 0;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
struct ring_buffer_iter *iter;
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
{
struct ring_buffer_per_cpu *cpu_buffer;
- if (!cpu_isset(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
- if (!cpu_isset(cpu, buffer_a->cpumask) ||
- !cpu_isset(cpu, buffer_b->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
+ !cpumask_test_cpu(cpu, buffer_b->cpumask))
return -EINVAL;
/* At least make sure the two buffers are somewhat the same */
preempt_enable();
}
-static cpumask_t __read_mostly tracing_buffer_mask;
+static cpumask_var_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu) \
- for_each_cpu_mask(cpu, tracing_buffer_mask)
+ for_each_cpu(cpu, tracing_buffer_mask)
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
/*
* Only trace on a CPU if the bitmask is set:
*/
-static cpumask_t tracing_cpumask = CPU_MASK_ALL;
-
-/*
- * When tracing/tracing_cpu_mask is modified then this holds
- * the new bitmask we are about to install:
- */
-static cpumask_t tracing_cpumask_new;
+static cpumask_var_t tracing_cpumask;
/*
* The tracer itself will not take this lock, but still we want
mutex_lock(&tracing_cpumask_update_lock);
- len = cpumask_scnprintf(mask_str, count, &tracing_cpumask);
+ len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
size_t count, loff_t *ppos)
{
int err, cpu;
+ cpumask_var_t tracing_cpumask_new;
+
+ if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
mutex_lock(&tracing_cpumask_update_lock);
- err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new);
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
if (err)
goto err_unlock;
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
- if (cpu_isset(cpu, tracing_cpumask) &&
- !cpu_isset(cpu, tracing_cpumask_new)) {
+ if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
- if (!cpu_isset(cpu, tracing_cpumask) &&
- cpu_isset(cpu, tracing_cpumask_new)) {
+ if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&global_trace.data[cpu]->disabled);
}
}
__raw_spin_unlock(&ftrace_max_lock);
local_irq_enable();
- tracing_cpumask = tracing_cpumask_new;
+ cpumask_copy(tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
+ free_cpumask_var(tracing_cpumask_new);
return count;
err_unlock:
mutex_unlock(&tracing_cpumask_update_lock);
+ free_cpumask_var(tracing_cpumask_new);
return err;
}
static DEFINE_SPINLOCK(ftrace_dump_lock);
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
- static cpumask_t mask;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
* and then release the locks again.
*/
- cpus_clear(mask);
-
while (!trace_empty(&iter)) {
if (!cnt)
{
struct trace_array_cpu *data;
int i;
+ int ret = -ENOMEM;
- /* TODO: make the number of buffers hot pluggable with CPUS */
- tracing_buffer_mask = cpu_possible_map;
+ if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+ goto out;
+
+ if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+ goto out_free_buffer_mask;
+ cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
+ cpumask_copy(tracing_cpumask, cpu_all_mask);
+
+ /* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(trace_buf_size,
TRACE_BUFFER_FLAGS);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
- return 0;
+ goto out_free_cpumask;
}
global_trace.entries = ring_buffer_size(global_trace.buffer);
+
#ifdef CONFIG_TRACER_MAX_TRACE
max_tr.buffer = ring_buffer_alloc(trace_buf_size,
TRACE_BUFFER_FLAGS);
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
WARN_ON(1);
ring_buffer_free(global_trace.buffer);
- return 0;
+ goto out_free_cpumask;
}
max_tr.entries = ring_buffer_size(max_tr.buffer);
WARN_ON(max_tr.entries != global_trace.entries);
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
+ ret = 0;
- return 0;
+out_free_cpumask:
+ free_cpumask_var(tracing_cpumask);
+out_free_buffer_mask:
+ free_cpumask_var(tracing_buffer_mask);
+out:
+ return ret;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
return HRTIMER_RESTART;
}
-static void start_stack_timer(int cpu)
+static void start_stack_timer(void *unused)
{
- struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+ struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = stack_trace_timer_fn;
static void start_stack_timers(void)
{
- cpumask_t saved_mask = current->cpus_allowed;
- int cpu;
-
- for_each_online_cpu(cpu) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
- start_stack_timer(cpu);
- }
- set_cpus_allowed_ptr(current, &saved_mask);
+ on_each_cpu(start_stack_timer, NULL, 1);
}
static void stop_stack_timer(int cpu)