extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t max_stack_lock;
+extern arch_spinlock_t stack_trace_max_lock;
extern int stack_tracer_enabled;
void stack_trace_print(void);
};
unsigned long stack_trace_max_size;
-arch_spinlock_t max_stack_lock =
+arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(int, trace_active);
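This patch only renames the lock, but the surrounding discipline is worth noting: arch_spinlock_t is a raw spinlock with no irq, preemption, or lockdep handling of its own, so every taker in this file disables interrupts itself and uses the per-CPU trace_active counter as a guard against the stack tracer's own callback re-entering. A minimal sketch of that pattern, using the names from this patch (the function itself is illustrative, not kernel code):

/* Illustrative only: the locking discipline around stack_trace_max_lock. */
static void example_locked_update(unsigned long val)
{
	unsigned long flags;
	int cpu;

	/* Raw arch spinlocks never disable interrupts themselves. */
	local_irq_save(flags);

	/* Keep stack_trace_call() from re-entering on this CPU. */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	stack_trace_max_size = val;	/* protected update */
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);
}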
/*
 * When arch-specific code overrides this function, the following
- * data should be filled up, assuming max_stack_lock is held to
+ * data should be filled up, assuming stack_trace_max_lock is held to
* prevent concurrent updates.
* stack_trace_index[]
* stack_trace_max
return;
local_irq_save(flags);
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
/* In case another CPU set the tracer_frame on us */
if (unlikely(!frame_size))
}
out:
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
local_irq_restore(flags);
}
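The comment above check_stack() spells out the override contract: an architecture that supplies its own version (the generic one is declared __weak) must fill the shared state (stack_trace_index[], stack_trace_max, stack_trace_max_size) while holding stack_trace_max_lock. A hedged sketch of such an override; arch_unwind_max_stack() is a hypothetical helper standing in for arch-specific unwinding, not a kernel API:

/* Sketch of an arch-specific override of the __weak generic
 * check_stack(); arch_unwind_max_stack() is hypothetical.
 */
void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * Fill the state the generic code and the seq_file readers
	 * expect, all under stack_trace_max_lock:
	 *   stack_trace_index[]  - size consumed by each frame
	 *   stack_trace_max      - the deepest backtrace recorded
	 *   stack_trace_max_size - its total size
	 */
	stack_trace_max_size = arch_unwind_max_stack(ip, stack,
						     &stack_trace_max,
						     stack_trace_index);

	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}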
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
*ptr = val;
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
per_cpu(trace_active, cpu)--;
local_irq_restore(flags);
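The trace_active bump in the writer above is not redundant with disabling interrupts: function tracing still fires for any traced call made while the lock is held, so without the counter a traced function inside arch_spin_lock() (or an NMI) could re-enter the stack tracer and deadlock on stack_trace_max_lock. The callback side checks the same counter; roughly, from this file at the time of the patch:

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* Bail if this CPU is already inside the stack tracer. */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}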
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
{
int cpu;
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
cpu = smp_processor_id();
per_cpu(trace_active, cpu)--;
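t_start() and t_stop() above bracket the whole seq_file traversal with stack_trace_max_lock (plus the trace_active guard), so one read of the report sees a single consistent snapshot of stack_trace_index[] and stack_trace_max rather than a mix of two different maxima. They are wired up through the usual seq_operations table; the names below match this file, with t_next/t_show omitted here:

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};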