void (*hw_perf_counter_read) (struct perf_counter *counter);
};
+/**
+ * enum perf_counter_active_state - the states of a counter
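+ *
+ * @PERF_COUNTER_STATE_OFF: counter is disabled and will not be scheduled in
+ * @PERF_COUNTER_STATE_INACTIVE: counter is enabled but not currently on any CPU
+ * @PERF_COUNTER_STATE_ACTIVE: counter is currently running on a hardware counter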
+ */
+enum perf_counter_active_state {
+ PERF_COUNTER_STATE_OFF = -1,
+ PERF_COUNTER_STATE_INACTIVE = 0,
+ PERF_COUNTER_STATE_ACTIVE = 1,
+};
+
/**
* struct perf_counter - performance counter kernel representation:
*/
struct perf_counter *group_leader;
const struct hw_perf_counter_ops *hw_ops;
- int active;
+ enum perf_counter_active_state state;
#if BITS_PER_LONG == 64
atomic64_t count;
#else
spin_lock(&ctx->lock);
- if (counter->active) {
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
counter->hw_ops->hw_perf_counter_disable(counter);
- counter->active = 0;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
ctx->nr_active--;
cpuctx->active_oncpu--;
counter->task = NULL;
if (cpuctx->active_oncpu < perf_max_counters) {
counter->hw_ops->hw_perf_counter_enable(counter);
- counter->active = 1;
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
counter->oncpu = cpu;
ctx->nr_active++;
cpuctx->active_oncpu++;
spin_lock_irq(&ctx->lock);
/*
- * If the context is active and the counter has not been added
* we need to retry the smp call.
*/
if (ctx->nr_active && list_empty(&counter->list_entry)) {
struct perf_cpu_context *cpuctx,
struct perf_counter_context *ctx)
{
- if (!counter->active)
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
return;
counter->hw_ops->hw_perf_counter_disable(counter);
- counter->active = 0;
- counter->oncpu = -1;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->oncpu = -1;
cpuctx->active_oncpu--;
ctx->nr_active--;
struct perf_counter_context *ctx,
int cpu)
{
- if (counter->active == -1)
+ if (counter->state == PERF_COUNTER_STATE_OFF)
return;
counter->hw_ops->hw_perf_counter_enable(counter);
- counter->active = 1;
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
cpuctx->active_oncpu++;
perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- WARN_ON_ONCE(counter->active == 1);
- counter->active = -1;
+ WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+ counter->state = PERF_COUNTER_STATE_OFF;
}
hw_perf_restore(perf_flags);
perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
- if (counter->active != -1)
+ if (counter->state != PERF_COUNTER_STATE_OFF)
continue;
- counter->active = 0;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
}
hw_perf_restore(perf_flags);
* If counter is enabled and currently active on a CPU, update the
* value in the counter structure:
*/
- if (counter->active) {
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
__hw_perf_counter_read, counter, 1);
}
retry:
spin_lock_irq(&ctx->lock);
- if (!counter->active) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
counter->irqdata = counter->usrdata;
counter->usrdata = oldirqdata;
spin_unlock_irq(&ctx->lock);