{
u64 prev_raw_count, new_raw_count, delta;
- WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
/*
 * Careful: an NMI might modify the previous counter value,
 * and not all hardware sign-extends above the physical width
 * of the count, so we do that by clipping the delta to 32 bits:
 */
delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
- WARN_ON_ONCE((int)delta < 0);
atomic64_add(delta, &counter->count);
atomic64_sub(delta, &hwc->period_left);
int err;
err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
- WARN_ON_ONCE(err);
}
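To see why the cast chain above produces the right value, here is a small standalone sketch (plain userspace C, not part of the patch; the raw counter values are made up) of the 32-bit clipping for hardware that keeps only 32 significant bits and has wrapped between two reads:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical raw reads from a counter with 32 significant bits: */
	uint64_t prev_raw_count = 0x00000000fffffff0ULL;	/* just before the wrap */
	uint64_t new_raw_count  = 0x0000000000000010ULL;	/* just after the wrap  */

	/*
	 * Subtract in 32-bit signed space, then zero-extend back to 64 bits.
	 * The wrap-around cancels out and we get the true number of elapsed
	 * events (0x20), even though the hardware never sign-extended the
	 * count above its physical width.
	 */
	uint64_t delta = (uint64_t)(uint32_t)((int32_t)new_raw_count -
					      (int32_t)prev_raw_count);

	printf("delta = %llu\n", (unsigned long long)delta);	/* prints 32 */
	return 0;
}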
static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
s32 left = atomic64_read(&hwc->period_left);
s32 period = hwc->irq_period;
- WARN_ON_ONCE(period <= 0);
-
/*
 * If we are way outside a reasonable range then just skip forward:
*/
atomic64_set(&hwc->period_left, left);
}
- WARN_ON_ONCE(left <= 0);
-
per_cpu(prev_left[idx], smp_processor_id()) = left;
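The fragment above only keeps the tail of the period setup; the "skip forward" comment refers to clamping a period_left that has drifted out of range before it is written back and mirrored into prev_left. A minimal sketch of that clamping as a standalone C function (not the patch's code; the exact thresholds are an assumption):

#include <stdint.h>

/*
 * Sketch only: pull a period_left that has fallen far behind back to at
 * most one full irq_period, so the next hardware period stays reasonable.
 */
int32_t clamp_period_left(int32_t left, int32_t period)
{
	if (left <= -period)	/* way out of range: just skip forward      */
		left = period;
	if (left <= 0)		/* slightly behind: catch up by one period  */
		left += period;
	return left;
}

The clamped value is then stored back with atomic64_set() and recorded in prev_left, as the kept lines above show.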
extern int perf_max_counters;
#ifdef CONFIG_PERF_COUNTERS
-extern void
-perf_counter_show(struct perf_counter *counter, char *str, int trace);
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);
#else
static inline void
-perf_counter_show(struct perf_counter *counter, char *str, int trace) { }
-static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu) { }
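The empty static inline stubs in the #else branch exist so that core kernel code can call the scheduling hooks unconditionally and let the compiler discard them when CONFIG_PERF_COUNTERS is off. A tiny self-contained illustration of that pattern (perf_hook and the local config toggle below are placeholders, not the real API):

#include <stdio.h>

#define CONFIG_PERF_COUNTERS	/* comment this out to mimic the #else path */

#ifdef CONFIG_PERF_COUNTERS
static inline void perf_hook(int cpu) { printf("counting on cpu %d\n", cpu); }
#else
static inline void perf_hook(int cpu) { }	/* empty stub, optimized away */
#endif

int main(void)
{
	perf_hook(0);	/* the caller never needs its own #ifdef */
	return 0;
}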
atomic64_set(&counter->hw.prev_count, now);
delta = now - prev;
- if (WARN_ON_ONCE(delta < 0))
- delta = 0;
atomic64_add(delta, &counter->count);
}
atomic64_set(&counter->hw.prev_count, now);
delta = now - prev;
- if (WARN_ON_ONCE(delta < 0))
- delta = 0;
atomic64_add(delta, &counter->count);
}
atomic64_set(&counter->hw.prev_count, now);
delta = now - prev;
- if (WARN_ON_ONCE(delta < 0))
- delta = 0;
atomic64_add(delta, &counter->count);
}
atomic64_set(&counter->hw.prev_count, now);
delta = now - prev;
- if (WARN_ON_ONCE(delta < 0))
- delta = 0;
atomic64_add(delta, &counter->count);
}
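All four hunks above simplify the same software-counter update pattern: read the current value, take the delta against hw.prev_count, and fold it into counter->count. A standalone sketch of that pattern using C11 atomics instead of the kernel's atomic64_t (the struct and function names are placeholders, not the patch's API; a monotonic source is assumed, so the delta can never go negative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sw_counter {
	_Atomic uint64_t prev_count;	/* value seen at the last update   */
	_Atomic uint64_t count;		/* total accumulated for the user  */
};

/* Fold the progress since the last update into the running total. */
void sw_counter_update(struct sw_counter *c, uint64_t now)
{
	uint64_t prev  = atomic_exchange(&c->prev_count, now);
	uint64_t delta = now - prev;	/* monotonic source: never negative */

	atomic_fetch_add(&c->count, delta);
}

int main(void)
{
	struct sw_counter c = { .prev_count = 100, .count = 0 };

	sw_counter_update(&c, 150);	/* +50 */
	sw_counter_update(&c, 175);	/* +25 */
	printf("count = %llu\n", (unsigned long long)atomic_load(&c.count));
	return 0;
}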