continue;
}
val = 0;
- if (counter->hw_event.irq_period) {
+ if (counter->hw.irq_period) {
left = atomic64_read(&counter->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
if (!ppmu)
return ERR_PTR(-ENXIO);
- if ((s64)counter->hw_event.irq_period < 0)
- return ERR_PTR(-EINVAL);
if (!perf_event_raw(&counter->hw_event)) {
ev = perf_event_id(&counter->hw_event);
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
counter->hw.config = events[n];
counter->hw.counter_base = cflags[n];
- atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+ atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
/*
* See if we need to reserve the PMU.
static void record_and_restart(struct perf_counter *counter, long val,
struct pt_regs *regs, int nmi)
{
+ u64 period = counter->hw.irq_period;
s64 prev, delta, left;
int record = 0;
*/
val = 0;
left = atomic64_read(&counter->hw.period_left) - delta;
- if (counter->hw_event.irq_period) {
+ if (period) {
if (left <= 0) {
- left += counter->hw_event.irq_period;
+ left += period;
if (left <= 0)
- left = counter->hw_event.irq_period;
+ left = period;
record = 1;
}
if (left < 0x80000000L)
return 0;
}
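+/*
+ * Called once per timer tick: for every active counter that requested a
+ * target frequency, re-derive irq_period so its interrupt rate drifts
+ * toward the requested irq_freq.
+ */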
+void perf_adjust_freq(struct perf_counter_context *ctx)
+{
+ struct perf_counter *counter;
+ u64 irq_period;
+ u64 events, period;
+ s64 delta;
+
+ spin_lock(&ctx->lock);
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ continue;
+
+ if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+ continue;
+
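+ /*
+ * Each interrupt since the last tick covered irq_period events, so
+ * HZ * interrupts * irq_period estimates the event rate per second.
+ * Dividing by the requested irq_freq gives the period that would hit
+ * that frequency; move irq_period halfway toward it.
+ */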
+ events = HZ * counter->hw.interrupts * counter->hw.irq_period;
+ period = div64_u64(events, counter->hw_event.irq_freq);
+
+ delta = (s64)(1 + period - counter->hw.irq_period);
+ delta >>= 1;
+
+ irq_period = counter->hw.irq_period + delta;
+
+ if (!irq_period)
+ irq_period = 1;
+
+ counter->hw.irq_period = irq_period;
+ counter->hw.interrupts = 0;
+ }
+ spin_unlock(&ctx->lock);
+}
+
/*
* Round-robin a context's counters:
*/
cpuctx = &per_cpu(perf_cpu_context, cpu);
ctx = &curr->perf_counter_ctx;
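+ /* re-tune the period of frequency-based counters once per tick */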
+ perf_adjust_freq(&cpuctx->ctx);
+ perf_adjust_freq(ctx);
+
perf_counter_cpu_sched_out(cpuctx);
__perf_counter_task_sched_out(ctx);
int events = atomic_read(&counter->event_limit);
int ret = 0;
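+ /* count this interrupt so perf_adjust_freq() can estimate the rate */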
+ counter->hw.interrupts++;
+
/*
* XXX event_limit might not quite work as expected on inherited
* counters
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_counter *counter;
struct pt_regs *regs;
+ u64 period;
counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
counter->pmu->read(counter);
ret = HRTIMER_NORESTART;
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
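+ /* never re-arm the timer with a period below 10 usec (10000 ns) */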
+ period = max_t(u64, 10000, counter->hw.irq_period);
+ hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swcounter_hrtimer;
if (hwc->irq_period) {
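+ /* apply the same 10 usec lower bound when first starting the timer */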
+ u64 period = max_t(u64, 10000, hwc->irq_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(hwc->irq_period), 0,
+ ns_to_ktime(period), 0,
HRTIMER_MODE_REL, 0);
}
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swcounter_hrtimer;
if (hwc->irq_period) {
+ u64 period = max_t(u64, 10000, hwc->irq_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(hwc->irq_period), 0,
+ ns_to_ktime(period), 0,
HRTIMER_MODE_REL, 0);
}
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
- struct perf_counter_hw_event *hw_event = &counter->hw_event;
const struct pmu *pmu = NULL;
- struct hw_perf_counter *hwc = &counter->hw;
/*
* Software counters (currently) can't in general distinguish
case PERF_COUNT_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
- if (hw_event->irq_period && hw_event->irq_period < 10000)
- hw_event->irq_period = 10000;
break;
case PERF_COUNT_TASK_CLOCK:
/*
else
pmu = &perf_ops_cpu_clock;
- if (hw_event->irq_period && hw_event->irq_period < 10000)
- hw_event->irq_period = 10000;
break;
case PERF_COUNT_PAGE_FAULTS:
case PERF_COUNT_PAGE_FAULTS_MIN:
break;
}
- if (pmu)
- hwc->irq_period = hw_event->irq_period;
-
return pmu;
}
{
const struct pmu *pmu;
struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
long err;
counter = kzalloc(sizeof(*counter), gfpflags);
pmu = NULL;
+ hwc = &counter->hw;
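+ /*
+ * In frequency mode, seed irq_period with a rough first guess;
+ * perf_adjust_freq() will steer it toward irq_freq on each tick.
+ */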
+ if (hw_event->freq && hw_event->irq_freq)
+ hwc->irq_period = TICK_NSEC / hw_event->irq_freq;
+ else
+ hwc->irq_period = hw_event->irq_period;
+
/*
* we currently do not support PERF_RECORD_GROUP on inherited counters
*/