*/
if (record) {
struct perf_sample_data data = {
- .regs = regs,
.addr = 0,
.period = counter->hw.last_period,
};
if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_counter_overflow(counter, nmi, &data)) {
+ if (perf_counter_overflow(counter, nmi, &data, regs)) {
/*
* Interrupts are coming too fast - throttle them
* by setting the counter to 0, so it will be
regs = args->regs;
- data.regs = regs;
data.addr = 0;
cpuc = &__get_cpu_var(cpu_hw_counters);
if (!sparc_perf_counter_set_period(counter, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data))
+ if (perf_counter_overflow(counter, 1, &data, regs))
sparc_pmu_disable_counter(hwc, idx);
}
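
Both hunks above make the same interface change: the register state is no longer carried inside struct perf_sample_data (the `.regs = regs` initializer and the `data.regs = regs` assignment go away), and `regs` is instead passed to perf_counter_overflow() as an explicit fourth argument. The following is a minimal illustrative sketch of the resulting call pattern in an architecture overflow handler, assuming only the post-patch signature perf_counter_overflow(counter, nmi, &data, regs) shown in the hunks; the function and variable names here are hypothetical and not taken from any particular arch file:

	/* Illustrative sketch, not part of the patch. */
	static void example_record_and_restart(struct perf_counter *counter,
					       struct pt_regs *regs, int nmi)
	{
		/* regs is no longer stashed in the sample data itself... */
		struct perf_sample_data data = {
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		/* ...it now travels as its own argument alongside the data. */
		if (perf_counter_overflow(counter, nmi, &data, regs)) {
			/* throttle request: arch-specific handling goes here */
		}
	}
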
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
-static inline int
-perf_output_begin(struct perf_output_handle *handle, struct perf_counter *c,
- unsigned int size, int nmi, int sample) { }
-static inline void perf_output_end(struct perf_output_handle *handle) { }
-static inline void
-perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len) { }
-static inline void
-perf_output_sample(struct perf_output_handle *handle,
- struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter) { }
-static inline void
-perf_prepare_sample(struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_counter *counter,
- struct pt_regs *regs) { }
#endif
#define perf_output_put(handle, x) \