	PERF_RECORD_TID			= 1U << 1,
	PERF_RECORD_GROUP		= 1U << 2,
	PERF_RECORD_CALLCHAIN		= 1U << 3,
+	PERF_RECORD_TIME		= 1U << 4,
};
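With the new bit in place, userspace requests the timestamp the same way as the other optional fields, by setting it in the record_type mask passed when the counter is opened. A minimal sketch, assuming the perf_counter_open syscall number and the struct perf_counter_hw_event layout of this series (the header location and any fields beyond record_type are assumptions here):

	#include <unistd.h>			/* syscall() */
	#include <linux/perf_counter.h>		/* hw_event, record-format bits */

	struct perf_counter_hw_event hw_event = {
		.record_type = PERF_RECORD_TID | PERF_RECORD_TIME,
	};
	/* pid 0 = current task, cpu -1 = any cpu, no group, no flags */
	int fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);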
	__PERF_EVENT_TID	= PERF_RECORD_TID,
	__PERF_EVENT_GROUP	= PERF_RECORD_GROUP,
	__PERF_EVENT_CALLCHAIN	= PERF_RECORD_CALLCHAIN,
+	__PERF_EVENT_TIME	= PERF_RECORD_TIME,
};
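Because each __PERF_EVENT_* alias is defined to equal its PERF_RECORD_* counterpart, the kernel can OR the request mask straight into header.type, and a reader can test the header with the same constant it used when opening the counter. A small sketch (event_has_time is a local helper, not part of the patch):

	/* The 1:1 bit mapping lets a reader test an event header with
	 * the same constants it used to request the optional fields. */
	static int event_has_time(const struct perf_event_header *header)
	{
		return header->type & __PERF_EVENT_TIME;
	}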
#ifdef __KERNEL__
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
+	u64 time;

	header.type = PERF_EVENT_COUNTER_OVERFLOW;
	header.size = sizeof(header);
		}
	}
+	if (record_type & PERF_RECORD_TIME) {
+		/*
+		 * Maybe do better on x86 and provide cpu_clock_nmi()
+		 */
+		time = sched_clock();
+
+		header.type |= __PERF_EVENT_TIME;
+		header.size += sizeof(u64);
+	}
+
	ret = perf_output_begin(&handle, counter, header.size, nmi);
	if (ret)
		return;
	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);
+	if (record_type & PERF_RECORD_TIME)
+		perf_output_put(&handle, time);
+
	perf_output_end(&handle);
}
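On the read side, the timestamp can be pulled off the record tail, since it is the last field written. A reader-side sketch, assuming struct perf_event_header from this header and that the base PERF_EVENT_* type values stay clear of the low __PERF_EVENT_* bits (as in this series); handle_overflow is a local name, and ring-buffer wrap handling is omitted:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <linux/perf_counter.h>		/* struct perf_event_header */

	/* The timestamp is the last field copied into the record, so it
	 * occupies the final 8 bytes when __PERF_EVENT_TIME is set. */
	static void handle_overflow(const struct perf_event_header *header)
	{
		if (header->type & __PERF_EVENT_TIME) {
			const char *end = (const char *)header + header->size;
			uint64_t time;

			memcpy(&time, end - sizeof(time), sizeof(time));
			printf("overflow timestamp: %llu\n",
			       (unsigned long long)time);
		}
	}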