enum perf_counter_record_format {
PERF_RECORD_IP = 1U << 0,
PERF_RECORD_TID = 1U << 1,
- PERF_RECORD_GROUP = 1U << 2,
- PERF_RECORD_CALLCHAIN = 1U << 3,
- PERF_RECORD_TIME = 1U << 4,
+ PERF_RECORD_TIME = 1U << 2,
+ PERF_RECORD_GROUP = 1U << 3,
+ PERF_RECORD_CALLCHAIN = 1U << 4,
};
/*
 * struct {
 *   struct perf_event_header header;
 *
* { u64 ip; } && PERF_RECORD_IP
* { u32 pid, tid; } && PERF_RECORD_TID
+ * { u64 time; } && PERF_RECORD_TIME
*
* { u64 nr;
* { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
 *
 * { u16 nr,
 *   hv,
 *   kernel,
 *   user;
 *   u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
- *
- * { u64 time; } && PERF_RECORD_TIME
* };
*/
};
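
The on-the-wire layout of an overflow record follows the bit order of perf_counter_record_format, which is why moving PERF_RECORD_TIME down to bit 2 also moves the timestamp from the tail of the record to right after the pid/tid pair. Below is a minimal userspace-style sketch of how a consumer might walk one record under the new ordering; the REC_* macros, decode_sample() and the fabricated record are illustrative only (not part of this patch or the perf ABI) and assume a little-endian layout for the packed pid/tid word.

    #include <stdint.h>
    #include <stdio.h>

    /* field order mirrors the enum bit order documented above */
    #define REC_IP   (1U << 0)
    #define REC_TID  (1U << 1)
    #define REC_TIME (1U << 2)

    /* 'rec' points just past the record header */
    static void decode_sample(uint32_t record_type, const uint64_t *rec)
    {
    	if (record_type & REC_IP)
    		printf("ip   0x%llx\n", (unsigned long long)*rec++);

    	if (record_type & REC_TID) {
    		uint32_t pid = (uint32_t)(*rec & 0xffffffffu);
    		uint32_t tid = (uint32_t)(*rec >> 32);
    		printf("pid  %u  tid %u\n", pid, tid);
    		rec++;
    	}

    	if (record_type & REC_TIME)
    		printf("time %llu\n", (unsigned long long)*rec++);

    	/* PERF_RECORD_GROUP and PERF_RECORD_CALLCHAIN data would follow */
    }

    int main(void)
    {
    	/* fabricated record: ip, packed pid/tid, timestamp */
    	uint64_t rec[3];

    	rec[0] = 0xffffffff81000000ULL;
    	rec[1] = ((uint64_t)42 << 32) | 1234;	/* pid 1234, tid 42 */
    	rec[2] = 123456789ULL;

    	decode_sample(REC_IP | REC_TID | REC_TIME, rec);
    	return 0;
    }

Keeping the emit order in perf_counter_output() identical to the format-bit order keeps a decoder like this a straight top-to-bottom walk, which is the point of moving both the sizing block and the perf_output_put() of the timestamp in the hunks below.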
header.size += sizeof(tid_entry);
}
+ if (record_type & PERF_RECORD_TIME) {
+ /*
+ * Maybe do better on x86 and provide cpu_clock_nmi()
+ */
+ time = sched_clock();
+
+ header.type |= PERF_RECORD_TIME;
+ header.size += sizeof(u64);
+ }
+
if (record_type & PERF_RECORD_GROUP) {
header.type |= PERF_RECORD_GROUP;
 		header.size += sizeof(u64) +
 			counter->nr_siblings * sizeof(group_entry);
}
}
- if (record_type & PERF_RECORD_TIME) {
- /*
- * Maybe do better on x86 and provide cpu_clock_nmi()
- */
- time = sched_clock();
-
- header.type |= PERF_RECORD_TIME;
- header.size += sizeof(u64);
- }
-
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
if (ret)
return;
if (record_type & PERF_RECORD_TID)
perf_output_put(&handle, tid_entry);
+ if (record_type & PERF_RECORD_TIME)
+ perf_output_put(&handle, time);
+
if (record_type & PERF_RECORD_GROUP) {
struct perf_counter *leader, *sub;
u64 nr = counter->nr_siblings;
if (callchain)
perf_output_copy(&handle, callchain, callchain_size);
- if (record_type & PERF_RECORD_TIME)
- perf_output_put(&handle, time);
-
perf_output_end(&handle);
}