* in increasing order of bit value, after the counter value.
*/
enum perf_counter_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, /* rewritten as explicit bit flags */
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2, /* new: also emit the counter's unique id in read() */
};
/*
* { u32 cpu, res; } && PERF_RECORD_CPU
*
* { u64 nr;
- * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
*
* { u16 nr,
* hv,
struct rcu_head rcu_head;
struct pid_namespace *ns;
+ u64 id;
#endif
};
if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = counter->total_time_running +
atomic64_read(&counter->child_total_time_running);
+ if (counter->hw_event.read_format & PERF_FORMAT_ID)
+ values[n++] = counter->id;
mutex_unlock(&counter->child_mutex);
if (count < n * sizeof(u64))
u32 pid, tid;
} tid_entry;
struct {
- u64 event;
+ u64 id;
u64 counter;
} group_entry;
struct perf_callchain_entry *callchain = NULL;
if (sub != counter)
sub->pmu->read(sub);
- group_entry.event = sub->hw_event.config;
+ group_entry.id = sub->id;
group_entry.counter = atomic64_read(&sub->count);
perf_output_put(&handle, group_entry);
return counter;
}
+static atomic64_t perf_counter_id; /* allocator for unique counter ids; bumped via atomic64_inc_return in sys_perf_counter_open, so ids start at 1 */
+
/**
* sys_perf_counter_open - open a performance counter, associate it to a task/cpu
*
mutex_unlock(¤t->perf_counter_mutex);
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
+ counter->id = atomic64_inc_return(&perf_counter_id);
fput_light(counter_file, fput_needed2);