return NULL;
if ((s64)counter->hw_event.irq_period < 0)
return NULL;
- if (!counter->hw_event.raw_type) {
- ev = counter->hw_event.event_id;
+ if (!perf_event_raw(&counter->hw_event)) {
+ ev = perf_event_id(&counter->hw_event);
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return NULL;
ev = ppmu->generic_events[ev];
} else {
- ev = counter->hw_event.raw_event_id;
+ ev = perf_event_config(&counter->hw_event);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
/*
* Raw event types provide the config in the event structure
*/
- if (hw_event->raw_type) {
- hwc->config |= pmc_ops->raw_event(hw_event->raw_event_id);
+ if (perf_event_raw(hw_event)) {
+ hwc->config |= pmc_ops->raw_event(perf_event_config(hw_event));
} else {
- if (hw_event->event_id >= pmc_ops->max_events)
+ if (perf_event_id(hw_event) >= pmc_ops->max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= pmc_ops->event_map(hw_event->event_id);
+ hwc->config |= pmc_ops->event_map(perf_event_id(hw_event));
}
counter->wakeup_pending = 0;
PERF_RECORD_GROUP = 2,
};
+#define __PERF_COUNTER_MASK(name) \
+ (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \
+ PERF_COUNTER_##name##_SHIFT)
+
+#define PERF_COUNTER_RAW_BITS 1
+#define PERF_COUNTER_RAW_SHIFT 63
+#define PERF_COUNTER_RAW_MASK __PERF_COUNTER_MASK(RAW)
+
+#define PERF_COUNTER_CONFIG_BITS 63
+#define PERF_COUNTER_CONFIG_SHIFT 0
+#define PERF_COUNTER_CONFIG_MASK __PERF_COUNTER_MASK(CONFIG)
+
+#define PERF_COUNTER_TYPE_BITS 7
+#define PERF_COUNTER_TYPE_SHIFT 56
+#define PERF_COUNTER_TYPE_MASK __PERF_COUNTER_MASK(TYPE)
+
+#define PERF_COUNTER_EVENT_BITS 56
+#define PERF_COUNTER_EVENT_SHIFT 0
+#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT)
+
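/*
 * Illustrative expansion (editor's sketch, not part of the patch): the
 * __PERF_COUNTER_MASK() helper shifts an all-ones field of the given
 * width to its position, so the masks above work out to
 *
 *   PERF_COUNTER_RAW_MASK    = 0x8000000000000000  (bit 63)
 *   PERF_COUNTER_CONFIG_MASK = 0x7fffffffffffffff  (bits 0-62)
 *   PERF_COUNTER_TYPE_MASK   = 0x7f00000000000000  (bits 56-62)
 *   PERF_COUNTER_EVENT_MASK  = 0x00ffffffffffffff  (bits 0-55)
 */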
/*
* Hardware event to monitor via a performance monitoring counter:
*/
struct perf_counter_hw_event {
- union {
-#ifndef __BIG_ENDIAN_BITFIELD
- struct {
- __u64 event_id : 56,
- type : 8;
- };
- struct {
- __u64 raw_event_id : 63,
- raw_type : 1;
- };
-#else
- struct {
- __u64 type : 8,
- event_id : 56;
- };
- struct {
- __u64 raw_type : 1,
- raw_event_id : 63;
- };
-#endif /* __BIT_ENDIAN_BITFIELD */
- __u64 event_config;
- };
+ /*
+ * The MSB of the config word signifies whether the rest contains
+ * cpu-specific (raw) counter configuration data; if unset, the next
+ * 7 bits are an event type and the remaining bits are the event
+ * identifier.
+ */
+ __u64 config;
__u64 irq_period;
__u64 record_type;
struct task_struct;
+static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
+{
+ return hw_event->config & PERF_COUNTER_RAW_MASK;
+}
+
+static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
+{
+ return hw_event->config & PERF_COUNTER_CONFIG_MASK;
+}
+
+static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
+{
+ return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
+ PERF_COUNTER_TYPE_SHIFT;
+}
+
+static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
+{
+ return hw_event->config & PERF_COUNTER_EVENT_MASK;
+}
+
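/*
 * Illustrative sketch (editor's addition, not part of the patch): how a
 * config word could be composed for the two encodings that the helpers
 * above decode. The function names are hypothetical, not kernel API.
 */
static inline __u64 example_config_generic(__u64 type, __u64 event_id)
{
	/* generic event: 7-bit type in bits 56-62, event id in bits 0-55 */
	return ((type << PERF_COUNTER_TYPE_SHIFT) & PERF_COUNTER_TYPE_MASK) |
	       (event_id & PERF_COUNTER_EVENT_MASK);
}

static inline __u64 example_config_raw(__u64 raw_config)
{
	/* raw event: MSB set, remaining 63 bits hold hardware-specific config */
	return PERF_COUNTER_RAW_MASK | (raw_config & PERF_COUNTER_CONFIG_MASK);
}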
/**
* struct hw_perf_counter - performance counter hardware details:
*/
*/
static inline int is_software_counter(struct perf_counter *counter)
{
- return !counter->hw_event.raw_type &&
- counter->hw_event.type != PERF_TYPE_HARDWARE;
+ return !perf_event_raw(&counter->hw_event) &&
+ perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}
extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);
list_for_each_entry(sub, &leader->sibling_list, list_entry) {
if (sub != counter)
sub->hw_ops->read(sub);
- perf_counter_store_irq(counter, sub->hw_event.event_config);
+ perf_counter_store_irq(counter, sub->hw_event.config);
perf_counter_store_irq(counter, atomic64_read(&sub->count));
}
}
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
return 0;
- if (counter->hw_event.raw_type)
+ if (perf_event_raw(&counter->hw_event))
return 0;
- if (counter->hw_event.type != type)
+ if (perf_event_type(&counter->hw_event) != type)
return 0;
- if (counter->hw_event.event_id != event)
+ if (perf_event_id(&counter->hw_event) != event)
return 0;
if (counter->hw_event.exclude_user && user_mode(regs))
static void tp_perf_counter_destroy(struct perf_counter *counter)
{
- ftrace_profile_disable(counter->hw_event.event_id);
+ ftrace_profile_disable(perf_event_id(&counter->hw_event));
}
static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
- int event_id = counter->hw_event.event_id;
+ int event_id = perf_event_id(&counter->hw_event);
int ret;
ret = ftrace_profile_enable(event_id);
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->hw_event.event_id) {
+ switch (perf_event_id(&counter->hw_event)) {
case PERF_COUNT_CPU_CLOCK:
hw_ops = &perf_ops_cpu_clock;
hw_ops = NULL;
- if (hw_event->raw_type)
+ if (perf_event_raw(hw_event)) {
hw_ops = hw_perf_counter_init(counter);
- else switch (hw_event->type) {
+ goto done;
+ }
+
+ switch (perf_event_type(hw_event)) {
case PERF_TYPE_HARDWARE:
hw_ops = hw_perf_counter_init(counter);
break;
kfree(counter);
return NULL;
}
+done:
counter->hw_ops = hw_ops;
return counter;