}
counter = ctrs[i];
if (first) {
- eu = counter->hw_event.exclude_user;
- ek = counter->hw_event.exclude_kernel;
- eh = counter->hw_event.exclude_hv;
+ eu = counter->attr.exclude_user;
+ ek = counter->attr.exclude_kernel;
+ eh = counter->attr.exclude_hv;
first = 0;
- } else if (counter->hw_event.exclude_user != eu ||
- counter->hw_event.exclude_kernel != ek ||
- counter->hw_event.exclude_hv != eh) {
+ } else if (counter->attr.exclude_user != eu ||
+ counter->attr.exclude_kernel != ek ||
+ counter->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
/*
* Add in MMCR0 freeze bits corresponding to the
- * hw_event.exclude_* bits for the first counter.
+ * attr.exclude_* bits for the first counter.
* We have already checked that all counters have the
* same values for these bits as the first counter.
*/
counter = cpuhw->counter[0];
- if (counter->hw_event.exclude_user)
+ if (counter->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
- if (counter->hw_event.exclude_kernel)
+ if (counter->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_counters_kernel;
- if (counter->hw_event.exclude_hv)
+ if (counter->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
- if (counter->hw_event.exclude_user
- || counter->hw_event.exclude_kernel
- || counter->hw_event.exclude_hv
- || counter->hw_event.sample_period)
+ if (counter->attr.exclude_user
+ || counter->attr.exclude_kernel
+ || counter->attr.exclude_hv
+ || counter->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
if (!ppmu)
return ERR_PTR(-ENXIO);
- if (!perf_event_raw(&counter->hw_event)) {
- ev = perf_event_id(&counter->hw_event);
+ if (!perf_event_raw(&counter->attr)) {
+ ev = perf_event_id(&counter->attr);
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
} else {
- ev = perf_event_config(&counter->hw_event);
+ ev = perf_event_config(&counter->attr);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
- counter->hw_event.exclude_hv = 0;
+ counter->attr.exclude_hv = 0;
/*
* If this is a per-task counter, then we can use
*/
if (record) {
addr = 0;
- if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
+ if (counter->attr.record_type & PERF_RECORD_ADDR) {
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling,
*/
/*
- * hw_event.type
+ * attr.type
*/
enum perf_event_types {
PERF_TYPE_HARDWARE = 0,
};
/*
- * Generalized performance counter event types, used by the hw_event.event_id
+ * Generalized performance counter event types, used by the attr.event_id
* parameter of the sys_perf_counter_open() syscall:
*/
-enum hw_event_ids {
+enum attr_ids {
/*
* Common hardware events, generalized by the kernel:
*/
#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT)
/*
- * Bits that can be set in hw_event.sample_type to request information
+ * Bits that can be set in attr.sample_type to request information
* in the overflow packets.
*/
enum perf_counter_sample_format {
};
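/*
 * Illustrative sketch, not part of the patch: a sampling counter selects
 * which fields go into each overflow record by OR-ing these bits into
 * attr.sample_type.  PERF_SAMPLE_CONFIG and PERF_SAMPLE_CPU are the bits
 * visible in the output code further below; the period value is arbitrary.
 */
struct perf_counter_attr sample_attr = {
	.sample_period	= 10000,	/* one overflow record every 10000 events */
	.sample_type	= PERF_SAMPLE_CONFIG | PERF_SAMPLE_CPU,
};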
/*
- * Bits that can be set in hw_event.read_format to request that
+ * Bits that can be set in attr.read_format to request that
* reads on the counter should return the indicated quantities,
* in increasing order of bit value, after the counter value.
*/
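/*
 * Illustrative sketch, not part of the patch: with both time bits and the
 * id bit set, a read() on the counter file descriptor returns four u64
 * values in the order the values[] array is built further below (fd is
 * assumed to come from sys_perf_counter_open()).
 */
struct perf_counter_attr read_attr = {
	.read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
			  PERF_FORMAT_TOTAL_TIME_RUNNING |
			  PERF_FORMAT_ID,
};
u64 buf[4];	/* buf[0]=count, buf[1]=time enabled, buf[2]=time running, buf[3]=id */
read(fd, buf, sizeof(buf));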
/*
* Hardware event to monitor via a performance monitoring counter:
*/
-struct perf_counter_hw_event {
+struct perf_counter_attr {
/*
* The MSB of the config word signifies if the rest contains cpu
* specific (raw) counter configuration data, if unset, the next
struct task_struct;
-static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_raw(struct perf_counter_attr *attr)
{
- return hw_event->config & PERF_COUNTER_RAW_MASK;
+ return attr->config & PERF_COUNTER_RAW_MASK;
}
-static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_config(struct perf_counter_attr *attr)
{
- return hw_event->config & PERF_COUNTER_CONFIG_MASK;
+ return attr->config & PERF_COUNTER_CONFIG_MASK;
}
-static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_type(struct perf_counter_attr *attr)
{
- return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
+ return (attr->config & PERF_COUNTER_TYPE_MASK) >>
PERF_COUNTER_TYPE_SHIFT;
}
-static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
+static inline u64 perf_event_id(struct perf_counter_attr *attr)
{
- return hw_event->config & PERF_COUNTER_EVENT_MASK;
+ return attr->config & PERF_COUNTER_EVENT_MASK;
}
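/*
 * Illustrative inverse of the helpers above, not part of the patch (the
 * perf_event_pack*() names are made up for the example): packing a
 * generalized type/id pair, or a raw hardware configuration, into
 * attr.config so that perf_event_type(), perf_event_id() and
 * perf_event_raw() recover them.
 */
static inline u64 perf_event_pack(u64 type, u64 id)
{
	return ((type << PERF_COUNTER_TYPE_SHIFT) & PERF_COUNTER_TYPE_MASK) |
		(id & PERF_COUNTER_EVENT_MASK);
}

static inline u64 perf_event_pack_raw(u64 hw_config)
{
	return PERF_COUNTER_RAW_MASK | (hw_config & PERF_COUNTER_CONFIG_MASK);
}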
/**
u64 tstamp_running;
u64 tstamp_stopped;
- struct perf_counter_hw_event hw_event;
+ struct perf_counter_attr attr;
struct hw_perf_counter hw;
struct perf_counter_context *ctx;
*/
static inline int is_software_counter(struct perf_counter *counter)
{
- return !perf_event_raw(&counter->hw_event) &&
- perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
+ return !perf_event_raw(&counter->attr) &&
+ perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
}
extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
if (!is_software_counter(counter))
cpuctx->active_oncpu--;
ctx->nr_active--;
- if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
+ if (counter->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
counter_sched_out(counter, cpuctx, ctx);
- if (group_counter->hw_event.exclusive)
+ if (group_counter->attr.exclusive)
cpuctx->exclusive = 0;
}
cpuctx->active_oncpu++;
ctx->nr_active++;
- if (counter->hw_event.exclusive)
+ if (counter->attr.exclusive)
cpuctx->exclusive = 1;
return 0;
* If this group is exclusive and there are already
* counters on the CPU, it can't go on.
*/
- if (counter->hw_event.exclusive && cpuctx->active_oncpu)
+ if (counter->attr.exclusive && cpuctx->active_oncpu)
return 0;
/*
* Otherwise, try to add it if all previous groups were able
*/
if (leader != counter)
group_sched_out(leader, cpuctx, ctx);
- if (leader->hw_event.pinned) {
+ if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_COUNTER_STATE_ERROR;
}
*/
if (leader != counter)
group_sched_out(leader, cpuctx, ctx);
- if (leader->hw_event.pinned) {
+ if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_COUNTER_STATE_ERROR;
}
/*
* not supported on inherited counters
*/
- if (counter->hw_event.inherit)
+ if (counter->attr.inherit)
return -EINVAL;
atomic_add(refresh, &counter->event_limit);
*/
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (counter->state <= PERF_COUNTER_STATE_OFF ||
- !counter->hw_event.pinned)
+ !counter->attr.pinned)
continue;
if (counter->cpu != -1 && counter->cpu != cpu)
continue;
* ignore pinned counters since we did them already.
*/
if (counter->state <= PERF_COUNTER_STATE_OFF ||
- counter->hw_event.pinned)
+ counter->attr.pinned)
continue;
/*
interrupts = 2*sysctl_perf_counter_limit/HZ;
}
- if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
+ if (!counter->attr.freq || !counter->attr.sample_freq)
continue;
events = HZ * interrupts * counter->hw.sample_period;
- period = div64_u64(events, counter->hw_event.sample_freq);
+ period = div64_u64(events, counter->attr.sample_freq);
delta = (s64)(1 + period - counter->hw.sample_period);
delta >>= 1;
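/*
 * Worked example for the adjustment above (values are illustrative):
 * with HZ = 1000, interrupts = 2 and a current hw.sample_period of 50000,
 * events = 1000 * 2 * 50000 = 100,000,000.  For an attr.sample_freq of
 * 1000 the target period is 100,000,000 / 1000 = 100,000, and the halved
 * delta moves hw.sample_period part of the way toward that target on each
 * pass.
 */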
perf_pending_sync(counter);
atomic_dec(&nr_counters);
- if (counter->hw_event.mmap)
+ if (counter->attr.mmap)
atomic_dec(&nr_mmap_tracking);
- if (counter->hw_event.munmap)
+ if (counter->attr.munmap)
atomic_dec(&nr_munmap_tracking);
- if (counter->hw_event.comm)
+ if (counter->attr.comm)
atomic_dec(&nr_comm_tracking);
if (counter->destroy)
mutex_lock(&counter->child_mutex);
values[0] = perf_counter_read(counter);
n = 1;
- if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = counter->total_time_enabled +
atomic64_read(&counter->child_total_time_enabled);
- if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = counter->total_time_running +
atomic64_read(&counter->child_total_time_running);
- if (counter->hw_event.read_format & PERF_FORMAT_ID)
+ if (counter->attr.read_format & PERF_FORMAT_ID)
values[n++] = counter->id;
mutex_unlock(&counter->child_mutex);
int ret = 0;
u64 value;
- if (!counter->hw_event.sample_period)
+ if (!counter->attr.sample_period)
return -EINVAL;
size = copy_from_user(&value, arg, sizeof(value));
return -EINVAL;
spin_lock_irq(&ctx->lock);
- if (counter->hw_event.freq) {
+ if (counter->attr.freq) {
if (value > sysctl_perf_counter_limit) {
ret = -EINVAL;
goto unlock;
}
- counter->hw_event.sample_freq = value;
+ counter->attr.sample_freq = value;
} else {
- counter->hw_event.sample_period = value;
+ counter->attr.sample_period = value;
counter->hw.sample_period = value;
perf_log_period(counter, value);
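/*
 * Illustrative userspace-side sketch, not part of the patch (the ioctl
 * name is an assumption; only the interpretation of the value comes from
 * the handler above): the written u64 is taken as a frequency in Hz when
 * the counter was created with attr.freq set, and as a period in events
 * otherwise.
 */
u64 new_value = 20000;
ioctl(fd, PERF_COUNTER_IOC_PERIOD, &new_value);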
struct perf_counter *counter = handle->counter;
struct perf_mmap_data *data = handle->data;
- int wakeup_events = counter->hw_event.wakeup_events;
+ int wakeup_events = counter->attr.wakeup_events;
if (handle->overflow && wakeup_events) {
int events = atomic_inc_return(&data->events);
int nmi, struct pt_regs *regs, u64 addr)
{
int ret;
- u64 sample_type = counter->hw_event.sample_type;
+ u64 sample_type = counter->attr.sample_type;
struct perf_output_handle handle;
struct perf_event_header header;
u64 ip;
perf_output_put(&handle, addr);
if (sample_type & PERF_SAMPLE_CONFIG)
- perf_output_put(&handle, counter->hw_event.config);
+ perf_output_put(&handle, counter->attr.config);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(&handle, cpu_entry);
static int perf_counter_comm_match(struct perf_counter *counter,
struct perf_comm_event *comm_event)
{
- if (counter->hw_event.comm &&
+ if (counter->attr.comm &&
comm_event->event.header.type == PERF_EVENT_COMM)
return 1;
static int perf_counter_mmap_match(struct perf_counter *counter,
struct perf_mmap_event *mmap_event)
{
- if (counter->hw_event.mmap &&
+ if (counter->attr.mmap &&
mmap_event->event.header.type == PERF_EVENT_MMAP)
return 1;
- if (counter->hw_event.munmap &&
+ if (counter->attr.munmap &&
mmap_event->event.header.type == PERF_EVENT_MUNMAP)
return 1;
* In case we exclude kernel IPs or are somehow not in interrupt
* context, provide the next best thing, the user IP.
*/
- if ((counter->hw_event.exclude_kernel || !regs) &&
- !counter->hw_event.exclude_user)
+ if ((counter->attr.exclude_kernel || !regs) &&
+ !counter->attr.exclude_user)
regs = task_pt_regs(current);
if (regs) {
if (!perf_swcounter_is_counting(counter))
return 0;
- if (counter->hw_event.config != event_config)
+ if (counter->attr.config != event_config)
return 0;
if (regs) {
- if (counter->hw_event.exclude_user && user_mode(regs))
+ if (counter->attr.exclude_user && user_mode(regs))
return 0;
- if (counter->hw_event.exclude_kernel && !user_mode(regs))
+ if (counter->attr.exclude_kernel && !user_mode(regs))
return 0;
}
static void tp_perf_counter_destroy(struct perf_counter *counter)
{
- ftrace_profile_disable(perf_event_id(&counter->hw_event));
+ ftrace_profile_disable(perf_event_id(&counter->attr));
}
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
- int event_id = perf_event_id(&counter->hw_event);
+ int event_id = perf_event_id(&counter->attr);
int ret;
ret = ftrace_profile_enable(event_id);
return NULL;
counter->destroy = tp_perf_counter_destroy;
- counter->hw.sample_period = counter->hw_event.sample_period;
+ counter->hw.sample_period = counter->attr.sample_period;
return &perf_ops_generic;
}
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (perf_event_id(&counter->hw_event)) {
+ switch (perf_event_id(&counter->attr)) {
case PERF_COUNT_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
* Allocate and initialize a counter structure
*/
static struct perf_counter *
-perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+perf_counter_alloc(struct perf_counter_attr *attr,
int cpu,
struct perf_counter_context *ctx,
struct perf_counter *group_leader,
mutex_init(&counter->mmap_mutex);
counter->cpu = cpu;
- counter->hw_event = *hw_event;
+ counter->attr = *attr;
counter->group_leader = group_leader;
counter->pmu = NULL;
counter->ctx = ctx;
counter->oncpu = -1;
counter->state = PERF_COUNTER_STATE_INACTIVE;
- if (hw_event->disabled)
+ if (attr->disabled)
counter->state = PERF_COUNTER_STATE_OFF;
pmu = NULL;
hwc = &counter->hw;
- if (hw_event->freq && hw_event->sample_freq)
- hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
+ if (attr->freq && attr->sample_freq)
+ hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
else
- hwc->sample_period = hw_event->sample_period;
+ hwc->sample_period = attr->sample_period;
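/*
 * Arithmetic note for the freq case above (values are illustrative): with
 * HZ = 1000, TICK_NSEC is roughly 1,000,000, so an attr.sample_freq of
 * 1000 appears to seed hw.sample_period at about 1000; the per-tick
 * adjustment shown earlier then converges the period onto the requested
 * frequency.
 */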
/*
* we currently do not support PERF_SAMPLE_GROUP on inherited counters
*/
- if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
+ if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
goto done;
- if (perf_event_raw(hw_event)) {
+ if (perf_event_raw(attr)) {
pmu = hw_perf_counter_init(counter);
goto done;
}
- switch (perf_event_type(hw_event)) {
+ switch (perf_event_type(attr)) {
case PERF_TYPE_HARDWARE:
pmu = hw_perf_counter_init(counter);
break;
counter->pmu = pmu;
atomic_inc(&nr_counters);
- if (counter->hw_event.mmap)
+ if (counter->attr.mmap)
atomic_inc(&nr_mmap_tracking);
- if (counter->hw_event.munmap)
+ if (counter->attr.munmap)
atomic_inc(&nr_munmap_tracking);
- if (counter->hw_event.comm)
+ if (counter->attr.comm)
atomic_inc(&nr_comm_tracking);
return counter;
/**
* sys_perf_counter_open - open a performance counter, associate it to a task/cpu
*
- * @hw_event_uptr: event type attributes for monitoring/sampling
+ * @attr_uptr: event type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader counter fd
*/
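/*
 * Illustrative userspace-side sketch, not part of the patch: only the
 * argument order of the prototype below and the config packing shown by
 * the inline helpers above are taken from this patch; the event id value
 * and the raw syscall invocation are assumptions for the example.
 */
struct perf_counter_attr attr = {
	.config		= (u64)PERF_TYPE_HARDWARE << PERF_COUNTER_TYPE_SHIFT,
				/* generic hardware event id 0 */
	.disabled	= 1,	/* create stopped, enable explicitly later */
	.exclude_kernel	= 1,	/* count user space only */
};
int fd = syscall(__NR_perf_counter_open, &attr,
		 0,	/* pid: current task */
		 -1,	/* cpu: any */
		 -1,	/* group_fd: no group */
		 0);	/* flags */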
SYSCALL_DEFINE5(perf_counter_open,
- const struct perf_counter_hw_event __user *, hw_event_uptr,
+ const struct perf_counter_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
struct perf_counter *counter, *group_leader;
- struct perf_counter_hw_event hw_event;
+ struct perf_counter_attr attr;
struct perf_counter_context *ctx;
struct file *counter_file = NULL;
struct file *group_file = NULL;
if (flags)
return -EINVAL;
- if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+ if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
return -EFAULT;
/*
/*
* Only a group leader can be exclusive or pinned
*/
- if (hw_event.exclusive || hw_event.pinned)
+ if (attr.exclusive || attr.pinned)
goto err_put_context;
}
- counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
+ counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
GFP_KERNEL);
ret = PTR_ERR(counter);
if (IS_ERR(counter))
if (parent_counter->parent)
parent_counter = parent_counter->parent;
- child_counter = perf_counter_alloc(&parent_counter->hw_event,
+ child_counter = perf_counter_alloc(&parent_counter->attr,
parent_counter->cpu, child_ctx,
group_leader, GFP_KERNEL);
if (IS_ERR(child_counter))
/*
* Make the child state follow the state of the parent counter,
- * not its hw_event.disabled bit. We hold the parent's mutex,
+ * not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_counter_{en, dis}able_family.
*/
if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
/*
* inherit into child's child as well:
*/
- child_counter->hw_event.inherit = 1;
+ child_counter->attr.inherit = 1;
/*
* Get a reference to the parent filp - we will fput it
if (counter != counter->group_leader)
continue;
- if (!counter->hw_event.inherit) {
+ if (!counter->attr.inherit) {
inherited_all = 0;
continue;
}