static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;
+static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;
int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
/*
- * Mutex for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) counter reservations:
*/
-static DEFINE_MUTEX(perf_resource_mutex);
+static DEFINE_SPINLOCK(perf_resource_lock);
/*
* Architecture provided APIs - weak aliases:
return NULL;
}
-u64 __weak hw_perf_save_disable(void) { return 0; }
-void __weak hw_perf_restore(u64 ctrl) { barrier(); }
+void __weak hw_perf_disable(void) { barrier(); }
+void __weak hw_perf_enable(void) { barrier(); }
+
void __weak hw_perf_counter_setup(int cpu) { barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
struct perf_cpu_context *cpuctx,
void __weak perf_counter_print_debug(void) { }
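+/*
+ * Per-CPU nesting count for PMU disables: perf_disable() calls can
+ * nest, and hw_perf_enable() only runs again once the outermost
+ * perf_enable() drops the count back to zero.
+ */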
+static DEFINE_PER_CPU(int, disable_count);
+
+void __perf_disable(void)
+{
+ __get_cpu_var(disable_count)++;
+}
+
+bool __perf_enable(void)
+{
+ return !--__get_cpu_var(disable_count);
+}
+
+void perf_disable(void)
+{
+ __perf_disable();
+ hw_perf_disable();
+}
+
+void perf_enable(void)
+{
+ if (__perf_enable())
+ hw_perf_enable();
+}
+
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
* add it straight to the context's counter list, or to the group
* leader's sibling list:
*/
- if (counter->group_leader == counter)
+ if (group_leader == counter)
list_add_tail(&counter->list_entry, &ctx->counter_list);
else {
list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}
list_add_rcu(&counter->event_entry, &ctx->event_list);
+ ctx->nr_counters++;
}
static void
{
struct perf_counter *sibling, *tmp;
+ ctx->nr_counters--;
+
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
unsigned long flags;
- u64 perf_flags;
/*
* If this is a task context, we need to check whether it is
counter_sched_out(counter, cpuctx, ctx);
counter->task = NULL;
- ctx->nr_counters--;
/*
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non NMI based counters.
*/
- perf_flags = hw_perf_save_disable();
+ perf_disable();
list_del_counter(counter, ctx);
- hw_perf_restore(perf_flags);
+ perf_enable();
if (!ctx->task) {
/*
* succeed.
*/
if (!list_empty(&counter->list_entry)) {
- ctx->nr_counters--;
list_del_counter(counter, ctx);
counter->task = NULL;
}
spin_unlock_irq(&ctx->lock);
}
-/*
- * Disable a counter and all its children.
- */
-static void perf_counter_disable_family(struct perf_counter *counter)
-{
- struct perf_counter *child;
-
- perf_counter_disable(counter);
-
- /*
- * Lock the mutex to protect the list of children
- */
- mutex_lock(&counter->mutex);
- list_for_each_entry(child, &counter->child_list, child_list)
- perf_counter_disable(child);
- mutex_unlock(&counter->mutex);
-}
-
static int
counter_sched_in(struct perf_counter *counter,
struct perf_cpu_context *cpuctx,
return 0;
}
+static int
+group_sched_in(struct perf_counter *group_counter,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx,
+ int cpu)
+{
+ struct perf_counter *counter, *partial_group;
+ int ret;
+
+ if (group_counter->state == PERF_COUNTER_STATE_OFF)
+ return 0;
+
+ ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+ if (ret)
+ return ret < 0 ? ret : 0;
+
+ group_counter->prev_state = group_counter->state;
+ if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+ return -EAGAIN;
+
+ /*
+ * Schedule in siblings as one group (if any):
+ */
+ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ counter->prev_state = counter->state;
+ if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+ partial_group = counter;
+ goto group_error;
+ }
+ }
+
+ return 0;
+
+group_error:
+ /*
+ * Groups can be scheduled in as one unit only, so undo any
+ * partial group before returning:
+ */
+ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ if (counter == partial_group)
+ break;
+ counter_sched_out(counter, cpuctx, ctx);
+ }
+ counter_sched_out(group_counter, cpuctx, ctx);
+
+ return -EAGAIN;
+}
+
/*
* Return 1 for a group consisting entirely of software counters,
* 0 if the group contains any hardware counters.
struct perf_counter_context *ctx)
{
list_add_counter(counter, ctx);
- ctx->nr_counters++;
counter->prev_state = PERF_COUNTER_STATE_OFF;
counter->tstamp_enabled = ctx->time;
counter->tstamp_running = ctx->time;
struct perf_counter *leader = counter->group_leader;
int cpu = smp_processor_id();
unsigned long flags;
- u64 perf_flags;
int err;
/*
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non NMI based counters.
*/
- perf_flags = hw_perf_save_disable();
+ perf_disable();
add_counter_to_ctx(counter, ctx);
cpuctx->max_pertask--;
unlock:
- hw_perf_restore(perf_flags);
+ perf_enable();
spin_unlock_irqrestore(&ctx->lock, flags);
}
if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
goto unlock;
- if (!group_can_go_on(counter, cpuctx, 1))
+ if (!group_can_go_on(counter, cpuctx, 1)) {
err = -EEXIST;
- else
- err = counter_sched_in(counter, cpuctx, ctx,
- smp_processor_id());
+ } else {
+ perf_disable();
+ if (counter == leader)
+ err = group_sched_in(counter, cpuctx, ctx,
+ smp_processor_id());
+ else
+ err = counter_sched_in(counter, cpuctx, ctx,
+ smp_processor_id());
+ perf_enable();
+ }
if (err) {
/*
spin_unlock_irq(&ctx->lock);
}
-static void perf_counter_refresh(struct perf_counter *counter, int refresh)
-{
- atomic_add(refresh, &counter->event_limit);
- perf_counter_enable(counter);
-}
-
-/*
- * Enable a counter and all its children.
- */
-static void perf_counter_enable_family(struct perf_counter *counter)
+static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
- struct perf_counter *child;
+ /*
+ * not supported on inherited counters
+ */
+ if (counter->hw_event.inherit)
+ return -EINVAL;
+ atomic_add(refresh, &counter->event_limit);
perf_counter_enable(counter);
- /*
- * Lock the mutex to protect the list of children
- */
- mutex_lock(&counter->mutex);
- list_for_each_entry(child, &counter->child_list, child_list)
- perf_counter_enable(child);
- mutex_unlock(&counter->mutex);
+ return 0;
}
void __perf_counter_sched_out(struct perf_counter_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_counter *counter;
- u64 flags;
spin_lock(&ctx->lock);
ctx->is_active = 0;
goto out;
update_context_time(ctx);
- flags = hw_perf_save_disable();
+ perf_disable();
if (ctx->nr_active) {
list_for_each_entry(counter, &ctx->counter_list, list_entry)
group_sched_out(counter, cpuctx, ctx);
}
- hw_perf_restore(flags);
+ perf_enable();
out:
spin_unlock(&ctx->lock);
}
cpuctx->task_ctx = NULL;
}
-static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
- __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+ __perf_counter_sched_out(ctx, cpuctx);
+ cpuctx->task_ctx = NULL;
}
-static int
-group_sched_in(struct perf_counter *group_counter,
- struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx,
- int cpu)
+static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
- struct perf_counter *counter, *partial_group;
- int ret;
-
- if (group_counter->state == PERF_COUNTER_STATE_OFF)
- return 0;
-
- ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
- if (ret)
- return ret < 0 ? ret : 0;
-
- group_counter->prev_state = group_counter->state;
- if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
- return -EAGAIN;
-
- /*
- * Schedule in siblings as one group (if any):
- */
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
- counter->prev_state = counter->state;
- if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
- partial_group = counter;
- goto group_error;
- }
- }
-
- return 0;
-
-group_error:
- /*
- * Groups can be scheduled in as one unit only, so undo any
- * partial group before returning:
- */
- list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
- if (counter == partial_group)
- break;
- counter_sched_out(counter, cpuctx, ctx);
- }
- counter_sched_out(group_counter, cpuctx, ctx);
-
- return -EAGAIN;
+ __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}
static void
struct perf_cpu_context *cpuctx, int cpu)
{
struct perf_counter *counter;
- u64 flags;
int can_add_hw = 1;
spin_lock(&ctx->lock);
ctx->timestamp = perf_clock();
- flags = hw_perf_save_disable();
+ perf_disable();
/*
* First go through the list and put on any pinned groups
can_add_hw = 0;
}
}
- hw_perf_restore(flags);
+ perf_enable();
out:
spin_unlock(&ctx->lock);
}
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
struct perf_counter *counter;
unsigned long flags;
- u64 perf_flags;
- int cpu;
if (likely(!ctx->nr_counters))
return 0;
local_irq_save(flags);
- cpu = smp_processor_id();
- perf_counter_task_sched_out(curr, cpu);
+ __perf_counter_task_sched_out(ctx);
spin_lock(&ctx->lock);
/*
* Disable all the counters:
*/
- perf_flags = hw_perf_save_disable();
+ perf_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (counter->state != PERF_COUNTER_STATE_ERROR) {
}
}
- hw_perf_restore(perf_flags);
+ perf_enable();
spin_unlock_irqrestore(&ctx->lock, flags);
struct perf_counter_context *ctx = &curr->perf_counter_ctx;
struct perf_counter *counter;
unsigned long flags;
- u64 perf_flags;
int cpu;
if (likely(!ctx->nr_counters))
local_irq_save(flags);
cpu = smp_processor_id();
- perf_counter_task_sched_out(curr, cpu);
+ __perf_counter_task_sched_out(ctx);
spin_lock(&ctx->lock);
/*
* Disable all the counters:
*/
- perf_flags = hw_perf_save_disable();
+ perf_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (counter->state > PERF_COUNTER_STATE_OFF)
ctx->time - counter->total_time_enabled;
counter->hw_event.disabled = 0;
}
- hw_perf_restore(perf_flags);
+ perf_enable();
spin_unlock(&ctx->lock);
return 0;
}
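+/*
+ * Re-estimate the sampling period of every active, frequency-based
+ * counter so that it approaches hw_event.irq_freq interrupts per
+ * second, based on the interrupts seen since the last adjustment.
+ */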
+void perf_adjust_freq(struct perf_counter_context *ctx)
+{
+ struct perf_counter *counter;
+ u64 irq_period;
+ u64 events, period;
+ s64 delta;
+
+ spin_lock(&ctx->lock);
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ continue;
+
+ if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+ continue;
+
+ events = HZ * counter->hw.interrupts * counter->hw.irq_period;
+ period = div64_u64(events, counter->hw_event.irq_freq);
+
+ delta = (s64)(1 + period - counter->hw.irq_period);
+ delta >>= 1;
+
+ irq_period = counter->hw.irq_period + delta;
+
+ if (!irq_period)
+ irq_period = 1;
+
+ counter->hw.irq_period = irq_period;
+ counter->hw.interrupts = 0;
+ }
+ spin_unlock(&ctx->lock);
+}
+
/*
* Round-robin a context's counters:
*/
static void rotate_ctx(struct perf_counter_context *ctx)
{
struct perf_counter *counter;
- u64 perf_flags;
if (!ctx->nr_counters)
return;
/*
* Rotate the first entry last (works just fine for group counters too):
*/
- perf_flags = hw_perf_save_disable();
+ perf_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
list_move_tail(&counter->list_entry, &ctx->counter_list);
break;
}
- hw_perf_restore(perf_flags);
+ perf_enable();
spin_unlock(&ctx->lock);
}
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_counter_context *ctx = &curr->perf_counter_ctx;
- const int rotate_percpu = 0;
+ struct perf_cpu_context *cpuctx;
+ struct perf_counter_context *ctx;
+
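+ /* no counters anywhere in the system: nothing to do */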
+ if (!atomic_read(&nr_counters))
+ return;
+
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ ctx = &curr->perf_counter_ctx;
- if (rotate_percpu)
- perf_counter_cpu_sched_out(cpuctx);
- perf_counter_task_sched_out(curr, cpu);
+ perf_adjust_freq(&cpuctx->ctx);
+ perf_adjust_freq(ctx);
- if (rotate_percpu)
- rotate_ctx(&cpuctx->ctx);
+ perf_counter_cpu_sched_out(cpuctx);
+ __perf_counter_task_sched_out(ctx);
+
+ rotate_ctx(&cpuctx->ctx);
rotate_ctx(ctx);
- if (rotate_percpu)
- perf_counter_cpu_sched_in(cpuctx, cpu);
+ perf_counter_cpu_sched_in(cpuctx, cpu);
perf_counter_task_sched_in(curr, cpu);
}
{
perf_pending_sync(counter);
+ atomic_dec(&nr_counters);
if (counter->hw_event.mmap)
atomic_dec(&nr_mmap_tracking);
if (counter->hw_event.munmap)
{
struct perf_counter *counter = file->private_data;
struct perf_mmap_data *data;
- unsigned int events;
+ unsigned int events = POLL_HUP;
rcu_read_lock();
data = rcu_dereference(counter->data);
if (data)
- events = atomic_xchg(&data->wakeup, 0);
- else
- events = POLL_HUP;
+ events = atomic_xchg(&data->poll, 0);
rcu_read_unlock();
poll_wait(file, &counter->waitq, wait);
return events;
}
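+/*
+ * Sync the counter's value once more, zero it and reflect the reset
+ * in the user-space mapping.
+ */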
+static void perf_counter_reset(struct perf_counter *counter)
+{
+ (void)perf_counter_read(counter);
+ atomic64_set(&counter->count, 0);
+ perf_counter_update_userpage(counter);
+}
+
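+/*
+ * Holding ctx->lock, apply @func to the counter's group leader and
+ * all of its siblings.
+ */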
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ struct perf_counter *sibling;
+
+ spin_lock_irq(&ctx->lock);
+ counter = counter->group_leader;
+
+ func(counter);
+ list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+ func(sibling);
+ spin_unlock_irq(&ctx->lock);
+}
+
+static void perf_counter_for_each_child(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter *child;
+
+ mutex_lock(&counter->mutex);
+ func(counter);
+ list_for_each_entry(child, &counter->child_list, child_list)
+ func(child);
+ mutex_unlock(&counter->mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter *child;
+
+ mutex_lock(&counter->mutex);
+ perf_counter_for_each_sibling(counter, func);
+ list_for_each_entry(child, &counter->child_list, child_list)
+ perf_counter_for_each_sibling(child, func);
+ mutex_unlock(&counter->mutex);
+}
+
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_counter *counter = file->private_data;
- int err = 0;
+ void (*func)(struct perf_counter *);
+ u32 flags = arg;
switch (cmd) {
case PERF_COUNTER_IOC_ENABLE:
- perf_counter_enable_family(counter);
+ func = perf_counter_enable;
break;
case PERF_COUNTER_IOC_DISABLE:
- perf_counter_disable_family(counter);
+ func = perf_counter_disable;
break;
- case PERF_COUNTER_IOC_REFRESH:
- perf_counter_refresh(counter, arg);
+ case PERF_COUNTER_IOC_RESET:
+ func = perf_counter_reset;
break;
+
+ case PERF_COUNTER_IOC_REFRESH:
+ return perf_counter_refresh(counter, arg);
default:
- err = -ENOTTY;
+ return -ENOTTY;
}
- return err;
+
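+ /* PERF_IOC_FLAG_GROUP: apply the operation to the whole group */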
+ if (flags & PERF_IOC_FLAG_GROUP)
+ perf_counter_for_each(counter, func);
+ else
+ perf_counter_for_each_child(counter, func);
+
+ return 0;
}
/*
}
data->nr_pages = nr_pages;
+ atomic_set(&data->lock, -1); /* -1 == no cpu owns the output lock */
rcu_assign_pointer(counter->data, data);
if (atomic_dec_and_mutex_lock(&counter->mmap_count,
&counter->mmap_mutex)) {
- vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
+ struct user_struct *user = current_user();
+
+ atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
+ vma->vm_mm->locked_vm -= counter->data->nr_locked;
perf_mmap_data_free(counter);
mutex_unlock(&counter->mmap_mutex);
}
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_counter *counter = file->private_data;
+ struct user_struct *user = current_user();
unsigned long vma_size;
unsigned long nr_pages;
+ unsigned long user_locked, user_lock_limit;
unsigned long locked, lock_limit;
+ long user_extra, extra;
int ret = 0;
if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
goto unlock;
}
- locked = vma->vm_mm->locked_vm;
- locked += nr_pages + 1;
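+ /*
+ * Each user gets sysctl_perf_counter_mlock KB of 'free' mlocked
+ * pages; only what exceeds that budget ('extra') is charged
+ * against the task's RLIMIT_MEMLOCK below.
+ */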
+ user_extra = nr_pages + 1;
+ user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+ user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+
+ extra = 0;
+ if (user_locked > user_lock_limit)
+ extra = user_locked - user_lock_limit;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
+ locked = vma->vm_mm->locked_vm + extra;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
atomic_set(&counter->mmap_count, 1);
- vma->vm_mm->locked_vm += nr_pages + 1;
+ atomic_long_add(user_extra, &user->locked_vm);
+ vma->vm_mm->locked_vm += extra;
+ counter->data->nr_locked = extra;
unlock:
mutex_unlock(&counter->mmap_mutex);
void perf_counter_wakeup(struct perf_counter *counter)
{
- struct perf_mmap_data *data;
-
- rcu_read_lock();
- data = rcu_dereference(counter->data);
- if (data) {
- atomic_set(&data->wakeup, POLL_IN);
- /*
- * Ensure all data writes are issued before updating the
- * user-space data head information. The matching rmb()
- * will be in userspace after reading this value.
- */
- smp_wmb();
- data->user_page->data_head = atomic_read(&data->head);
- }
- rcu_read_unlock();
-
wake_up_all(&counter->waitq);
if (counter->pending_kill) {
struct perf_mmap_data *data;
unsigned int offset;
unsigned int head;
- int wakeup;
int nmi;
int overflow;
+ int locked;
+ unsigned long flags;
};
-static inline void __perf_output_wakeup(struct perf_output_handle *handle)
+static void perf_output_wakeup(struct perf_output_handle *handle)
{
+ atomic_set(&handle->data->poll, POLL_IN);
+
if (handle->nmi) {
handle->counter->pending_wakeup = 1;
perf_pending_queue(&handle->counter->pending,
perf_counter_wakeup(handle->counter);
}
+/*
+ * Curious locking construct.
+ *
+ * We need to ensure a later event doesn't publish a head when a former
+ * event isn't done writing. However, since we need to deal with NMIs, we
+ * cannot fully serialize things.
+ *
+ * What we do is serialize between CPUs so we only have to deal with NMI
+ * nesting on a single CPU.
+ *
+ * We only publish the head (and generate a wakeup) when the outer-most
+ * event completes.
+ */
+static void perf_output_lock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ int cpu;
+
+ handle->locked = 0;
+
+ local_irq_save(handle->flags);
+ cpu = smp_processor_id();
+
+ if (in_nmi() && atomic_read(&data->lock) == cpu)
+ return;
+
+ while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
+ cpu_relax();
+
+ handle->locked = 1;
+}
+
+static void perf_output_unlock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ int head, cpu;
+
+ data->done_head = data->head;
+
+ if (!handle->locked)
+ goto out;
+
+again:
+ /*
+ * The xchg implies a full barrier that ensures all writes are done
+ * before we publish the new head, matched by a rmb() in userspace when
+ * reading this position.
+ */
+ while ((head = atomic_xchg(&data->done_head, 0)))
+ data->user_page->data_head = head;
+
+ /*
+ * NMI can happen here, which means we can miss a done_head update.
+ */
+
+ cpu = atomic_xchg(&data->lock, -1);
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
+ /*
+ * Therefore we have to validate that we did not indeed miss an update.
+ */
+ if (unlikely(atomic_read(&data->done_head))) {
+ /*
+ * Since we had it locked, we can lock it again.
+ */
+ while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
+ cpu_relax();
+
+ goto again;
+ }
+
+ if (atomic_xchg(&data->wakeup, 0))
+ perf_output_wakeup(handle);
+out:
+ local_irq_restore(handle->flags);
+}
+
static int perf_output_begin(struct perf_output_handle *handle,
struct perf_counter *counter, unsigned int size,
int nmi, int overflow)
struct perf_mmap_data *data;
unsigned int offset, head;
+ /*
+ * For inherited counters we send all the output towards the parent.
+ */
+ if (counter->parent)
+ counter = counter->parent;
+
rcu_read_lock();
data = rcu_dereference(counter->data);
if (!data)
goto out;
+ handle->data = data;
handle->counter = counter;
handle->nmi = nmi;
handle->overflow = overflow;
if (!data->nr_pages)
goto fail;
+ perf_output_lock(handle);
+
do {
offset = head = atomic_read(&data->head);
head += size;
} while (atomic_cmpxchg(&data->head, offset, head) != offset);
- handle->data = data;
handle->offset = offset;
handle->head = head;
- handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
+
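+ /*
+ * Request a wakeup once this write crosses a page boundary.
+ */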
+ if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+ atomic_set(&data->wakeup, 1);
return 0;
fail:
- __perf_output_wakeup(handle);
+ perf_output_wakeup(handle);
out:
rcu_read_unlock();
handle->offset = offset;
- WARN_ON_ONCE(handle->offset > handle->head);
+ /*
+ * Check we didn't copy past our reservation window, taking the
+ * possible unsigned int wrap into account.
+ */
+ WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
}
#define perf_output_put(handle, x) \
static void perf_output_end(struct perf_output_handle *handle)
{
- int wakeup_events = handle->counter->hw_event.wakeup_events;
+ struct perf_counter *counter = handle->counter;
+ struct perf_mmap_data *data = handle->data;
+
+ int wakeup_events = counter->hw_event.wakeup_events;
if (handle->overflow && wakeup_events) {
- int events = atomic_inc_return(&handle->data->events);
+ int events = atomic_inc_return(&data->events);
if (events >= wakeup_events) {
- atomic_sub(wakeup_events, &handle->data->events);
- __perf_output_wakeup(handle);
+ atomic_sub(wakeup_events, &data->events);
+ atomic_set(&data->wakeup, 1);
}
- } else if (handle->wakeup)
- __perf_output_wakeup(handle);
+ }
+
+ perf_output_unlock(handle);
rcu_read_unlock();
}
struct perf_callchain_entry *callchain = NULL;
int callchain_size = 0;
u64 time;
+ struct {
+ u32 cpu, reserved;
+ } cpu_entry;
header.type = 0;
header.size = sizeof(header);
header.misc = PERF_EVENT_MISC_OVERFLOW;
- header.misc |= user_mode(regs) ?
- PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ header.misc |= perf_misc_flags(regs);
if (record_type & PERF_RECORD_IP) {
- ip = instruction_pointer(regs);
+ ip = perf_instruction_pointer(regs);
header.type |= PERF_RECORD_IP;
header.size += sizeof(ip);
}
header.size += sizeof(u64);
}
+ if (record_type & PERF_RECORD_CONFIG) {
+ header.type |= PERF_RECORD_CONFIG;
+ header.size += sizeof(u64);
+ }
+
+ if (record_type & PERF_RECORD_CPU) {
+ header.type |= PERF_RECORD_CPU;
+ header.size += sizeof(cpu_entry);
+
+ cpu_entry.cpu = raw_smp_processor_id();
+ }
+
if (record_type & PERF_RECORD_GROUP) {
header.type |= PERF_RECORD_GROUP;
header.size += sizeof(u64) +
if (record_type & PERF_RECORD_ADDR)
perf_output_put(&handle, addr);
+ if (record_type & PERF_RECORD_CONFIG)
+ perf_output_put(&handle, counter->hw_event.config);
+
+ if (record_type & PERF_RECORD_CPU)
+ perf_output_put(&handle, cpu_entry);
+
+ /*
+ * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+ */
if (record_type & PERF_RECORD_GROUP) {
struct perf_counter *leader, *sub;
u64 nr = counter->nr_siblings;
int events = atomic_read(&counter->event_limit);
int ret = 0;
+ counter->hw.interrupts++;
+
+ /*
+ * XXX event_limit might not quite work as expected on inherited
+ * counters
+ */
+
counter->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&counter->event_limit)) {
ret = 1;
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_counter *counter;
struct pt_regs *regs;
+ u64 period;
counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
counter->pmu->read(counter);
ret = HRTIMER_NORESTART;
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
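+ /* enforce a minimum period of 10 usecs for hrtimer based counters */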
+ period = max_t(u64, 10000, counter->hw.irq_period);
+ hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swcounter_hrtimer;
if (hwc->irq_period) {
+ u64 period = max_t(u64, 10000, hwc->irq_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(hwc->irq_period), 0,
+ ns_to_ktime(period), 0,
HRTIMER_MODE_REL, 0);
}
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swcounter_hrtimer;
if (hwc->irq_period) {
+ u64 period = max_t(u64, 10000, hwc->irq_period);
__hrtimer_start_range_ns(&hwc->hrtimer,
- ns_to_ktime(hwc->irq_period), 0,
+ ns_to_ktime(period), 0,
HRTIMER_MODE_REL, 0);
}
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
- struct perf_counter_hw_event *hw_event = &counter->hw_event;
const struct pmu *pmu = NULL;
- struct hw_perf_counter *hwc = &counter->hw;
/*
* Software counters (currently) can't in general distinguish
case PERF_COUNT_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
- if (hw_event->irq_period && hw_event->irq_period < 10000)
- hw_event->irq_period = 10000;
break;
case PERF_COUNT_TASK_CLOCK:
/*
else
pmu = &perf_ops_cpu_clock;
- if (hw_event->irq_period && hw_event->irq_period < 10000)
- hw_event->irq_period = 10000;
break;
case PERF_COUNT_PAGE_FAULTS:
case PERF_COUNT_PAGE_FAULTS_MIN:
break;
}
- if (pmu)
- hwc->irq_period = hw_event->irq_period;
-
return pmu;
}
{
const struct pmu *pmu;
struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
long err;
counter = kzalloc(sizeof(*counter), gfpflags);
pmu = NULL;
+ hwc = &counter->hw;
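+ /*
+ * Frequency-based counters get an initial period estimate here;
+ * perf_adjust_freq() will adapt it towards the requested irq_freq.
+ */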
+ if (hw_event->freq && hw_event->irq_freq)
+ hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
+ else
+ hwc->irq_period = hw_event->irq_period;
+
+ /*
+ * we currently do not support PERF_RECORD_GROUP on inherited counters
+ */
+ if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+ goto done;
+
if (perf_event_raw(hw_event)) {
pmu = hw_perf_counter_init(counter);
goto done;
counter->pmu = pmu;
+ atomic_inc(&nr_counters);
if (counter->hw_event.mmap)
atomic_inc(&nr_mmap_tracking);
if (counter->hw_event.munmap)
static void sync_child_counter(struct perf_counter *child_counter,
struct perf_counter *parent_counter)
{
- u64 parent_val, child_val;
+ u64 child_val;
- parent_val = atomic64_read(&parent_counter->count);
child_val = atomic64_read(&child_counter->count);
/*
struct perf_counter_context *child_ctx)
{
struct perf_counter *parent_counter;
- struct perf_counter *sub, *tmp;
/*
* If we do not self-reap then we have to wait for the
*/
if (child != current) {
wait_task_inactive(child, 0);
- list_del_init(&child_counter->list_entry);
update_counter_times(child_counter);
+ list_del_counter(child_counter, child_ctx);
} else {
struct perf_cpu_context *cpuctx;
unsigned long flags;
- u64 perf_flags;
/*
* Disable and unlink this counter.
* could still be processing it:
*/
local_irq_save(flags);
- perf_flags = hw_perf_save_disable();
+ perf_disable();
cpuctx = &__get_cpu_var(perf_cpu_context);
group_sched_out(child_counter, cpuctx, child_ctx);
update_counter_times(child_counter);
- list_del_init(&child_counter->list_entry);
-
- child_ctx->nr_counters--;
+ list_del_counter(child_counter, child_ctx);
- hw_perf_restore(perf_flags);
+ perf_enable();
local_irq_restore(flags);
}
*/
if (parent_counter) {
sync_child_counter(child_counter, parent_counter);
- list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
- list_entry) {
- if (sub->parent) {
- sync_child_counter(sub, sub->parent);
- free_counter(sub);
- }
- }
free_counter(child_counter);
}
}
struct perf_counter *child_counter, *tmp;
struct perf_counter_context *child_ctx;
+ WARN_ON_ONCE(child != current);
+
child_ctx = &child->perf_counter_ctx;
if (likely(!child_ctx->nr_counters))
return;
+again:
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
list_entry)
__perf_counter_exit_task(child, child_counter, child_ctx);
+
+ /*
+ * If the last counter was a group counter, it will have appended all
+ * its siblings to the list, but we obtained 'tmp' before that, which
+ * will still point to the list head, terminating the iteration.
+ */
+ if (!list_empty(&child_ctx->counter_list))
+ goto again;
}
/*
cpuctx = &per_cpu(perf_cpu_context, cpu);
__perf_counter_init_context(&cpuctx->ctx, NULL);
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
hw_perf_counter_setup(cpu);
}
.notifier_call = perf_cpu_notify,
};
-static int __init perf_counter_init(void)
+void __init perf_counter_init(void)
{
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&perf_cpu_nb);
-
- return 0;
}
-early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
if (val > perf_max_counters)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
}
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}
if (val > 1)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_overcommit = val;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}