}
}
+/*
+ * Add a counter to the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
/*
* Remove a counter from the lists for its context.
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
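The two comments above encode the same contract, so any caller has to nest the locks. A minimal sketch of honoring it, assuming ctx->lock is the usual spinlock taken with spin_lock_irq (the attach_counter wrapper itself is hypothetical):

        /* Hypothetical caller: list_add_counter() and list_del_counter()
         * require ctx->mutex (sleepable, taken first) and ctx->lock
         * (the spinlock that actually protects the list heads). */
        static void attach_counter(struct perf_counter *counter,
                                   struct perf_counter_context *ctx)
        {
                mutex_lock(&ctx->mutex);
                spin_lock_irq(&ctx->lock);
                list_add_counter(counter, ctx);
                spin_unlock_irq(&ctx->lock);
                mutex_unlock(&ctx->mutex);
        }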
/*
* Remove the counter from a task's (or a CPU's) list of counters.
*
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex held.
*
* CPU counters are removed with a smp call. For task counters we only
* call when the task is on a CPU.
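The "smp call" the comment mentions is a cross-CPU function call. A sketch of that pattern, assuming a __perf_counter_remove_from_context(void *info) handler exists to run on the counter's CPU (both the handler name and the wrapper are assumptions here):

        /* Illustrative: per-CPU counters (ctx->task == NULL) are detached
         * by running the handler on the owning CPU and waiting for it. */
        static void remove_counter_cross_call(struct perf_counter *counter,
                                              struct perf_counter_context *ctx)
        {
                if (!ctx->task) {
                        smp_call_function_single(counter->cpu,
                                                 __perf_counter_remove_from_context,
                                                 counter, 1 /* wait */);
                        return;
                }
                /* Task counter: only safe while the task is on a CPU. */
        }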
file->private_data = NULL;
mutex_lock(&ctx->mutex);
- mutex_lock(&counter->mutex);
-
perf_counter_remove_from_context(counter);
-
- mutex_unlock(&counter->mutex);
mutex_unlock(&ctx->mutex);
free_counter(counter);
if (counter->state == PERF_COUNTER_STATE_ERROR)
return 0;
- mutex_lock(&counter->mutex);
+ mutex_lock(&counter->child_mutex);
values[0] = perf_counter_read(counter);
n = 1;
if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = counter->total_time_enabled +
atomic64_read(&counter->child_total_time_enabled);
if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = counter->total_time_running +
atomic64_read(&counter->child_total_time_running);
- mutex_unlock(&counter->mutex);
+ mutex_unlock(&counter->child_mutex);
if (count < n * sizeof(u64))
return -EINVAL;
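perf_read() emits the u64 values in a fixed order: the count first, then the enabled and running times when the corresponding read_format bits are set. A hedged userspace-side sketch of consuming that layout (counter_fd is assumed to come from a prior counter-open syscall with both time bits set):

        #include <stdint.h>
        #include <unistd.h>

        static uint64_t read_scaled(int counter_fd)
        {
                uint64_t values[3] = { 0, 0, 0 };

                /* Layout matches perf_read(): count, enabled, running. */
                if (read(counter_fd, values, sizeof(values)) != (ssize_t)sizeof(values))
                        return 0;

                /* Scale up if the counter was multiplexed off the PMU. */
                if (values[2] && values[2] < values[1])
                        return values[0] * values[1] / values[2];
                return values[0];
        }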
static void perf_counter_for_each_child(struct perf_counter *counter,
void (*func)(struct perf_counter *))
{
struct perf_counter *child;
- mutex_lock(&counter->mutex);
+ mutex_lock(&counter->child_mutex);
func(counter);
list_for_each_entry(child, &counter->child_list, child_list)
func(child);
- mutex_unlock(&counter->mutex);
+ mutex_unlock(&counter->child_mutex);
}
static void perf_counter_for_each(struct perf_counter *counter,
void (*func)(struct perf_counter *))
{
struct perf_counter *child;
- mutex_lock(&counter->mutex);
+ mutex_lock(&counter->child_mutex);
perf_counter_for_each_sibling(counter, func);
list_for_each_entry(child, &counter->child_list, child_list)
perf_counter_for_each_sibling(child, func);
- mutex_unlock(&counter->mutex);
+ mutex_unlock(&counter->child_mutex);
}
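Both walkers now rely only on child_mutex: the first applies func to a counter and its inherited children, the second extends that across the sibling group. A sketch of how an ioctl handler might choose between them (the PERF_IOC_FLAG_GROUP test mirrors this code base but treat the dispatch helper as an assumption):

        /* Illustrative dispatch: counter-scope vs. group-scope operation. */
        static void perf_counter_apply(struct perf_counter *counter,
                                       void (*func)(struct perf_counter *),
                                       unsigned long flags)
        {
                if (flags & PERF_IOC_FLAG_GROUP)
                        perf_counter_for_each(counter, func);
                else
                        perf_counter_for_each_child(counter, func);
        }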
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (!group_leader)
group_leader = counter;
- mutex_init(&counter->mutex);
+ mutex_init(&counter->child_mutex);
+ INIT_LIST_HEAD(&counter->child_list);
+
INIT_LIST_HEAD(&counter->list_entry);
INIT_LIST_HEAD(&counter->event_entry);
INIT_LIST_HEAD(&counter->sibling_list);
mutex_init(&counter->mmap_mutex);
- INIT_LIST_HEAD(&counter->child_list);
-
counter->cpu = cpu;
counter->hw_event = *hw_event;
counter->group_leader = group_leader;
/*
* Link this into the parent counter's child list
*/
- mutex_lock(&parent_counter->mutex);
+ mutex_lock(&parent_counter->child_mutex);
list_add_tail(&child_counter->child_list, &parent_counter->child_list);
-
- mutex_unlock(&parent_counter->mutex);
+ mutex_unlock(&parent_counter->child_mutex);
return child_counter;
}
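Once a child is linked, child_mutex alone keeps child_list stable, which is what lets perf_read() above fold in child totals. A minimal sketch of that pattern, assuming the counter value lives in an atomic64_t count field as elsewhere in this file (the sum helper is hypothetical):

        /* Hypothetical: total a counter and its inherited children under
         * child_mutex, mirroring perf_counter_for_each_child()'s walk. */
        static u64 perf_counter_sum_children(struct perf_counter *counter)
        {
                struct perf_counter *child;
                u64 sum = atomic64_read(&counter->count);

                mutex_lock(&counter->child_mutex);
                list_for_each_entry(child, &counter->child_list, child_list)
                        sum += atomic64_read(&child->count);
                mutex_unlock(&counter->child_mutex);

                return sum;
        }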
/*
* Remove this counter from the parent's list
*/
- mutex_lock(&parent_counter->mutex);
+ mutex_lock(&parent_counter->child_mutex);
list_del_init(&child_counter->child_list);
- mutex_unlock(&parent_counter->mutex);
+ mutex_unlock(&parent_counter->child_mutex);
/*
* Release the parent counter, if this was the last
* reference to it.
*/
{
struct perf_counter *parent_counter;
- /*
- * Protect against concurrent operations on child_counter
- * due its fd getting closed, etc.
- */
- mutex_lock(&child_counter->mutex);
-
update_counter_times(child_counter);
list_del_counter(child_counter, child_ctx);
- mutex_unlock(&child_counter->mutex);
-
parent_counter = child_counter->parent;
/*
* It can happen that parent exits first, and has counters