__perf_counter_sched_in(ctx, cpuctx, cpu);
}
-int perf_counter_task_enable(void)
-{
-	struct perf_counter *counter;
-
-	mutex_lock(&current->perf_counter_mutex);
-	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
-		perf_counter_enable(counter);
-	mutex_unlock(&current->perf_counter_mutex);
-
-	return 0;
-}
-
-int perf_counter_task_disable(void)
-{
-	struct perf_counter *counter;
-
-	mutex_lock(&current->perf_counter_mutex);
-	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
-		perf_counter_disable(counter);
-	mutex_unlock(&current->perf_counter_mutex);
-
-	return 0;
-}
-
static void perf_log_period(struct perf_counter *counter, u64 period);

static void perf_adjust_freq(struct perf_counter_context *ctx)

@@ ... @@

	return 0;
}
+int perf_counter_task_enable(void)
+{
+	struct perf_counter *counter;
+
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_for_each_child(counter, perf_counter_enable);
+	mutex_unlock(&current->perf_counter_mutex);
+
+	return 0;
+}
+
+int perf_counter_task_disable(void)
+{
+	struct perf_counter *counter;
+
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_for_each_child(counter, perf_counter_disable);
+	mutex_unlock(&current->perf_counter_mutex);
+
+	return 0;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
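
For context on what the changed loop body buys: perf_counter_enable() and
perf_counter_disable() act on a single counter, so the old loop never touched
counters that had been inherited by the task's children. The rewritten loop
goes through perf_counter_for_each_child(), which applies the callback to the
counter and to each of its inherited children. A minimal sketch of that
helper, assuming the child_list/child_mutex fields used by the perf_counter
code of this era (illustrative, not necessarily the exact upstream body):

/*
 * Apply @func to @counter and to every counter inherited from it.
 * child_mutex serializes against children being added or removed.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	mutex_lock(&counter->child_mutex);
	func(counter);				/* the parent counter itself */
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);			/* each inherited child */
	mutex_unlock(&counter->child_mutex);
}

perf_counter_task_enable()/perf_counter_task_disable() are the prctl()
backends for toggling all counters owned by current, which is why they only
take the owner's perf_counter_mutex and walk the owner_entry list.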