spin_unlock(&cpufreq_stats_lock);
}
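+/*
+ * cpufreq_task_stats_remove_uids - drop accumulated time_in_state data for
+ * every uid in the inclusive range [uid_start, uid_end]. Callers must pass
+ * uid_end < (uid_t)-1 so the loop counter cannot wrap.
+ */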
+void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+
+ rt_mutex_lock(&uid_lock);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+ hash, uid_start) {
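+			/* A bucket may hold colliding uids; free only exact matches. */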
+ if (uid_start == uid_entry->uid) {
+ hash_del(&uid_entry->hash);
+ kfree(uid_entry->dead_time_in_state);
+ kfree(uid_entry);
+ }
+ }
+ }
+
+ rt_mutex_unlock(&uid_lock);
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/rtmutex.h>
+#include <linux/cpufreq.h>
#define UID_HASH_BITS 10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
struct hlist_node *tmp;
char uids[128];
char *start_uid, *end_uid = NULL;
- long int uid_start = 0, uid_end = 0;
+ long int start = 0, end = 0;
+ uid_t uid_start, uid_end;
if (count >= sizeof(uids))
count = sizeof(uids) - 1;
if (!start_uid || !end_uid)
return -EINVAL;
- if (kstrtol(start_uid, 10, &uid_start) != 0 ||
- kstrtol(end_uid, 10, &uid_end) != 0) {
+ if (kstrtol(start_uid, 10, &start) != 0 ||
+ kstrtol(end_uid, 10, &end) != 0) {
return -EINVAL;
}
+
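+	/* (uid_t)-1 is reserved as an invalid uid; cap the range just below it. */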
+#define UID_T_MAX (((uid_t)~0U)-1)
+ if ((start < 0) || (end < 0) ||
+ (start > UID_T_MAX) || (end > UID_T_MAX)) {
+ return -EINVAL;
+ }
+
+ uid_start = start;
+ uid_end = end;
+
+	/* TODO: unify the uid_sys_stats interface with uid_time_in_state.
+	 * remove_uid_range is reused here to reduce the number of syscalls
+	 * made by userspace clients; it removes uids both from here and
+	 * from the cpufreq uid_time_in_state data.
+	 */
+ cpufreq_task_stats_remove_uids(uid_start, uid_end);
+
rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
- hash, (uid_t)uid_start) {
+ hash, uid_start) {
if (uid_start == uid_entry->uid) {
hash_del(&uid_entry->hash);
kfree(uid_entry);
			}
		}
	}

	rt_mutex_unlock(&uid_lock);
+
return count;
}
void acct_update_power(struct task_struct *p, cputime_t cputime);
void cpufreq_task_stats_init(struct task_struct *p);
void cpufreq_task_stats_exit(struct task_struct *p);
+void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end);
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *p);
+ struct pid *pid, struct task_struct *p);
#else
static inline void acct_update_power(struct task_struct *p, cputime_t cputime) {}
static inline void cpufreq_task_stats_init(struct task_struct *p) {}
static inline void cpufreq_task_stats_exit(struct task_struct *p) {}
+static inline void cpufreq_task_stats_remove_uids(uid_t uid_start,
+ uid_t uid_end) {}
#endif
#endif /* _LINUX_CPUFREQ_H */