#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
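
/*
 * Accounting of how much CPU time each task and each UID spends at each
 * cpufreq frequency. Per-task counts live in task_struct::time_in_state;
 * per-UID counts live in uid_hash_table entries and are reported through
 * procfs (uid_time_in_state_seq_ops).
 */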

#define UID_HASH_BITS 10

static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);

static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */

struct uid_entry {
uid_t uid;
unsigned int max_state;
struct hlist_node hash;
struct rcu_head rcu;
u64 time_in_state[0];
};

/**
 * struct cpu_freqs - per-cpu frequency information
 */

static struct cpu_freqs *all_freqs[NR_CPUS];

static unsigned int next_offset;

/* Caller must hold rcu_read_lock() */
static struct uid_entry *find_uid_entry_rcu(uid_t uid)
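
/*
 * Clear the task's time-in-state tracking; the array itself is
 * allocated separately by cpufreq_task_times_alloc().
 */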
void cpufreq_task_times_init(struct task_struct *p)
{
unsigned long flags;
spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
p->max_state = 0;
}
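
/*
 * Allocate the single flat array that holds this task's time for every
 * tracked frequency; its length comes from the current next_offset.
 */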
void cpufreq_task_times_alloc(struct task_struct *p)
{
void *temp;
unsigned long flags;
unsigned int max_state = READ_ONCE(next_offset);
/* We use one array to avoid multiple allocs per task */
temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
if (!temp)
return;

spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = temp;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
p->max_state = max_state;
}

/* Release the task's time_in_state array. */
void cpufreq_task_times_exit(struct task_struct *p)
{
unsigned long flags;
void *temp;
if (!p->time_in_state)
return;
spin_lock_irqsave(&task_time_in_state_lock, flags);
temp = p->time_in_state;
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
kfree(temp);
}

void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
unsigned long flags;
unsigned int state;
struct uid_entry *uid_entry;
struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
if (!freqs || p->flags & PF_EXITING)
return;
p->time_in_state)
p->time_in_state[state] += cputime;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
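
/* Also account the time to the owning UID, registering it on demand. */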
spin_lock_irqsave(&uid_lock, flags);
uid_entry = find_or_register_uid_locked(uid);
if (uid_entry && state < uid_entry->max_state)
uid_entry->time_in_state[state] += cputime;
spin_unlock_irqrestore(&uid_lock, flags);
}
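
/*
 * Set up per-cpu frequency bookkeeping when a cpufreq policy is created
 * and publish it through all_freqs[].
 */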
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
all_freqs[cpu] = freqs;
}
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
{
struct uid_entry *uid_entry;
spin_unlock_irqrestore(&uid_lock, flags);
}
void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
{
int index;
cpufreq_cpu_put(policy);
}
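
/*
 * procfs plumbing: the seq_file below reports each UID's accumulated
 * time in every frequency state.
 */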
static const struct seq_operations uid_time_in_state_seq_ops = {
.start = uid_seq_start,
}
early_initcall(cpufreq_times_init);