r.ru_majflt = current->maj_flt;
break;
case RUSAGE_CHILDREN:
- utime_jiffies = cputime_to_jiffies(current->signal->cutime);
- stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+ utime_jiffies = nsecs_to_jiffies(current->signal->cutime);
+ stime_jiffies = nsecs_to_jiffies(current->signal->cstime);
jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
r.ru_minflt = current->signal->cmin_flt;
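With signal->cutime and signal->cstime now held in nanoseconds, the jiffies step above becomes a plain division by the tick period. A minimal user-space model of nsecs_to_jiffies() for the case where NSEC_PER_SEC divides evenly by HZ (HZ=100 is an assumption for this sketch; the kernel helper also covers uneven tick rates):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100ULL			/* assumed tick rate for this sketch */

/* Model of nsecs_to_jiffies() when NSEC_PER_SEC % HZ == 0. */
static uint64_t model_nsecs_to_jiffies(uint64_t nsecs)
{
	return nsecs / (NSEC_PER_SEC / HZ);	/* 10 ms per jiffy at HZ=100 */
}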
account_user_time(tsk, acct->utime);
if (acct->utime_scaled)
- tsk->utimescaled += acct->utime_scaled;
+ tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
if (acct->gtime)
account_guest_time(tsk, acct->gtime);
account_system_index_time(tsk, acct->stime, CPUTIME_SYSTEM);
if (acct->stime_scaled)
- tsk->stimescaled += acct->stime_scaled;
+ tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
if (acct->hardirq_time)
account_system_index_time(tsk, acct->hardirq_time, CPUTIME_IRQ);
cputime_t cputime, cputime_t scaled,
enum cpu_usage_stat index)
{
- p->stimescaled += scaled;
+ p->stimescaled += cputime_to_nsecs(scaled);
account_system_index_time(p, cputime, index);
}
/* Push account value */
if (user) {
account_user_time(tsk, user);
- tsk->utimescaled += scale_vtime(user);
+ tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
}
if (guest) {
account_guest_time(tsk, guest);
- tsk->utimescaled += scale_vtime(guest);
+ tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
}
if (system)
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
- cputime_t utime, stime;
+ u64 utime, stime;
task_cputime_adjusted(current, &utime, &stime);
- return div_u64(cputime_to_nsecs(utime + stime), 100);
+
+ return div_u64(utime + stime, 100);
}
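With task_cputime_adjusted() now reporting nanoseconds directly, the cputime_to_nsecs() round-trip disappears and a single divide by 100 yields the 100ns units the Hyper-V interface works in. A quick sanity model with assumed numbers:

#include <assert.h>
#include <stdint.h>

/* Model of the new return expression: nanoseconds / 100. */
static uint64_t model_runtime_100ns(uint64_t utime_ns, uint64_t stime_ns)
{
	return (utime_ns + stime_ns) / 100;
}

int main(void)
{
	/* 1.5 ms of combined CPU time -> 15,000 units of 100 ns. */
	assert(model_runtime_100ns(1000000, 500000) == 15000);
	return 0;
}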
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
static void fill_prstatus(struct elf_prstatus *prstatus,
struct task_struct *p, long signr)
{
+ struct timeval tv;
+
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
prstatus->pr_sigpend = p->pending.signal.sig[0];
prstatus->pr_sighold = p->blocked.sig[0];
cputime_to_timeval(utime, &prstatus->pr_utime);
cputime_to_timeval(stime, &prstatus->pr_stime);
}
- cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
- cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+ tv = ns_to_timeval(p->signal->cutime);
+ prstatus->pr_cutime.tv_sec = tv.tv_sec;
+ prstatus->pr_cutime.tv_usec = tv.tv_usec;
+
+ tv = ns_to_timeval(p->signal->cstime);
+ prstatus->pr_cstime.tv_sec = tv.tv_sec;
+ prstatus->pr_cstime.tv_usec = tv.tv_usec;
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
cputime_to_timeval(utime, &prstatus->pr_utime);
cputime_to_timeval(stime, &prstatus->pr_stime);
}
- cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
- cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+ prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+ prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
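Both fill_prstatus() variants lean on ns_to_timeval() to split a nanosecond count into seconds and microseconds. A hedged model of that split for non-negative inputs (the kernel helper also normalizes negative values):

#include <stdint.h>

#define NSEC_PER_SEC  1000000000LL
#define NSEC_PER_USEC 1000LL

struct model_timeval {
	int64_t tv_sec;
	int64_t tv_usec;
};

/* Model of ns_to_timeval() for nsec >= 0. */
static struct model_timeval model_ns_to_timeval(int64_t nsec)
{
	struct model_timeval tv;

	tv.tv_sec  = nsec / NSEC_PER_SEC;
	tv.tv_usec = (nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
	return tv;
}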
unsigned long long start_time;
unsigned long cmin_flt = 0, cmaj_flt = 0;
unsigned long min_flt = 0, maj_flt = 0;
- cputime_t cutime, cstime, utime, stime;
+ u64 cutime, cstime, utime, stime;
u64 cgtime, gtime;
unsigned long rsslim = 0;
char tcomm[sizeof(task->comm)];
seq_put_decimal_ull(m, " ", cmin_flt);
seq_put_decimal_ull(m, " ", maj_flt);
seq_put_decimal_ull(m, " ", cmaj_flt);
- seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
- seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
- seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
- seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+ seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime));
+ seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime));
+ seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime));
+ seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime));
seq_put_decimal_ll(m, " ", priority);
seq_put_decimal_ll(m, " ", nice);
seq_put_decimal_ll(m, " ", num_threads);
*/
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
raw_spinlock_t lock;
#endif
};
/**
* struct task_cputime - collected CPU time counts
- * @utime: time spent in user mode, in &cputime_t units
- * @stime: time spent in kernel mode, in &cputime_t units
+ * @utime: time spent in user mode, in nanoseconds
+ * @stime: time spent in kernel mode, in nanoseconds
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
* This structure groups together three kinds of CPU time that are tracked for
* threads and thread groups.  Most things considering CPU time want to group
* these counts together and treat all three of them in parallel.
*/
struct task_cputime {
- cputime_t utime;
- cputime_t stime;
+ u64 utime;
+ u64 stime;
unsigned long long sum_exec_runtime;
};
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
- cputime_t utime, stime, cutime, cstime;
+ u64 utime, stime, cutime, cstime;
u64 gtime;
u64 cgtime;
struct prev_cputime prev_cputime;
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime;
+ u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
- cputime_t utimescaled, stimescaled;
+ u64 utimescaled, stimescaled;
#endif
u64 gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime);
+ u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
- cputime_t *utime, cputime_t *stime)
+ u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
*utimescaled = t->utimescaled;
*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled,
- cputime_t *stimescaled)
+ u64 *utimescaled,
+ u64 *stimescaled)
{
task_cputime(t, utimescaled, stimescaled);
}
static inline void task_cputime_t(struct task_struct *t,
cputime_t *utime, cputime_t *stime)
{
- task_cputime(t, utime, stime);
+ u64 ut, st;
+
+ task_cputime(t, &ut, &st);
+ *utime = nsecs_to_cputime(ut);
+ *stime = nsecs_to_cputime(st);
}
static inline void task_cputime_t_scaled(struct task_struct *t,
cputime_t *utimescaled,
cputime_t *stimescaled)
{
- task_cputime_scaled(t, utimescaled, stimescaled);
+ u64 ut, st;
+
+ task_cputime_scaled(t, &ut, &st);
+ *utimescaled = nsecs_to_cputime(ut);
+ *stimescaled = nsecs_to_cputime(st);
}
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
/*
* Per process flags
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
static inline void thread_group_cputime_t(struct task_struct *tsk,
- struct task_cputime_t *times)
+ struct task_cputime_t *cputime)
{
- thread_group_cputime(tsk, (struct task_cputime *)times);
+ struct task_cputime times;
+
+ thread_group_cputime(tsk, &times);
+ cputime->utime = nsecs_to_cputime(times.utime);
+ cputime->stime = nsecs_to_cputime(times.stime);
+ cputime->sum_exec_runtime = times.sum_exec_runtime;
}
/*
bool group_dead = thread_group_leader(tsk);
struct sighand_struct *sighand;
struct tty_struct *uninitialized_var(tty);
- cputime_t utime, stime;
+ u64 utime, stime;
sighand = rcu_dereference_check(tsk->sighand,
lockdep_tasklist_lock_is_held());
struct signal_struct *sig = p->signal;
struct signal_struct *psig = current->signal;
unsigned long maxrss;
- cputime_t tgutime, tgstime;
+ u64 tgutime, tgstime;
/*
* The resource counters for the group leader are in its
int index;
/* Add user time to process. */
- p->utime += cputime;
+ p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
u64 *cpustat = kcpustat_this_cpu->cpustat;
/* Add guest time to process. */
- p->utime += cputime;
+ p->utime += cputime_to_nsecs(cputime);
account_group_user_time(p, cputime);
p->gtime += cputime_to_nsecs(cputime);
cputime_t cputime, enum cpu_usage_stat index)
{
/* Add system time to process. */
- p->stime += cputime;
+ p->stime += cputime_to_nsecs(cputime);
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
struct signal_struct *sig = tsk->signal;
- cputime_t utime, stime;
+ u64 utime, stime;
struct task_struct *t;
unsigned int seq, nextseq;
unsigned long flags;
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
*ut = p->utime;
*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
* Perform (stime * rtime) / total, but avoid multiplication overflow by
* losing precision when the numbers are big.
*/
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
u64 scaled;
* followed by a 64/32->64 divide.
*/
scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
- return (__force cputime_t) scaled;
+ return scaled;
}
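The overflow comment above refers to the precision-dropping loop that precedes this multiply/divide in the full function (elided from the hunk). A self-contained sketch of the idea, under the assumption that inputs are arbitrary 64-bit values: rebalance the product losslessly while possible, otherwise shed low-order bits from rtime and total together so the ratio, and hence the result, is preserved.

#include <stdint.h>

static void model_swap(uint64_t *a, uint64_t *b)
{
	uint64_t t = *a; *a = *b; *b = t;
}

/* Sketch of scale_stime()'s strategy for stime * rtime / total. */
static uint64_t model_scale_stime(uint64_t stime, uint64_t rtime,
				  uint64_t total)
{
	for (;;) {
		/* Keep rtime the larger factor; the product is symmetric. */
		if (stime > rtime)
			model_swap(&stime, &rtime);

		/* The final divide needs total in 32 bits. */
		if (total >> 32)
			goto drop_precision;

		/* If rtime fits, stime (the smaller) fits too: done. */
		if (!(rtime >> 32))
			break;

		/* Rebalance losslessly while stime has headroom. */
		if (!(stime >> 31)) {
			stime <<= 1;
			rtime >>= 1;
			continue;
		}

drop_precision:
		/* Halve rtime and total together: ratio unchanged. */
		rtime >>= 1;
		total >>= 1;
	}

	if (!total)	/* defensive guard for this model only */
		return 0;

	/* 32x32->64 multiply, then 64/32 divide: no overflow. */
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}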
/*
*/
static void cputime_adjust(struct task_cputime *curr,
struct prev_cputime *prev,
- cputime_t *ut, cputime_t *st)
+ u64 *ut, u64 *st)
{
- cputime_t rtime, stime, utime;
+ u64 rtime, stime, utime;
unsigned long flags;
/* Serialize concurrent callers such that we can honour our guarantees */
raw_spin_lock_irqsave(&prev->lock, flags);
- rtime = nsecs_to_cputime(curr->sum_exec_runtime);
+ rtime = curr->sum_exec_runtime;
/*
* This is possible under two circumstances:
goto update;
}
- stime = scale_stime((__force u64)stime, (__force u64)rtime,
- (__force u64)(stime + utime));
+ stime = scale_stime(stime, rtime, stime + utime);
update:
/*
raw_spin_unlock_irqrestore(&prev->lock, flags);
}
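For intuition on what cputime_adjust() computes: the tick-sampled stime/utime split is rescaled so it sums to the precise scheduler runtime, and since sum_exec_runtime was already in nanoseconds, rtime no longer needs a nsecs_to_cputime() round-trip. A worked example with assumed numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed tick-based samples: 40% system, 60% user. */
	uint64_t stime = 400000000ULL;	/* ns */
	uint64_t utime = 600000000ULL;	/* ns */
	/* Precise runtime from the scheduler clock, already in ns. */
	uint64_t rtime = 1200000000ULL;

	/* stime' = stime * rtime / (stime + utime) */
	uint64_t stime_adj = stime * rtime / (stime + utime);

	assert(stime_adj == 480000000ULL);		/* 40% of rtime */
	assert(rtime - stime_adj == 720000000ULL);	/* utime' = 60% */
	return 0;
}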
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime = {
.sum_exec_runtime = p->se.sum_exec_runtime,
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime;
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
-void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
- cputime_t delta;
+ u64 delta;
unsigned int seq;
if (!vtime_accounting_enabled()) {
if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
continue;
- delta = vtime_delta(t);
+ delta = cputime_to_nsecs(vtime_delta(t));
/*
* Task runs either in user or kernel space, add pending nohz time to
rcu_read_unlock();
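vtime_delta() still hands back a cputime_t at this point in the series, so the pending delta is converted before being folded into the nanosecond totals. On jiffies-backed configurations, cputime_to_nsecs() is just the inverse of nsecs_to_jiffies(); a hedged model:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100ULL			/* assumed tick rate for this sketch */

/* Model of cputime_to_nsecs() when cputime_t counts jiffies. */
static uint64_t model_cputime_to_nsecs(uint64_t cputime_jiffies)
{
	return cputime_jiffies * (NSEC_PER_SEC / HZ);
}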
task_cputime_t(tsk, &utime, &stime);
- info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
- info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
+ info.si_utime = cputime_to_clock_t(utime + nsecs_to_cputime(tsk->signal->utime));
+ info.si_stime = cputime_to_clock_t(stime + nsecs_to_cputime(tsk->signal->stime));
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
void do_sys_times(struct tms *tms)
{
- cputime_t tgutime, tgstime, cutime, cstime;
+ u64 tgutime, tgstime, cutime, cstime;
thread_group_cputime_adjusted(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
- tms->tms_utime = cputime_to_clock_t(tgutime);
- tms->tms_stime = cputime_to_clock_t(tgstime);
- tms->tms_cutime = cputime_to_clock_t(cutime);
- tms->tms_cstime = cputime_to_clock_t(cstime);
+ tms->tms_utime = nsec_to_clock_t(tgutime);
+ tms->tms_stime = nsec_to_clock_t(tgstime);
+ tms->tms_cutime = nsec_to_clock_t(cutime);
+ tms->tms_cstime = nsec_to_clock_t(cstime);
}
SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
struct task_struct *t;
unsigned long flags;
- cputime_t tgutime, tgstime, utime, stime;
+ u64 tgutime, tgstime, utime, stime;
unsigned long maxrss = 0;
memset((char *)r, 0, sizeof (*r));
unlock_task_sighand(p, &flags);
out:
- cputime_to_timeval(utime, &r->ru_utime);
- cputime_to_timeval(stime, &r->ru_stime);
+ r->ru_utime = ns_to_timeval(utime);
+ r->ru_stime = ns_to_timeval(stime);
if (who != RUSAGE_CHILDREN) {
struct mm_struct *mm = get_task_mm(p);