account_guest_time(tsk, acct->gtime);
if (acct->steal_time)
- account_steal_time(acct->steal_time);
+ account_steal_time(cputime_to_nsecs(acct->steal_time));
if (acct->idle_time)
account_idle_time(acct->idle_time);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
- account_steal_time(steal);
+ account_steal_time(cputime_to_nsecs(steal));
}
return virt_timer_forward(user + guest + system + hardirq + softirq);
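The arch-side hunks above only change the call sites: the architecture keeps its steal timer in native cputime units and converts to nanoseconds right at the boundary to the generic accounting code, which after this patch deals in u64 nanoseconds. Below is a minimal standalone sketch (ordinary userspace C, not kernel code) of that caller-side pattern; the 1/4096-microsecond unit and the simplified single-counter stand-ins for cputime_to_nsecs(), account_steal_time() and the arch steal timer are assumptions made for the example.

/*
 * Standalone illustration of converting an arch-native steal timer to
 * nanoseconds at the call site, as the patched arch code does.
 * Assumption: 1 cputime unit = 1/4096 us (a made-up granularity here).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cputime_t;             /* arch-native unit */

static uint64_t cputime_to_nsecs(cputime_t ct)
{
	return ct * 1000 / 4096;        /* assumed 4096 units per microsecond */
}

static uint64_t steal_ns;               /* stand-in for cpustat[CPUTIME_STEAL] */

static void account_steal_time(uint64_t nsecs)
{
	steal_ns += nsecs;              /* new interface: nanoseconds in */
}

int main(void)
{
	cputime_t steal_timer = 8192;   /* pretend 2 us of steal accumulated */

	if ((int64_t)steal_timer > 0) {
		/* convert at the call site, keep the timer in native units */
		account_steal_time(cputime_to_nsecs(steal_timer));
		steal_timer = 0;
	}
	printf("accounted %llu ns of steal time\n",
	       (unsigned long long)steal_ns);
	return 0;
}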
extern void account_system_time(struct task_struct *, int, cputime_t);
extern void account_system_index_time(struct task_struct *, cputime_t,
enum cpu_usage_stat);
-extern void account_steal_time(cputime_t);
+extern void account_steal_time(u64);
extern void account_idle_time(cputime_t);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
* Account for involuntary wait time.
* @cputime: the cpu time spent in involuntary wait
*/
-void account_steal_time(cputime_t cputime)
+void account_steal_time(u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
- cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime);
+ cpustat[CPUTIME_STEAL] += cputime;
}
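With the prototype now taking u64 nanoseconds, the function body loses its conversion and becomes a plain accumulation into the per-CPU CPUTIME_STEAL counter. A simplified userspace model of that behaviour follows; the per-CPU cpustat array is reduced to a single CPU, and the sample values are invented.

/*
 * Minimal model of the rewritten account_steal_time(): the parameter is
 * already in nanoseconds, so the function only accumulates it and any unit
 * conversion is the caller's problem.
 */
#include <stdint.h>
#include <stdio.h>

enum cpu_usage_stat { CPUTIME_STEAL, NR_STATS };

static uint64_t cpustat[NR_STATS];      /* stand-in for kcpustat_this_cpu->cpustat */

static void account_steal_time(uint64_t cputime)
{
	cpustat[CPUTIME_STEAL] += cputime;  /* nanoseconds in, no conversion */
}

int main(void)
{
	account_steal_time(1500000);        /* 1.5 ms reported by a hypervisor */
	account_steal_time(250000);         /* another 250 us */
	printf("CPUTIME_STEAL = %llu ns\n",
	       (unsigned long long)cpustat[CPUTIME_STEAL]);
	return 0;
}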
/*
#ifdef CONFIG_PARAVIRT
if (static_key_false(&paravirt_steal_enabled)) {
cputime_t steal_cputime;
- u64 steal;
+ u64 steal, rounded;
steal = paravirt_steal_clock(smp_processor_id());
steal -= this_rq()->prev_steal_time;
steal_cputime = min(nsecs_to_cputime(steal), maxtime);
- account_steal_time(steal_cputime);
- this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);
+ rounded = cputime_to_nsecs(steal_cputime);
+ account_steal_time(rounded);
+ this_rq()->prev_steal_time += rounded;
return steal_cputime;
}
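The rounded value is reused deliberately: nsecs_to_cputime() truncates the paravirt steal clock delta to whole cputime units, so converting the clamped result back to nanoseconds yields exactly what gets accounted, and advancing prev_steal_time by that amount leaves the sub-unit remainder pending for the next call instead of dropping it. A standalone sketch of that carry-over, assuming a hypothetical 1 ms cputime granularity and simplified stand-ins for the runqueue field and the paravirt clock:

/*
 * Userspace sketch of the clamp/round/carry logic above.
 * Assumption: 1 cputime unit = 1 ms (TICK_NSEC below is made up).
 */
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL            /* assumed granularity: 1 ms */

typedef uint64_t cputime_t;

static cputime_t nsecs_to_cputime(uint64_t ns) { return ns / TICK_NSEC; }
static uint64_t cputime_to_nsecs(cputime_t ct) { return ct * TICK_NSEC; }

static uint64_t steal_ns;               /* stand-in for cpustat[CPUTIME_STEAL] */
static uint64_t prev_steal_time;        /* stand-in for this_rq()->prev_steal_time */
static uint64_t steal_clock;            /* stand-in for paravirt_steal_clock() */

static void account_steal_time(uint64_t nsecs)
{
	steal_ns += nsecs;
}

static cputime_t steal_account(cputime_t maxtime)
{
	uint64_t steal = steal_clock - prev_steal_time;
	cputime_t steal_cputime = nsecs_to_cputime(steal);
	uint64_t rounded;

	if (steal_cputime > maxtime)
		steal_cputime = maxtime;

	rounded = cputime_to_nsecs(steal_cputime);
	account_steal_time(rounded);
	prev_steal_time += rounded;         /* advance only by what was accounted */
	return steal_cputime;
}

int main(void)
{
	steal_clock = 2700000;              /* hypervisor reports 2.7 ms stolen */
	printf("first call:  %llu ticks\n",
	       (unsigned long long)steal_account(10));   /* 2 ticks, 0.7 ms carried */

	steal_clock = 3100000;              /* 0.4 ms more, plus the 0.7 ms remainder */
	printf("second call: %llu ticks\n",
	       (unsigned long long)steal_account(10));   /* 1 tick accounted now */
	return 0;
}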