* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
sched/tracing: Add a new tracepoint for sleeptime
sched: Disable scheduler warnings during oopses
sched: Fix cgroup movement of waking process
sched: Fix cgroup movement of newly created process
sched: Fix cgroup movement of forking process
sched: Remove cfs bandwidth period check in tg_set_cfs_period()
sched: Fix load-balance lock-breaking
sched: Replace all_pinned with a generic flags field
sched: Only queue remote wakeups when crossing cache boundaries
sched: Add missing rcu_dereference() around ->real_parent usage
[S390] fix cputime overflow in uptime_proc_show
[S390] cputime: add sparse checking and cleanup
sched: Mark parent and real_parent as __rcu
sched, nohz: Fix missing RCU read lock
sched, nohz: Set the NOHZ_BALANCE_KICK flag for idle load balancer
sched, nohz: Fix the idle cpu check in nohz_idle_balance
sched: Use jump_labels for sched_feat
sched/accounting: Fix parameter passing in task_group_account_field
sched/accounting: Fix user/system tick double accounting
sched/accounting: Re-use scheduler statistics for the root cgroup
...
Fix up conflicts in
- arch/ia64/include/asm/cputime.h, include/asm-generic/cputime.h
usecs_to_cputime64() vs the sparse cleanups
- kernel/sched/fair.c, kernel/time/tick-sched.c
scheduler changes in multiple branches
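
The "sparse cleanups" mentioned above annotate the cputime types as __nocast, which is why the resolved hunks below are full of explicit __force casts: sparse then warns about any implicit conversion between cputime values and plain integers. Roughly, as a sketch of the annotated types (not part of the diff; the exact underlying type and granularity are per-architecture):

    /* Illustrative sketch only -- the real typedefs live in each arch's cputime.h */
    #ifdef __CHECKER__
    # define __nocast __attribute__((nocast))
    # define __force  __attribute__((force))
    #else
    # define __nocast
    # define __force
    #endif

    typedef unsigned long long __nocast cputime_t;    /* opaque to sparse */
    typedef unsigned long long __nocast cputime64_t;

    /* every intentional conversion needs an explicit __force cast, e.g.: */
    #define usecs_to_cputime64(__usecs) \
            ((__force cputime64_t)((__usecs) * NSEC_PER_USEC))
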
/*
* Convert cputime <-> microseconds
*/
- #define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
- #define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
- #define usecs_to_cputime64(__usecs) usecs_to_cputime(__usecs)
+ #define cputime_to_usecs(__ct) \
+ ((__force u64)(__ct) / NSEC_PER_USEC)
+ #define usecs_to_cputime(__usecs) \
+ (__force cputime_t)((__usecs) * NSEC_PER_USEC)
++#define usecs_to_cputime64(__usecs) \
++ (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
/*
* Convert cputime to microseconds and back.
*/
- static inline unsigned int
- cputime_to_usecs(const cputime_t cputime)
+ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096);
+ return (__force unsigned long long) cputime >> 12;
}
- static inline cputime_t
- usecs_to_cputime(const unsigned int m)
+ static inline cputime_t usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096;
+ return (__force cputime_t)(m * 4096ULL);
}
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
/*
* Convert cputime to milliseconds and back.
*/
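
The 4096 factor in the s390 hunk above reflects that one s390 cputime unit is 1/4096 of a microsecond, so the right-shift by 12 and the multiply by 4096 are exact inverses for whole microseconds. A quick standalone check (hypothetical, not kernel code):

    /* one second of CPU time expressed in s390 cputime units */
    unsigned long long ct = 1000000ULL * 4096;
    unsigned long long us = ct >> 12;    /* cputime_to_usecs(): back to 1000000 usec */
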
if (idle_time == -1ULL) {
/* !NO_HZ so we can rely on cpustat.idle */
- idle = kstat_cpu(cpu).cpustat.idle;
- idle = cputime64_add(idle, arch_idle_time(cpu));
+ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ idle += arch_idle_time(cpu);
} else
- idle = nsecs_to_jiffies64(1000 * idle_time);
+ idle = usecs_to_cputime64(idle_time);
return idle;
}
if (iowait_time == -1ULL)
/* !NO_HZ so we can rely on cpustat.iowait */
- iowait = kstat_cpu(cpu).cpustat.iowait;
+ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
- iowait = nsecs_to_jiffies64(1000 * iowait_time);
+ iowait = usecs_to_cputime64(iowait_time);
return iowait;
}
/*
* Convert cputime to microseconds and back.
*/
- #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
- #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
- #define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000)
+ #define cputime_to_usecs(__ct) \
- jiffies_to_usecs(cputime_to_jiffies(__ct));
-#define usecs_to_cputime(__msecs) \
- jiffies_to_cputime(usecs_to_jiffies(__msecs));
++ jiffies_to_usecs(cputime_to_jiffies(__ct))
++#define usecs_to_cputime(__usec) \
++ jiffies_to_cputime(usecs_to_jiffies(__usec))
++#define usecs_to_cputime64(__usec) \
++ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
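
One branch's version of these macros (the '-' lines above) had stray trailing semicolons inside the macro bodies; the resolved '++' lines drop them, since a semicolon in the body keeps the macro from being used inside a larger expression. A reduced, hypothetical illustration (not kernel code):

    #define TO_USECS_BROKEN(ct) ((ct) * 1000);   /* trailing ';' as in the removed lines */
    #define TO_USECS_FIXED(ct)  ((ct) * 1000)    /* resolved form                        */

    unsigned long long total_usecs(unsigned long long ct)
    {
            /* return TO_USECS_BROKEN(ct) + 1;   would not compile: expands to "...; + 1;" */
            return TO_USECS_FIXED(ct) + 1;       /* fine */
    }
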
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
- local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick.
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ * to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+ struct tick_sched *ts;
+
+ WARN_ON_ONCE(irqs_disabled());
+
++ /*
++ * Update the idle state in the scheduler domain hierarchy
++ * when tick_nohz_stop_sched_tick() is called from the idle loop.
++ * State will be updated to busy during the first busy tick after
++ * exiting idle.
++ */
++ set_cpu_sd_state_idle();
++
+ local_irq_disable();
+
+ ts = &__get_cpu_var(tick_cpu_sched);
+ /*
+ * Set ts->inidle unconditionally. Even if the system did not
+ * switch to nohz mode, the cpu frequency governors rely on the
+ * update of the idle time accounting in tick_nohz_start_idle().
+ */
+ ts->inidle = 1;
+ tick_nohz_stop_sched_tick(ts);
+
+ local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+ if (!ts->inidle)
+ return;
+
+ tick_nohz_stop_sched_tick(ts);
}
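
For context, the contract described in the tick_nohz_idle_enter() comment above amounts to an idle loop shaped roughly like this (an illustrative sketch, not part of the diff; arch_cpu_sleep() is a made-up stand-in for the architecture's low-power wait, and the exit side uses this series' tick_nohz_idle_exit()):

    void cpu_idle(void)
    {
            while (1) {
                    tick_nohz_idle_enter();         /* may stop the periodic tick */
                    while (!need_resched()) {
                            rcu_idle_enter();       /* last RCU use before sleeping */
                            arch_cpu_sleep();       /* hypothetical hlt/wfi-style wait */
                            rcu_idle_exit();        /* before any RCU use after wake-up */
                    }
                    tick_nohz_idle_exit();          /* restart the tick, account idle time */
                    schedule();
            }
    }
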
/**