projects
/
GitHub
/
mt8127
/
android_kernel_alcatel_ttab.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
rcu: Remove waitqueue usage for cpu, node, and boost kthreads
[GitHub/mt8127/android_kernel_alcatel_ttab.git]
/
kernel
/
rcutree_plugin.h
diff --git
a/kernel/rcutree_plugin.h
b/kernel/rcutree_plugin.h
index 3f6559a5f5cd7911fac44ac500b670e125e73157..049f2787a9840cf0c944aeda59afeffbee6f57a5 100644
(file)
--- a/
kernel/rcutree_plugin.h
+++ b/
kernel/rcutree_plugin.h
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-					 rnp->exp_tasks);
+		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1274,14 +1273,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 		rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
 }
-/*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
- init_waitqueue_head(&rnp->boost_wq);
-}
-
 /*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist.  We only create this kthread for preemptible RCU.
@@ -1306,7 +1297,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
- wake_up_process(t);
 	sp.sched_priority = RCU_KTHREAD_PRIO;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	return 0;
@@ -1328,10 +1318,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index)
@@ -1520,7 +1506,6 @@ int rcu_needs_cpu(int cpu)
 {
 	int c = 0;
 	int snap;
- int snap_nmi;
 	int thatcpu;

 	/* Check for being in the holdoff period. */
@@ -1531,10 +1516,10 @@ int rcu_needs_cpu(int cpu)
 	for_each_online_cpu(thatcpu) {
 		if (thatcpu == cpu)
 			continue;
-		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
-		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
+		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
+						     thatcpu).dynticks);
 		smp_mb(); /* Order sampling of snap with end of grace period. */
-		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
+		if ((snap & 0x1) != 0) {
 			per_cpu(rcu_dyntick_drain, cpu) = 0;
 			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 			return rcu_needs_cpu_quick_check(cpu);