}
KERNEL_ATTR_RO(fscaps);
+int rcu_expedited;
+static ssize_t rcu_expedited_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", rcu_expedited);
+}
+static ssize_t rcu_expedited_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	if (kstrtoint(buf, 0, &rcu_expedited))
+		return -EINVAL;
+
+	return count;
+}
+KERNEL_ATTR_RW(rcu_expedited);
+
/*
* Make /sys/kernel/notes give the raw contents of our kernel .notes section.
*/
&kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
+	&rcu_expedited_attr.attr,
NULL
};
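With the attribute hooked into the /sys/kernel group above, the knob should surface as /sys/kernel/rcu_expedited. A minimal userspace sketch for flipping it at runtime; the helper is hypothetical, not part of this patch, and only assumes sysfs mounted at /sys:

/* Hypothetical helper: write the knob added above (likely needs root). */
#include <stdio.h>

static int set_rcu_expedited(int val)
{
	FILE *f = fopen("/sys/kernel/rcu_expedited", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);	/* flushes; nonzero if the write failed */
}

int main(void)
{
	return set_rcu_expedited(1) ? 1 : 0;
}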
}
}
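+/*
+ * When non-zero, the synchronous grace-period primitives use their
+ * expedited variants.
+ */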
+extern int rcu_expedited;
+
#endif /* __LINUX_RCU_H */
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>
#include "rcu.h"
+module_param(rcu_expedited, int, 0);
+
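Because rcupdate.c is built into the kernel image, module_param() should also make this settable at boot with the usual source-file prefix, i.e. rcupdate.rcu_expedited=1 on the command line (my reading of the module_param() conventions). The permission argument of 0 keeps it out of /sys/module, so runtime changes go through the /sys/kernel/rcu_expedited attribute added above.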
#ifdef CONFIG_PREEMPT_RCU
/*
return;
/* Once we get past the fastpath checks, same code as rcu_barrier(). */
-	rcu_barrier();
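+	/* Expedited path completes faster at the cost of more overhead. */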
+	if (rcu_expedited)
+		synchronize_rcu_expedited();
+	else
+		rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
"Illegal synchronize_sched() in RCU-sched read-side critical section");
if (rcu_blocking_is_gp())
return;
-	wait_rcu_gp(call_rcu_sched);
+	if (rcu_expedited)
+		synchronize_sched_expedited();
+	else
+		wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
"Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
if (rcu_blocking_is_gp())
return;
-	wait_rcu_gp(call_rcu_bh);
+	if (rcu_expedited)
+		synchronize_rcu_bh_expedited();
+	else
+		wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
} else {
-			synchronize_sched();
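+			/*
+			 * Wait for a normal grace period directly rather
+			 * than calling synchronize_sched(), which could now
+			 * recurse back into this function via rcu_expedited.
+			 */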
+			wait_rcu_gp(call_rcu_sched);
return;
}
"Illegal synchronize_rcu() in RCU read-side critical section");
if (!rcu_scheduler_active)
return;
-	wait_rcu_gp(call_rcu);
+	if (rcu_expedited)
+		synchronize_rcu_expedited();
+	else
+		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
udelay(trycount * num_online_cpus());
} else {
put_online_cpus();
-			synchronize_rcu();
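+			/* Direct wait avoids recursing via synchronize_rcu(). */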
+			wait_rcu_gp(call_rcu);
return;
}
}
#include <linux/delay.h>
#include <linux/srcu.h>
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
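These includes give srcu.c the rcu_expedited declaration from rcu.h; trace/events/rcu.h seems to be needed because rcu.h references the RCU tracepoints, but that is worth confirming against the base tree.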
/*
* Initialize an rcu_batch structure to empty.
*/
*/
void synchronize_srcu(struct srcu_struct *sp)
{
-	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
+	__synchronize_srcu(sp, rcu_expedited
+			       ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
+			       : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
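The expedited behavior for SRCU is approximated by letting __synchronize_srcu() poll for readers more aggressively before blocking. The two trycount constants are defined earlier in srcu.c, outside this hunk; purely as an illustration of the intended relationship (values hypothetical):

/* Hypothetical values for illustration; the real definitions live in srcu.c. */
#define SYNCHRONIZE_SRCU_TRYCOUNT	2	/* normal: block sooner */
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12	/* expedited: poll longer */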