        rcu.qlowmark=   [KNL,BOOT] Set threshold of queued
                        RCU callbacks below which batch limiting is re-enabled.

-       rcu.rsinterval= [KNL,BOOT,SMP] Set the number of additional
-                       RCU callbacks to queued before forcing reschedule
-                       on all cpus.
-
        rdinit=         [KNL]
                        Format: <full_path>
                        Run specified binary instead of /init from the ramdisk,
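For context, the surviving qhimark/qlowmark pair forms a hysteresis on the per-CPU callback queue: once more than qhimark callbacks are queued, batch limiting is suspended and a quiescent state is forced; limiting is restored only after the queue drains back below qlowmark. A condensed sketch of that logic, assuming the __call_rcu()/rcu_do_batch() code of this era (illustrative, not a verbatim excerpt):

        /* Enqueue side (__call_rcu): queue grew past qhimark, so lift the
         * batch limit and press the other CPUs for a quiescent state. */
        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, rcp);
        }

        /* Dequeue side (rcu_do_batch): queue drained back below qlowmark,
         * so restore the normal batch limit. */
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

The next two hunks are against struct rcu_ctrlblk and struct rcu_data (include/linux/rcupdate.h in this era).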
        long completed;         /* Number of the last completed batch */
        int next_pending;       /* Is the next batch already waiting? */

+       int signaled;           /* Was a resched IPI already sent for this batch? */
+
        spinlock_t lock ____cacheline_internodealigned_in_smp;
        cpumask_t cpumask;      /* CPUs that need to switch in order */
                                /* for current batch to proceed. */
        long blimit;            /* Upper limit on a processed batch */
        int cpu;
        struct rcu_head barrier;
-#ifdef CONFIG_SMP
-       long last_rs_qlen;      /* qlen during the last resched */
-#endif
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
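The remaining hunks touch kernel/rcupdate.c. For reference, blimit caps how many callbacks a single rcu_do_batch() pass will invoke before the softirq reschedules itself; a minimal sketch of that loop, assuming this era's donelist/qlen bookkeeping rather than quoting it verbatim:

        struct rcu_head *next, *list = rdp->donelist;
        int count = 0;

        while (list) {
                next = list->next;
                list->func(list);               /* run one RCU callback */
                list = next;
                if (++count >= rdp->blimit)     /* honor the batch limit */
                        break;
        }
        rdp->donelist = list;                   /* requeue the remainder */
        rdp->qlen -= count;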
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;
-#ifdef CONFIG_SMP
-static int rsinterval = 1000;
-#endif
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
        int cpu;
        cpumask_t cpumask;
        set_need_resched();
-       if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
-               rdp->last_rs_qlen = rdp->qlen;
+       if (unlikely(!rcp->signaled)) {
+               rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 */
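The hunk's trailing context stops inside the comment; the unchanged lines that follow in force_quiescent_state() are the ones that actually send the IPIs. Reconstructed from this era's rcupdate.c as a sketch (cpu_clear() and for_each_cpu_mask() were the cpumask helpers of the day):

                cpumask = rcp->cpumask;
                cpu_clear(rdp->cpu, cpumask);   /* don't IPI ourselves */
                for_each_cpu_mask(cpu, cpumask)
                        smp_send_reschedule(cpu);

The final hunk below is in rcu_start_batch(): when the next batch starts, ->signaled is re-armed so that one more round of IPIs can go out.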
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
+               rcp->signaled = 0;
        }
}
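The net effect of the change: the old code re-broadcast reschedule IPIs every rsinterval additional queued callbacks while a grace period dragged on, whereas the one-shot ->signaled flag allows at most one broadcast per batch. A self-contained userspace model of the two policies (plain C, not kernel code; the counts are illustrative):

#include <stdio.h>

int main(void)
{
        const int rsinterval = 1000;    /* the removed tunable's default */
        long qlen, last_rs_qlen = 0;
        int signaled = 0;
        int old_ipis = 0, new_ipis = 0;

        /* Suppose 10000 callbacks pile up during one stalled batch. */
        for (qlen = 1; qlen <= 10000; qlen++) {
                if (qlen - last_rs_qlen > rsinterval) {         /* old policy */
                        last_rs_qlen = qlen;
                        old_ipis++;
                }
                if (!signaled) {                                /* new policy */
                        signaled = 1;
                        new_ipis++;
                }
        }
        printf("old: %d broadcasts, new: %d\n", old_ipis, new_ipis);
        return 0;
}

With these defaults the model prints 9 broadcasts for the old scheme against 1 for the new one.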
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
-#ifdef CONFIG_SMP
-module_param(rsinterval, int, 0);
-#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
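Since call_rcu() is among the exports, the canonical usage pattern, for completeness; struct foo and both functions are hypothetical, the pattern itself is the standard one:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {                    /* hypothetical RCU-protected object */
        int data;
        struct rcu_head rcu;    /* handle handed to call_rcu() */
};

/* Runs once a grace period has elapsed; the object is safe to free. */
static void foo_reclaim(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        kfree(fp);
}

/* Updater side: after unpublishing fp, defer its reclamation. */
static void foo_delete(struct foo *fp)
{
        call_rcu(&fp->rcu, foo_reclaim);
}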