rcutorture: Bind rcuperf reader/writer kthreads to CPUs
Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 12 Jan 2016 22:15:40 +0000 (14:15 -0800)
Committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 31 Mar 2016 20:37:39 +0000 (13:37 -0700)
This commit forces more deterministic behavior by binding rcuperf's
rcu_perf_reader() and rcu_perf_writer() kthreads to their respective
CPUs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/rcuperf.c

index 9d54a57bee7d077b0ad687cf5dd92e20745a23b3..7a1edf417d187d1df58042768d99dfe6bb55f4ce 100644 (file)
@@ -328,8 +328,10 @@ rcu_perf_reader(void *arg)
 {
        unsigned long flags;
        int idx;
+       long me = (long)arg;
 
        VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_perf_reader_started);
 
@@ -362,6 +364,7 @@ rcu_perf_writer(void *arg)
        WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
        WARN_ON(rcu_gp_is_normal() && gp_exp);
        WARN_ON(!wdpp);
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
                t_rcu_perf_writer_started = t;
@@ -594,7 +597,7 @@ rcu_perf_init(void)
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
-               firsterr = torture_create_kthread(rcu_perf_reader, NULL,
+               firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;