locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner()...
authorPan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Wed, 2 Nov 2016 09:08:30 +0000 (05:08 -0400)
committerIngo Molnar <mingo@kernel.org>
Tue, 22 Nov 2016 11:48:10 +0000 (12:48 +0100)
An over-committed guest with more vCPUs than pCPUs suffers heavy overhead
in the two spin_on_owner loops. This is caused by the lock holder
preemption issue.

Break out of the spin loop if the vCPU running the lock holder has been
preempted, i.e. if vcpu_is_preempted(cpu) returns true.

test-case:
perf record -a perf bench sched messaging -g 400 -p && perf report

before patch:
20.68%  sched-messaging  [kernel.vmlinux]  [k] mutex_spin_on_owner
 8.45%  sched-messaging  [kernel.vmlinux]  [k] mutex_unlock
 4.12%  sched-messaging  [kernel.vmlinux]  [k] system_call
 3.01%  sched-messaging  [kernel.vmlinux]  [k] system_call_common
 2.83%  sched-messaging  [kernel.vmlinux]  [k] copypage_power7
 2.64%  sched-messaging  [kernel.vmlinux]  [k] rwsem_spin_on_owner
 2.00%  sched-messaging  [kernel.vmlinux]  [k] osq_lock

after patch:
 9.99%  sched-messaging  [kernel.vmlinux]  [k] mutex_unlock
 5.28%  sched-messaging  [unknown]         [H] 0xc0000000000768e0
 4.27%  sched-messaging  [kernel.vmlinux]  [k] __copy_tofrom_user_power7
 3.77%  sched-messaging  [kernel.vmlinux]  [k] copypage_power7
 3.24%  sched-messaging  [kernel.vmlinux]  [k] _raw_write_lock_irq
 3.02%  sched-messaging  [kernel.vmlinux]  [k] system_call
 2.69%  sched-messaging  [kernel.vmlinux]  [k] wait_consider_task

Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: linuxppc-dev@lists.ozlabs.org
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: rkrcmar@redhat.com
Cc: virtualization@lists.linux-foundation.org
Cc: will.deacon@arm.com
Cc: xen-devel-request@lists.xenproject.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1478077718-37424-4-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/mutex.c
kernel/locking/rwsem-xadd.c

index c0731685603fbcf871bafa967a66e08766fa6fa6..9b349619f431443479fdbe42fe162715d1e4e3fc 100644 (file)
@@ -364,7 +364,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
                 */
                barrier();
 
-               if (!owner->on_cpu || need_resched()) {
+               /*
+                * Use vcpu_is_preempted to detect lock holder preemption issue.
+                */
+               if (!owner->on_cpu || need_resched() ||
+                               vcpu_is_preempted(task_cpu(owner))) {
                        ret = false;
                        break;
                }
@@ -389,8 +393,13 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 
        rcu_read_lock();
        owner = __mutex_owner(lock);
+
+       /*
+        * As lock holder preemption issue, we both skip spinning if task is not
+        * on cpu or its cpu is preempted
+        */
        if (owner)
-               retval = owner->on_cpu;
+               retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
        rcu_read_unlock();
 
        /*
index 263e7449282ab2fc59251349089747e05b6e4698..631506004f9e04fa3b0b03a17b3d2f9909c7fe61 100644 (file)
@@ -336,7 +336,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
                goto done;
        }
 
-       ret = owner->on_cpu;
+       /*
+        * As lock holder preemption issue, we both skip spinning if task is not
+        * on cpu or its cpu is preempted
+        */
+       ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 done:
        rcu_read_unlock();
        return ret;
@@ -362,8 +366,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
                 */
                barrier();
 
-               /* abort spinning when need_resched or owner is not running */
-               if (!owner->on_cpu || need_resched()) {
+               /*
+                * abort spinning when need_resched or owner is not running or
+                * owner's cpu is preempted.
+                */
+               if (!owner->on_cpu || need_resched() ||
+                               vcpu_is_preempted(task_cpu(owner))) {
                        rcu_read_unlock();
                        return false;
                }