locking/pvqspinlock: Enable slowpath locking count tracking
author Waiman Long <Waiman.Long@hpe.com>
Thu, 10 Dec 2015 20:17:45 +0000 (15:17 -0500)
committer Ingo Molnar <mingo@kernel.org>
Mon, 29 Feb 2016 09:02:42 +0000 (10:02 +0100)
This patch enables the tracking of the number of slowpath locking
operations performed. This can be used to compare against the number
of lock stealing operations to see what percentage of locks are stolen
versus acquired via the regular slowpath.

Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Douglas Hatch <doug.hatch@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1449778666-13593-2-git-send-email-Waiman.Long@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/qspinlock_paravirt.h
kernel/locking/qspinlock_stat.h

index 78f04a232d3a61fc1cb0caddfb72c2a58af22926..21ede57f68b320594f874e4ff0a1b60974fc67f0 100644 (file)
@@ -400,6 +400,11 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
        if (READ_ONCE(pn->state) == vcpu_hashed)
                lp = (struct qspinlock **)1;
 
+       /*
+        * Tracking # of slowpath locking operations
+        */
+       qstat_inc(qstat_pv_lock_slowpath, true);
+
        for (;; waitcnt++) {
                /*
                 * Set correct vCPU state to be used by queue node wait-early
index 869988d4612416c5e178ec9f3a91d83cfd7b666a..eb2a2c9bc3fc15d181c9e5981648974dc7aec65c 100644 (file)
@@ -22,6 +22,7 @@
  *   pv_kick_wake      - # of vCPU kicks used for computing pv_latency_wake
  *   pv_latency_kick   - average latency (ns) of vCPU kick operation
  *   pv_latency_wake   - average latency (ns) from vCPU kick to wakeup
+ *   pv_lock_slowpath  - # of locking operations via the slowpath
  *   pv_lock_stealing  - # of lock stealing operations
  *   pv_spurious_wakeup        - # of spurious wakeups
  *   pv_wait_again     - # of vCPU wait's that happened after a vCPU kick
@@ -45,6 +46,7 @@ enum qlock_stats {
        qstat_pv_kick_wake,
        qstat_pv_latency_kick,
        qstat_pv_latency_wake,
+       qstat_pv_lock_slowpath,
        qstat_pv_lock_stealing,
        qstat_pv_spurious_wakeup,
        qstat_pv_wait_again,
@@ -70,6 +72,7 @@ static const char * const qstat_names[qstat_num + 1] = {
        [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
        [qstat_pv_latency_kick]    = "pv_latency_kick",
        [qstat_pv_latency_wake]    = "pv_latency_wake",
+       [qstat_pv_lock_slowpath]   = "pv_lock_slowpath",
        [qstat_pv_lock_stealing]   = "pv_lock_stealing",
        [qstat_pv_wait_again]      = "pv_wait_again",
        [qstat_pv_wait_early]      = "pv_wait_early",