u8 state;
};
+/*
+ * Include queued spinlock statistics code
+ */
+#include "qspinlock_stat.h"
+
/*
* By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
+ int ret = !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+ (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
- return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
- (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
+ qstat_inc(qstat_pv_lock_stealing, ret);
+ return ret;
}
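
For reference, the qstat_inc() calls added above assume a conditional per-CPU
counter increment provided by qspinlock_stat.h. The following is only a minimal
sketch of that helper (hypothetical shape, not the exact kernel code; the real
header also exposes the counters through debugfs), shown to make the new
statistics hooks easier to follow:

#include <linux/percpu.h>

enum qlock_stats {
	qstat_pv_lock_slowpath,		/* # of locking ops taking the PV slowpath */
	qstat_pv_lock_stealing,		/* # of successful lock stealing attempts  */
	qstat_num,			/* total number of statistical counters    */
};

static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);

static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
	/* Count the event only when the condition actually occurred */
	if (cond)
		this_cpu_inc(qstats[stat]);
}

With a helper of this shape, qstat_inc(qstat_pv_lock_stealing, ret) above is a
no-op on a failed stealing attempt and bumps the per-CPU counter only when the
cmpxchg() succeeded.
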
/*
}
#endif /* _Q_PENDING_BITS == 8 */
-/*
- * Include queued spinlock statistics code
- */
-#include "qspinlock_stat.h"
-
/*
* Lock and MCS node addresses hash table for fast lookup
*
if (READ_ONCE(pn->state) == vcpu_hashed)
lp = (struct qspinlock **)1;
+ /*
+ * Tracking # of slowpath locking operations
+ */
+ qstat_inc(qstat_pv_lock_slowpath, true);
+
for (;; waitcnt++) {
/*
* Set correct vCPU state to be used by queue node wait-early