locking/pvqspinlock, x86: Optimize the PV unlock code path
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index f0450ff4829b6c1308d4768b2ae3a7c575b1cf51..4bd323d38c60bce5f5f582e2acf1027d80b023fb 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -308,23 +308,14 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
 }
 
 /*
- * PV version of the unlock function to be used in stead of
- * queued_spin_unlock().
+ * PV versions of the unlock fastpath and slowpath functions to be used
+ * instead of queued_spin_unlock().
  */
-__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+__visible void
+__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
-       u8 locked;
-
-       /*
-        * We must not unlock if SLOW, because in that case we must first
-        * unhash. Otherwise it would be possible to have multiple @lock
-        * entries, which would be BAD.
-        */
-       locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
-       if (likely(locked == _Q_LOCKED_VAL))
-               return;
 
        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
@@ -363,12 +354,32 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
         */
        pv_kick(node->cpu);
 }
+
 /*
  * Include the architecture specific callee-save thunk of the
  * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() near the top of the file to make sure
- * that the callee-save thunk and the real unlock function are close
- * to each other sharing consecutive instruction cachelines.
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
  */
 #include <asm/qspinlock_paravirt.h>
 
+#ifndef __pv_queued_spin_unlock
+__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+       u8 locked;
+
+       /*
+        * We must not unlock if SLOW, because in that case we must first
+        * unhash. Otherwise it would be possible to have multiple @lock
+        * entries, which would be BAD.
+        */
+       locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+       if (likely(locked == _Q_LOCKED_VAL))
+               return;
+
+       __pv_queued_spin_unlock_slowpath(lock, locked);
+}
+#endif /* __pv_queued_spin_unlock */
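
With this restructuring, the common case of a PV unlock is a single cmpxchg() of the
lock byte from _Q_LOCKED_VAL to 0; only when the cmpxchg does not find _Q_LOCKED_VAL
(normally because the byte holds _Q_SLOW_VAL, i.e. a waiter vCPU has been halted and
the lock has been hashed) does the out-of-line slowpath run to unhash the lock and
pv_kick() the waiter. Because the generic C fastpath is now guarded by
#ifndef __pv_queued_spin_unlock, the header included via <asm/qspinlock_paravirt.h>
can supply its own hand-tuned unlock fastpath instead. The x86-64 side of this commit
does that in assembly; the snippet below is only a simplified, illustrative sketch of
that approach (the PV_UNLOCK/PV_UNLOCK_SLOWPATH names and the exact register handling
are assumptions here, following the callee-save convention of
PV_CALLEE_SAVE_REGS_THUNK, and may differ in detail from the real
arch/x86/include/asm/qspinlock_paravirt.h):

/* Sketch of an arch override in <asm/qspinlock_paravirt.h>, 64-bit only. */

/* Generate the register-saving thunk for the C slowpath. */
PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);

/* Suppress the generic C fallback in qspinlock_paravirt.h. */
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock

#define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"

/*
 * Hand-written callee-save unlock fastpath, equivalent to:
 *
 *	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 *	if (locked != _Q_LOCKED_VAL)
 *		__pv_queued_spin_unlock_slowpath(lock, locked);
 */
asm(".pushsection .text;"
    ".globl " PV_UNLOCK ";"
    ".align 4,0x90;"
    PV_UNLOCK ": "
    "push  %rdx;"
    "mov   $0x1,%eax;"			/* %al = _Q_LOCKED_VAL             */
    "xor   %edx,%edx;"			/* %dl = 0 (unlocked)              */
    "lock cmpxchg %dl,(%rdi);"		/* try the uncontended release     */
    "cmp   $0x1,%al;"
    "jne   .slowpath;"			/* old value was not _Q_LOCKED_VAL */
    "pop   %rdx;"
    "ret;"
    ".slowpath: "
    "push   %rsi;"
    "movzbl %al,%esi;"			/* 2nd argument = old lock value   */
    "call " PV_UNLOCK_SLOWPATH ";"	/* unhash the lock, kick the waiter */
    "pop    %rsi;"
    "pop    %rdx;"
    "ret;"
    ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
    ".popsection");

Keeping the slowpath out of line means the code that every unlock executes stays small
enough to be written (or patched in) as a handful of instructions, while the rare
_Q_SLOW_VAL case still goes through the full C slowpath with its sanity WARN(), unhash
and pv_kick().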