locking/qspinlock: Use _acquire/_release() versions of cmpxchg() & xchg()
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index e2aadbc7151f4cd69b8745e80a0af403257f1678..39e1cb201b8eaa00ffe759ce7147cc8b3647fb5d 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -12,8 +12,9 @@
  * GNU General Public License for more details.
  *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
- * Authors: Waiman Long <waiman.long@hp.com>
+ * Authors: Waiman Long <waiman.long@hpe.com>
  */
 #ifndef __ASM_GENERIC_QSPINLOCK_H
 #define __ASM_GENERIC_QSPINLOCK_H
@@ -62,7 +63,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
        if (!atomic_read(&lock->val) &&
-          (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+          (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
                return 1;
        return 0;
 }
@@ -77,7 +78,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
        u32 val;
 
-       val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+       val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
        if (likely(val == 0))
                return;
        queued_spin_lock_slowpath(lock, val);
@@ -93,7 +94,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
        /*
         * smp_mb__before_atomic() in order to guarantee release semantics
         */
-       smp_mb__before_atomic_dec();
+       smp_mb__before_atomic();
        atomic_sub(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
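
For readers unfamiliar with the ordering this patch relies on, the sketch below is a minimal, stand-alone illustration in C11 atomics, not kernel code: a successful cmpxchg on the lock fast path only needs acquire ordering, and the unlock path only needs release ordering, instead of the full barriers implied by a plain cmpxchg(). The toy_lock/toy_trylock/toy_unlock names are invented for this example and the C11 atomics API stands in for the kernel's atomic_*() helpers.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy spinlock, for illustration only. */
struct toy_lock { atomic_uint val; };

static bool toy_trylock(struct toy_lock *l)
{
	unsigned int expected = 0;

	/*
	 * Acquire ordering on success: everything in the critical section
	 * is ordered after the lock acquisition. The failure ordering can
	 * be relaxed because no lock was taken.
	 */
	return atomic_compare_exchange_strong_explicit(&l->val, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void toy_unlock(struct toy_lock *l)
{
	/*
	 * Release ordering: everything in the critical section is ordered
	 * before the lock is observed as free by the next owner.
	 */
	atomic_store_explicit(&l->val, 0, memory_order_release);
}

int main(void)
{
	struct toy_lock lock = { 0 };

	if (toy_trylock(&lock)) {
		/* critical section */
		toy_unlock(&lock);
	}
	return 0;
}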