arch,arm: Convert smp_mb__*()
author     Peter Zijlstra <peterz@infradead.org>
           Wed, 12 Mar 2014 16:11:00 +0000 (17:11 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 18 Apr 2014 09:40:32 +0000 (11:40 +0200)
ARM uses ll/sc primitives that do not imply barriers for all regular
atomic ops, therefore smp_mb__{before,after}_atomic() need to be full
barriers.
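
For illustration (a hypothetical caller, not part of this commit), the
pattern the full barrier protects looks like:

	obj->ready = 1;			/* plain store */
	smp_mb__before_atomic();	/* full smp_mb() on ARM */
	atomic_inc(&obj->refs);		/* ll/sc atomic, no implied barrier */

Without the full barrier, a CPU that observes the incremented refcount
(with matching read-side ordering) could still see obj->ready == 0.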

Since ARM doesn't use asm-generic/barrier.h, include the required
definitions in its asm/barrier.h.
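
For reference, a sketch of the defaults that asm-generic/barrier.h
supplies to the architectures that do include it (the exact #ifndef
guards are assumed here):

	#ifndef smp_mb__before_atomic
	#define smp_mb__before_atomic()	smp_mb()
	#endif

	#ifndef smp_mb__after_atomic
	#define smp_mb__after_atomic()	smp_mb()
	#endif

ARM carries its own asm/barrier.h, so the hunk below adds the same two
definitions there.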

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-yijo7sglsl7uusbp13upcuvo@git.kernel.org
Cc: Albin Tonnerre <albin.tonnerre@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Gang <gang.chen@asianux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Victor Kamensky <victor.kamensky@linaro.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/arm/include/asm/atomic.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/bitops.h

diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9a92fd7864a841a989942048c8f6beb6ad9890c1..3040359094d93a5d4f21bc517f997ef5a6d0b277 100644
@@ -241,11 +241,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec()    smp_mb()
-#define smp_mb__after_atomic_dec()     smp_mb()
-#define smp_mb__before_atomic_inc()    smp_mb()
-#define smp_mb__after_atomic_inc()     smp_mb()
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
        long long counter;
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 2f59f74433964016003167007badbeffcbba465a..c6a3e73a6e2407215f3f9859212e41097071f492 100644
@@ -79,5 +79,8 @@ do {                                                                  \
 
 #define set_mb(var, value)     do { var = value; smp_mb(); } while (0)
 
+#define smp_mb__before_atomic()        smp_mb()
+#define smp_mb__after_atomic()         smp_mb()
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b2e298a90d76fa700df85007a23d652959912f4e..56380995f4c38364c620c0055083c877f85dcff0 100644
@@ -25,9 +25,7 @@
 
 #include <linux/compiler.h>
 #include <linux/irqflags.h>
-
-#define smp_mb__before_clear_bit()     smp_mb()
-#define smp_mb__after_clear_bit()      smp_mb()
+#include <asm/barrier.h>
 
 /*
  * These functions are the basis of our bit ops.
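
In bitops.h the clear_bit-specific barrier names give way to the
generic ones pulled in via asm/barrier.h. A hypothetical caller (the
bit and structure names are made up for illustration) would now read:

	clear_bit(MY_PENDING_BIT, &obj->flags);
	smp_mb__after_atomic();		/* was smp_mb__after_clear_bit() */
	wake_up_bit(&obj->flags, MY_PENDING_BIT);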