From 0bc671d3f4bee9c31110d096ada0de52380e693d Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 29 May 2015 14:26:59 +0100
Subject: [PATCH] arm64: cmpxchg: avoid "cc" clobber in ll/sc routines

We can perform the cmpxchg comparison using eor and cbnz which avoids
the "cc" clobber for the ll/sc case and consequently for the LSE case
where we may have to fall-back on the ll/sc code at runtime.

Reviewed-by: Steve Capper
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/atomic_ll_sc.h | 14 ++++++--------
 arch/arm64/include/asm/atomic_lse.h   |  4 ++--
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f89f1e4ba577..c02684d1eab3 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -101,14 +101,13 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
 
 	asm volatile("// atomic_cmpxchg\n"
 "1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
+"	eor	%w0, %w1, %w3\n"
+"	cbnz	%w0, 2f\n"
 "	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;
@@ -179,14 +178,13 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
 
 	asm volatile("// atomic64_cmpxchg\n"
 "1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
+"	eor	%0, %1, %3\n"
+"	cbnz	%0, 2f\n"
 "	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
+	: "Lr" (old), "r" (new));
 
 	smp_mb();
 	return oldval;
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index f3cb1052ab24..a3d21e7cee4f 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	"	mov	%w[ret], w30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (w1), [new] "r" (w2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
@@ -313,7 +313,7 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	"	mov	%[ret], x30")
 	: [ret] "+r" (x0), [v] "+Q" (ptr->counter)
 	: [old] "r" (x1), [new] "r" (x2)
-	: "x30", "cc", "memory");
+	: "x30", "memory");
 
 	return x0;
 }
-- 
2.34.1