arch/sparc/include/asm/barrier_64.h
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	 brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     " membar " type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
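/* For reference, membar_safe("#StoreLoad") expands (schematically) to:
 *
 *	ba,pt	%xcc, 1f
 *	 membar	#StoreLoad
 *	1:
 *
 * i.e. the membar sits in the delay slot of an always-taken, predicted-taken
 * branch, which avoids the mispredicted-branch case described above.
 */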

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
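/* Illustrative note: the one reordering TSO still permits is store-then-load,
 * which is why mb() needs a real #StoreLoad while rmb()/wmb() reduce to
 * compiler barriers. A schematic store-buffering example (variable names
 * are illustrative only) shows the case mb() forbids:
 *
 *	CPU 0			CPU 1
 *	X = 1;			Y = 1;
 *	mb();			mb();
 *	r0 = Y;			r1 = X;
 *
 * Without the membars, r0 == 0 && r1 == 0 is a legal TSO outcome; with
 * them it is not.
 */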

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})
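/* Illustrative usage sketch (data, ready and consume() are hypothetical,
 * not defined here): a flag-passing pair built on the two macros above.
 *
 *	writer:				reader:
 *	data = compute();		if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		consume(data);
 *
 * Under TSO the release/acquire ordering is already provided by the
 * hardware, so both macros only need barrier() to constrain the compiler.
 */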

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

#endif /* !(__SPARC64_BARRIER_H) */