#ifndef _ASM_METAG_BARRIER_H
#define _ASM_METAG_BARRIER_H

#include <asm/metag_mem.h>

#define nop()		asm volatile ("NOP")
#define mb()		wmb()
#define rmb()		barrier()
9 | ||
10 | #ifdef CONFIG_METAG_META21 | |
11 | ||
12 | /* HTP and above have a system event to fence writes */ | |
13 | static inline void wr_fence(void) | |
14 | { | |
15 | volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE; | |
16 | barrier(); | |
17 | *flushptr = 0; | |
2425ce84 | 18 | barrier(); |
1e57372e JH |
19 | } |
20 | ||
#else /* CONFIG_METAG_META21 */

/*
 * ATP doesn't have a system event to fence writes, so it is necessary to
 * flush the processor write queues as well as possibly the write combiner
 * (depending on the page being written).
 * To ensure the write queues are flushed we do 4 dummy writes to a system
 * event register (in this case the write combiner flush), which will also
 * flush the write combiner.
 */
static inline void wr_fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
	barrier();
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
	barrier();
}

#endif /* !CONFIG_METAG_META21 */

static inline void wmb(void)
{
	/* flush writes through the write combiner */
	wr_fence();
}

#ifndef CONFIG_SMP
#define fence()		do { } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else

#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
/*
 * Write to the atomic memory unlock system event register (command 0). This is
 * needed before a write to shared memory in a critical section, to prevent
 * external reordering of writes before the fence on other threads with writes
 * after the fence on this thread (and to prevent the ensuing cache-memory
 * incoherence). It is therefore ineffective if used after and on the same
 * thread as a write.
 */
static inline void fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
	barrier();
	*flushptr = 0;
	barrier();
}
#define smp_mb()	fence()
#define smp_rmb()	fence()
#define smp_wmb()	barrier()
#else
#define fence()		do { } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif /* CONFIG_METAG_SMP_WRITE_REORDERING */
#endif /* !CONFIG_SMP */
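
/*
 * Illustrative sketch only, an assumption rather than text from this header:
 * per the comment above, fence() only helps when issued on the writing
 * thread *before* the shared write; issuing it after the write (or on a
 * different thread) has no effect. 'shared_lock' and 'shared_counter' are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void update_shared(void)
{
	spin_lock(&shared_lock);
	fence();		/* must precede the write to be effective */
	shared_counter++;	/* shared write inside the critical section */
	spin_unlock(&shared_lock);
}
#endif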

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define set_mb(var, value) do { var = value; smp_mb(); } while (0)

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	ACCESS_ONCE(*p) = (v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	___p1;							\
})

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
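
/*
 * Illustrative sketch only (hypothetical 'struct obj'): ordering a plain
 * store before a non-value-returning atomic operation.
 */
#if 0	/* example only, never compiled */
static void drop_ref(struct obj *o)
{
	o->dead = 1;			/* plain store ... */
	smp_mb__before_atomic();	/* ... ordered before the atomic op */
	atomic_dec(&o->refcount);	/* non-value-returning atomic */
}
#endif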
106 | ||
1e57372e | 107 | #endif /* _ASM_METAG_BARRIER_H */ |