/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef CONFIG_SMP

/* Pick the lightest store-store barrier the subarch supports. */
#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")

#define smp_mb()	mb()
#define smp_rmb()	__lwsync()
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#else
/* UP: only the compiler can reorder; a compiler barrier suffices. */
#define __lwsync()	barrier()

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif /* CONFIG_SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

/*
 * Release store: lwsync orders all prior cacheable accesses before the
 * store to *p.  Only valid on types the atomic machinery accepts
 * (enforced by compiletime_assert_atomic_type).
 */
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__lwsync();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

/*
 * Acquire load: the load from *p is ordered (by lwsync) before all
 * subsequent cacheable accesses.  Evaluates to the loaded value.
 */
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__lwsync();							\
	___p1;								\
})

/* Full barrier around atomic RMW ops; sync is required (see above). */
#define smp_mb__before_atomic()     smp_mb()
#define smp_mb__after_atomic()      smp_mb()

#endif /* _ASM_POWERPC_BARRIER_H */