/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
/*
 * mf is ia64's full fence; per the note above it orders both loads and
 * stores, so the read-only and write-only variants simply reuse it.
 */
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()

/* Barriers for DMA-coherent memory accesses: both map onto the full fence. */
#define dma_rmb()	mb()
#define dma_wmb()	mb()

#ifdef CONFIG_SMP
# define smp_mb()	mb()
#else
/* UP build: only the compiler can reorder, so a compiler barrier suffices. */
# define smp_mb()	barrier()
#endif

/*
 * NOTE(review): only a compiler barrier is issued around atomics here —
 * presumably ia64's atomic operations already provide the required CPU
 * ordering; confirm against the arch atomic implementation.
 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so no asm trickery is needed: WRITE_ONCE() below already
 * carries the release semantics; barrier() only constrains the compiler.
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

/*
 * Acquire load: GCC emits ld.acq for the volatile read done by
 * READ_ONCE(), so only a compiler barrier is needed afterwards.
 */
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/*
 * The group barrier in front of the rsm & ssm are necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */