arch: Cleanup read_barrier_depends() and comments
arch/ia64/include/asm/barrier.h
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	ia64_mf()
#define rmb()	mb()
#define wmb()	mb()
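
/*
 * Illustrative sketch, not part of the original header: how wmb() and
 * rmb() pair up in a producer/consumer handshake per the ordering rules
 * described above.  The variables and functions below are hypothetical
 * and exist only for illustration.
 */
static int example1_data;
static int example1_flag;

static void example1_producer(int v)
{
	example1_data = v;	/* write the payload first */
	wmb();			/* order the payload store before the flag store */
	example1_flag = 1;	/* publish */
}

static int example1_consumer(void)
{
	if (example1_flag) {	/* observe the flag... */
		rmb();		/* ...order the payload read after the flag read */
		return example1_data;
	}
	return -1;		/* nothing published yet */
}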

#ifdef CONFIG_SMP
# define smp_mb()	mb()
#else
# define smp_mb()	barrier()
#endif

#define smp_rmb()	smp_mb()
#define smp_wmb()	smp_mb()

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

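/*
 * Illustrative sketch, not part of the original header: the dependent-load
 * pattern that smp_read_barrier_depends() exists for on architectures such
 * as Alpha.  On ia64 dependent loads are already ordered, hence the no-op
 * above.  The names below are hypothetical.
 */
struct example2_item {
	int payload;
};

static struct example2_item *example2_ptr;

static int example2_reader(void)
{
	struct example2_item *p = ACCESS_ONCE(example2_ptr);

	smp_read_barrier_depends();	/* no-op here, needed e.g. on Alpha */
	return p ? p->payload : -1;
}
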
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

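/*
 * Illustrative sketch, not part of the original header: intended placement
 * of smp_mb__before_atomic() ahead of a value-less atomic RMW.  The
 * variables are hypothetical, and atomic_t/atomic_dec() are assumed to come
 * from <linux/atomic.h>; this is for illustration only.
 */
static atomic_t example3_refcnt = ATOMIC_INIT(1);
static int example3_done;

static void example3_release_path(void)
{
	example3_done = 1;		/* plain store... */
	smp_mb__before_atomic();	/* ...fully ordered before the atomic op */
	atomic_dec(&example3_refcnt);
}
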
/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq; no need for asm trickery!
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

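/*
 * Illustrative sketch, not part of the original header: release/acquire
 * message passing with the macros above; on ia64 this boils down to a
 * plain st.rel/ld.acq pair.  The variables below are hypothetical.
 */
static int example4_msg;
static int example4_ready;

static void example4_send(int v)
{
	example4_msg = v;			/* prepare the message */
	smp_store_release(&example4_ready, 1);	/* publish with release semantics */
}

static int example4_recv(void)
{
	if (smp_load_acquire(&example4_ready))	/* acquire pairs with the release */
		return example4_msg;
	return -1;				/* not ready yet */
}
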
/*
 * XXX check on this ---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
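
/*
 * Illustrative sketch, not part of the original header: the classic
 * prepare-to-wait use of set_mb(), storing a flag and then fencing before
 * the wakeup condition is re-checked.  The variables are hypothetical.
 */
static int example5_sleeping;
static int example5_cond;

static int example5_prepare_wait(void)
{
	set_mb(example5_sleeping, 1);	/* store the flag, then a full mb() */
	return example5_cond;		/* condition re-check ordered after the store */
}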

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#endif /* _ASM_IA64_BARRIER_H */