/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

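/*
 * sparc32 has no compare-and-swap or atomic arithmetic instructions,
 * so atomic_t and the bitops below are emulated with plain loads and
 * stores done under a spinlock.  The lock protecting a given word is
 * chosen by hashing the word's address, which spreads unrelated
 * atomics across several locks on SMP while still guaranteeing that
 * all operations on any one word serialize against each other.
 */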
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */

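/*
 * Illustration: ATOMIC_HASH() indexes the lock array with bits 8-9 of
 * the address, so an atomic_t at, say, 0xf0a3c300 uses
 * __atomic_hash[(0xf0a3c300 >> 8) & 3], i.e. lock 3.  On UP builds a
 * single dummy lock suffices, since the irqsave in each helper below
 * is what actually provides atomicity there.
 */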
#define ATOMIC_OP(op, cop)						\
int atomic_##op##_return(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter cop i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP(add, +=)

#undef ATOMIC_OP
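/*
 * For reference, ATOMIC_OP(add, +=) above expands to:
 *
 *	int atomic_add_return(int i, atomic_t *v)
 *	{
 *		int ret;
 *		unsigned long flags;
 *		spin_lock_irqsave(ATOMIC_HASH(v), flags);
 *		ret = (v->counter += i);
 *		spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 *		return ret;
 *	}
 *
 * Only the add variant is generated; the header typically builds
 * atomic_sub_return() and friends on top of it by negating the
 * argument.
 */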

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
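/*
 * atomic_cmpxchg() returns whatever value it observed in v->counter;
 * the store happens only if that value equals @old.  A typical caller
 * retries in a loop -- an illustrative sketch, not code from this file:
 *
 *	int cur, old = atomic_read(v);
 *
 *	while ((cur = atomic_cmpxchg(v, old, old + 1)) != old)
 *		old = cur;
 */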

int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_unless);

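/*
 * Note that __atomic_add_unless() returns the counter value seen
 * *before* any add.  The generic atomic_add_unless() and
 * atomic_inc_not_zero() wrappers compare that value against @u to
 * report whether the add actually happened.
 */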
/*
 * Atomic operations are already serializing: every helper above does
 * its read-modify-write under the hash lock, so taking the same lock
 * here orders the store against them.  A plain unlocked store could
 * be lost between the load and the store of a concurrent
 * atomic_add_return().
 */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

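/*
 * The ___set_bit()/___clear_bit()/___change_bit() helpers below back
 * the arch's set_bit(), test_and_set_bit() and related bitops.  Each
 * one returns the old word value masked with @mask, so a
 * test_and_*_bit() caller can tell whether the bit was previously set,
 * while the plain bitops just discard the return value.
 */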
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);

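/*
 * __cmpxchg_u32() backs the generic cmpxchg() for 32-bit values.
 * Because every lock here is selected purely by address, cmpxchg() and
 * the atomic_* helpers above always agree on which lock protects a
 * given word, so the two families can safely be mixed on the same
 * location.
 */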
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);