/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>
/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
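/*
 * Illustrative sketch: a rough portable-C equivalent of the mm-based
 * computation above is
 *
 *	unsigned long idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *	return &atomic_locks[idx];
 *
 * Bits [3, 3 + ATOMIC_HASH_SHIFT) of the address select one of the
 * int-sized lock words, and the page-aligned atomic_locks base supplies
 * the remaining high bits; the mm instruction merges the two at once.
 */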
#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	/* Release the lock (0 == unlocked). */
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */
static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}
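/*
 * Sketch of the protocol the __atomic32_*() and __atomic64_*() helpers
 * (implemented in assembly in atomic_asm_32.S) follow; this is only an
 * illustration, the real fast path is hand-written assembly:
 *
 *	int *lock = __atomic_setup(v);	-- prefetch v, pick its lock word
 *	spin until *lock can be set from 0 to 1
 *	old = *v; *v = <new value>;	-- the actual read-modify-write
 *	*lock = 0;			-- release
 *	return old;
 */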
int _atomic_xchg(int *v, int n)
{
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);
int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
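/*
 * Illustrative semantics of _atomic_xchg_add_unless() (a sketch, not the
 * assembly): under the hashed lock it behaves like
 *
 *	old = *v;
 *	if (old != u)
 *		*v = old + a;
 *	return old;
 *
 * so a caller can compare the returned old value against 'u' to learn
 * whether the add actually happened.
 */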
int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);
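/*
 * Sketch of what the four fetch routines above store and return, again
 * under the hashed lock (illustration only):
 *
 *	old = *p;
 *	*p = old | mask;	(fetch_or)
 *	*p = old & mask;	(fetch_and)
 *	*p = old & ~mask;	(fetch_andn)
 *	*p = old ^ mask;	(fetch_xor)
 *	return old;
 *
 * The (int *) casts assume a 32-bit unsigned long, which holds on this
 * 32-bit architecture.
 */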
long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);
long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);
/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
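/*
 * For reference (an assumption about the tile <asm/uaccess.h> layout,
 * shown only as an illustration): struct __get_user pairs a result value
 * with an error code, roughly
 *
 *	struct __get_user {
 *		unsigned long long val;
 *		int err;
 *	};
 *
 * so returning { .err = -EFAULT } lets a userspace futex fault propagate
 * back to the caller, while a bad kernel address panics above.
 */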
void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
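/*
 * Worked example of the checks above, assuming the usual tile header
 * definitions (ATOMIC_HASH_SHIFT == PAGE_SHIFT - 3 and ATOMIC_HASH_SIZE
 * == 1 << ATOMIC_HASH_SHIFT; an assumption, not stated in this file):
 * with 64 KB pages, ATOMIC_HASH_SIZE is 8192, the lock words take
 * 8192 * 4 = 32 KB (which fits in one page), and PAGE_SIZE >> 3 == 8192,
 * so every index the hash can produce has a lock word.
 */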