ARC: remove extraneous __KERNEL__ guards
[deliverable/linux.git] arch/arc/include/asm/bitops.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
 * The Kconfig glue ensures that in SMP, this is only set if the container
 * SoC/platform has cross-core coherent LLOCK/SCOND.
 */
#if defined(CONFIG_ARC_HAS_LLSC)

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bset    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}
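
/*
 * Illustrative only: a rough C-level sketch of the LLOCK/SCOND retry loop
 * used above. The real exclusivity comes from the hardware link flag, which
 * plain C cannot express; store_conditional() below is a hypothetical helper
 * standing in for SCOND plus the BNZ retry:
 *
 *	do {
 *		temp = *m;			// llock: load word, set link flag
 *		temp |= 1UL << nr;		// bset: set the requested bit
 *	} while (!store_conditional(m, temp));	// scond fails if *m was written
 *						// in between; loop retries
 */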

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bclr    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bxor    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bset    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}
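
/*
 * Typical caller pattern (illustrative; MY_FLAG_BIT and flags_word are
 * hypothetical names): the returned old value tells the caller whether it
 * was the one that actually set the bit, which is what makes this usable
 * as a simple test-and-set style flag:
 *
 *	if (test_and_set_bit(MY_FLAG_BIT, &flags_word))
 *		return -EBUSY;		// bit was already set by someone else
 *	// we set it: proceed, clear_bit() when done
 */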

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bclr    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bxor    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#include <asm/smp.h>

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking here changes to irq-disabling only (UP) or spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over the generic variants):
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned".
 *     This avoids extra code being generated for the pointer arithmetic,
 *     since the compiler is "not sure" that the index is NOT -ve.
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so there is NO need to mask
 *     them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *      at compile time)
 */

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp | (1UL << nr);

	bitops_unlock(flags);
}
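
/*
 * Illustrative only: per the comment above, bitops_lock()/bitops_unlock()
 * (from asm/smp.h) serialize the plain load/modify/store. On UP that amounts
 * to roughly the following sketch (SMP additionally takes a spinlock);
 * the exact definitions live in asm/smp.h:
 *
 *	local_irq_save(flags);		// no irq/preemption can interleave
 *	temp = *m;
 *	*m = temp | (1UL << nr);
 *	local_irq_restore(flags);
 */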

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp & ~(1UL << nr);

	bitops_unlock(flags);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp ^ (1UL << nr);

	bitops_unlock(flags);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old | (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old & ~(1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old ^ (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/

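/*
 * Usage note (general kernel convention, not specific to this file): the
 * double-underscore variants below are not atomic. They are meant for
 * bitmaps that are not concurrently accessed, or whose updates are already
 * serialized by a lock held by the caller, so they can skip the LLSC /
 * irq-lock overhead of the atomic versions above.
 */
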
static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp | (1UL << nr);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp & ~(1UL << nr);
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp ^ (1UL << nr);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old | (1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old & ~(1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old ^ (1 << nr);

	return (old & (1 << nr)) != 0;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int
__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int
__test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	/* ARC700 only considers 5 bits in bit-fiddling insn */
	mask = 1 << nr;

	return ((mask & *addr) != 0);
}

#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
					__constant_test_bit((nr), (addr)) : \
					__test_bit((nr), (addr)))
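
/*
 * Example (illustrative; "bmap" is a hypothetical bitmap): with a constant
 * bit number the compiler can fold the word index and mask completely, so
 *
 *	test_bit(5, bmap)	// roughly (bmap[0] >> 5) & 1
 *	test_bit(37, bmap)	// word 1, bit 5: roughly (bmap[1] >> 5) & 1
 *
 * whereas a runtime nr goes through __test_bit(), which relies on the shift
 * and bit-fiddling instructions only using the bottom 5 bits of nr.
 */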

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It can be 0 to 32, depending on the number of leading 0's
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}
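
/*
 * For reference (not used by the code): for non-zero x this matches GCC's
 * __builtin_clz(x), e.g. clz(0x400) == __builtin_clz(0x400) == 21. The asm
 * version above additionally defines clz(0) == 32, which __builtin_clz()
 * leaves undefined.
 */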

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
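
/*
 * Worked example of the shift-and-narrow above: constant_fls(0x400)
 *	nothing in the top 16 bits	-> x = 0x04000000, r = 16
 *	top 8 bits now non-zero		-> no change
 *	top 4 bits are zero		-> x = 0x40000000, r = 12
 *	top 2 bits now non-zero		-> no change
 *	MSB still clear			-> x = 0x80000000, r = 11
 * Result 11, i.e. bit 10 (1-based), matching the fls() semantics below.
 */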

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}
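
/*
 * Consistency check (illustrative): for runtime values fls(x) = 32 - clz(x),
 * e.g. fls(0x400) = 32 - 21 = 11, agreeing with the constant_fls() example
 * above; and fls(0) = 32 - clz(0) = 32 - 32 = 0, matching the spec.
 */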

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
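
/*
 * How it works (illustrative): __t & -__t isolates the lowest set bit
 * (two's complement), so fls() of that single bit is the 1-based index of
 * the lowest set bit, e.g. ffs(0x18): 0x18 & -0x18 = 0x08, fls(0x08) = 4.
 * For x = 0 the mask is 0 and fls(0) = 0, giving the documented result.
 */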

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))

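/*
 * Example (illustrative): ffz(0xFF) = __ffs(~0xFF) = __ffs(0xFFFFFF00) = 8,
 * i.e. bit 8 is the lowest clear bit of 0xFF.
 */
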
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif