/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	"
#define __EXT		"dext	"
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
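
/*
 * Usage sketch for set_bit(), illustrative only; ex_flags is a hypothetical
 * caller-owned bitmap, not part of this header:
 *
 *	static unsigned long ex_flags[2];
 *
 *	set_bit(0, ex_flags);			bit 0 of word 0
 *	set_bit(BITS_PER_LONG + 3, ex_flags);	bit 3 of word 1
 *
 * nr >> SZLONG_LOG selects the word and nr & SZLONG_MASK the bit within it,
 * which is why @nr may exceed BITS_PER_LONG.
 */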

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}
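
/*
 * A minimal sketch of the barrier pairing described above, assuming a
 * hypothetical flag bit EX_PENDING used for cross-CPU signalling:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(EX_PENDING, ex_flags);
 *	smp_mb__after_clear_bit();
 *
 * Without the explicit barriers, clear_bit() alone orders nothing.
 */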

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}
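
/*
 * Sketch of clear_bit_unlock() paired with test_and_set_bit_lock() (defined
 * below) to build a simple bit lock; EX_LOCK is a hypothetical bit number:
 *
 *	while (test_and_set_bit_lock(EX_LOCK, ex_flags))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(EX_LOCK, ex_flags);
 */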

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}
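
/*
 * Usage sketch for change_bit(): toggling a hypothetical polarity flag.
 * Concurrent callers are safe; each call flips the bit exactly once.
 *
 *	change_bit(EX_POLARITY, ex_flags);
 */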

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
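
/*
 * A common claim-once pattern built on test_and_set_bit(); the returned old
 * value tells the caller whether it won the race (names hypothetical):
 *
 *	if (!test_and_set_bit(EX_INITIALIZED, ex_flags))
 *		ex_do_one_time_init();
 *
 * Exactly one CPU sees 0 returned and performs the initialization.
 */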

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
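
/*
 * Illustrative trylock built on test_and_set_bit_lock(); returns true when
 * the hypothetical EX_LOCK bit was clear and is now owned by the caller:
 *
 *	static inline bool ex_trylock(unsigned long *flags)
 *	{
 *		return !test_and_set_bit_lock(EX_LOCK, flags);
 *	}
 *
 * The acquire ordering guarantees the critical section cannot be hoisted
 * above a successful lock.
 */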

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC "%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
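
/*
 * Sketch of consuming a pending-work flag with test_and_clear_bit(); only
 * the caller that observes the bit set handles the work (names hypothetical):
 *
 *	if (test_and_clear_bit(EX_PENDING, ex_flags))
 *		ex_handle_pending();
 */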

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC "\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
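
/*
 * Usage sketch for test_and_change_bit(): atomically flip a bit and learn
 * the previous state, e.g. for a hypothetical double-buffer flip:
 *
 *	old = test_and_change_bit(EX_ACTIVE_BUF, ex_flags);
 *	buf = old ? &ex_buf[0] : &ex_buf[1];
 */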

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic but implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
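
/*
 * Sketch contrasting __clear_bit_unlock() with clear_bit_unlock(): the
 * non-atomic form is safe only when the unlocking CPU is the sole writer of
 * the word, e.g. a per-object lock bit in an otherwise stable word:
 *
 *	smp_mb();
 *	__clear_bit(EX_LOCK, &ex_obj->state);
 *
 * which is exactly what __clear_bit_unlock() expands to.
 */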

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
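
/*
 * Worked examples for __fls(), assuming a 64-bit kernel:
 *
 *	__fls(1)			==  0
 *	__fls(0x00f0ul)			==  7
 *	__fls(0x8000000000000000ul)	== 63
 *
 * The software fallback halves the search window each step, subtracting 32,
 * 16, 8, 4, 2 and finally 1 from num whenever the upper part is empty.
 */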

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
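
/*
 * How the __fls-based identity works: word & -word isolates the lowest set
 * bit, and __fls() of a single-bit value is that bit's index.  For example,
 * word = 0x28 gives word & -word = 0x8, and __fls(0x8) = 3, so __ffs = 3.
 */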

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
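
/*
 * Worked trace of the software fallback above for x = 0x00f00000: the
 * 16-bit test keeps r = 32, the 8-bit test shifts x to 0xf0000000 and drops
 * r to 24, and the remaining tests all fail, so fls(0x00f00000) = 24,
 * matching 32 - clz = 32 - 8 from the hardware path.
 */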

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
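
/*
 * Examples of the ffs() convention (1-based, 0 means no bits set):
 *
 *	ffs(0)		== 0
 *	ffs(1)		== 1
 *	ffs(0x28)	== 4
 *
 * Internally, word & -word isolates the lowest set bit and fls() converts
 * it to the 1-based index.
 */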

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */