Fix up a few loose ends in explicit support for MIPS R1/R2.
[deliverable/linux.git] / include/asm-mips/bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code; this keeps user-mode
 * code that dares to use kernel include files usable.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # set_bit               \n"
                "  or %0, %2                                    \n"
                "  " __SC "%0, %1                               \n"
                "  beqzl %0, 1b                                 \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # set_bit               \n"
                "  or %0, %2                                    \n"
                "  " __SC "%0, %1                               \n"
                "  beqz %0, 1b                                  \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a |= mask;
                __bi_local_irq_restore(flags);
        }
}
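
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * flag word and bit number below are hypothetical):
 */
#if 0
static unsigned long device_flags;              /* bit 0: device busy */

static void mark_device_busy(void)
{
        /* Atomic read-modify-write, safe against other CPUs; note that
           set_bit() implies no memory barrier. */
        set_bit(0, &device_flags);
}
#endif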

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # clear_bit             \n"
                "  and %0, %2                                   \n"
                "  " __SC "%0, %1                               \n"
                "  beqzl %0, 1b                                 \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # clear_bit             \n"
                "  and %0, %2                                   \n"
                "  " __SC "%0, %1                               \n"
                "  beqz %0, 1b                                  \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a &= ~mask;
                __bi_local_irq_restore(flags);
        }
}
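
/*
 * Sketch of the barrier pattern described above (hypothetical lock bit).
 * clear_bit() is atomic but implies no barrier, so an unlock-style use
 * must add one explicitly:
 */
#if 0
static unsigned long lock_word;                 /* bit 0: locked */

static void example_unlock(void)
{
        /* Order the critical section's stores before the release. */
        smp_mb__before_clear_bit();
        clear_bit(0, &lock_word);
}
#endif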

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # change_bit            \n"
                "  xor %0, %2                                   \n"
                "  " __SC "%0, %1                               \n"
                "  beqzl %0, 1b                                 \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # change_bit            \n"
                "  xor %0, %2                                   \n"
                "  " __SC "%0, %1                               \n"
                "  beqz %0, 1b                                  \n"
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a ^= mask;
                __bi_local_irq_restore(flags);
        }
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_set_bit      \n"
                "  or %2, %0, %3                                \n"
                "  " __SC "%2, %1                               \n"
                "  beqzl %2, 1b                                 \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set push                                    \n"
                "  .set noreorder                               \n"
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_set_bit      \n"
                "  or %2, %0, %3                                \n"
                "  " __SC "%2, %1                               \n"
                "  beqz %2, 1b                                  \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a |= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}
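
/*
 * Sketch: a minimal spin-style lock built on test_and_set_bit()
 * (hypothetical; the kernel's bit_spin_lock() wraps this pattern).
 */
#if 0
static unsigned long lock_word;

static void example_bit_lock(void)
{
        /* Loop until the old bit value was 0, i.e. we took the lock.
           test_and_set_bit() implies a barrier on SMP (the sync above),
           so the critical section cannot leak upward. */
        while (test_and_set_bit(0, &lock_word))
                ;
}
#endif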

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += nr >> SZLONG_LOG;
        mask = 1UL << (nr & SZLONG_MASK);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_clear_bit    \n"
                "  or %2, %0, %3                                \n"
                "  xor %2, %3                                   \n"
                "  " __SC "%2, %1                               \n"
                "  beqzl %2, 1b                                 \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set push                                    \n"
                "  .set noreorder                               \n"
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_clear_bit    \n"
                "  or %2, %0, %3                                \n"
                "  xor %2, %3                                   \n"
                "  " __SC "%2, %1                               \n"
                "  beqz %2, 1b                                  \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a &= ~mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a &= ~mask;

        return retval;
}
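
/*
 * Sketch: the non-atomic variants suit callers that already hold a lock
 * covering the bitmap (hypothetical names; assumes <linux/spinlock.h>).
 */
#if 0
static unsigned long slot_map[4];
static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;

static int example_release_slot(unsigned long nr)
{
        int was_set;

        spin_lock(&map_lock);
        /* No LL/SC loop needed; the lock serializes all writers. */
        was_set = __test_and_clear_bit(nr, slot_map);
        spin_unlock(&map_lock);

        return was_set;
}
#endif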

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_change_bit   \n"
                "  xor %2, %0, %3                               \n"
                "  " __SC "%2, %1                               \n"
                "  beqzl %2, 1b                                 \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "  .set push                                    \n"
                "  .set noreorder                               \n"
                "  .set mips3                                   \n"
                "1: " __LL "%0, %1      # test_and_change_bit   \n"
                "  xor %2, %0, %3                               \n"
                "  " __SC "%2, %1                               \n"
                "  beqz %2, 1b                                  \n"
                "  and %2, %0, %3                               \n"
#ifdef CONFIG_SMP
                "  sync                                         \n"
#endif
                "  .set pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask, retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a ^= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
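
/*
 * Sketch: test_bit() is a plain load plus shift and mask, so it is fine
 * for lockless polling of a flag set elsewhere (names hypothetical):
 */
#if 0
static unsigned long pending[2];

static int example_is_pending(unsigned long nr)
{
        return test_bit(nr, pending);
}
#endif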

/*
 * Return the bit position (0..SZLONG-1) of the most significant 1 bit
 * in a word. Returns -1 if no 1 bit exists.
 */
static inline int __ilog2(unsigned long x)
{
        int lz;

        if (sizeof(x) == 4) {
                __asm__ (
                "  .set push                                    \n"
                "  .set mips32                                  \n"
                "  clz %0, %1                                   \n"
                "  .set pop                                     \n"
                : "=r" (lz)
                : "r" (x));

                return 31 - lz;
        }

        BUG_ON(sizeof(x) != 8);

        __asm__ (
        "  .set push                                    \n"
        "  .set mips64                                  \n"
        "  dclz %0, %1                                  \n"
        "  .set pop                                     \n"
        : "=r" (lz)
        : "r" (x));

        return 63 - lz;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        return __ilog2(word & -word);
#else
        int b = 0, s;

#ifdef CONFIG_32BIT
        s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 31 != 0) s = 0; b += s;

        return b;
#endif
#ifdef CONFIG_64BIT
        s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
        s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 63 != 0) s = 0; b += s;

        return b;
#endif
#endif
}
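
/*
 * Worked example for the MIPS32/MIPS64 path above: word & -word
 * isolates the lowest set bit, and __ilog2() of that single bit is its
 * index. For word = 0x58 (binary 1011000), word & -word = 0x08 and
 * __ilog2(0x08) = 3, so __ffs(0x58) == 3.
 */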

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
        if (!word)
                return 0;

        return __ffs(word) + 1;
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        return __ffs(~word);
}

/*
 * flz - find last zero in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long flz(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        return __ilog2(~word);
#else
#ifdef CONFIG_32BIT
        int r = 31, s;
        word = ~word;
        s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
        s =  8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
        s =  4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
        s =  2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
        s =  1; if ((word & 0x80000000)) s = 0; r -= s;

        return r;
#endif
#ifdef CONFIG_64BIT
        int r = 63, s;
        word = ~word;
        s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
        s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
        s =  8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
        s =  4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
        s =  2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
        s =  1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;

        return r;
#endif
#endif
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
        if (word == 0)
                return 0;

        return flz(~word) + 1;
}
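
/*
 * Worked example relating the helpers above, for word = 0x00f0
 * (bits 4..7 set):
 *
 *      __ffs(word) == 4        index of lowest set bit
 *      ffs(word)   == 5        1-based; 0 when word == 0
 *      fls(word)   == 8        1-based index of highest set bit
 *      ffz(word)   == 0        index of lowest clear bit
 */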

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset);
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
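
/*
 * Sketch: a typical allocator-style scan over a fixed-size bitmap
 * (hypothetical names; returns -1 when the map is full, and the caller
 * is assumed to hold a lock since __set_bit() is non-atomic).
 */
#if 0
#define MAP_BITS 256
static unsigned long alloc_map[MAP_BITS / _MIPS_SZLONG];

static long example_alloc_slot(void)
{
        unsigned long bit = find_first_zero_bit(alloc_map, MAP_BITS);

        if (bit >= MAP_BITS)
                return -1;              /* no free slot */
        __set_bit(bit, alloc_map);
        return bit;
}
#endif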

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp &= ~0UL << offset;
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (_MIPS_SZLONG - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
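
/*
 * Sketch: walking every set bit in a bitmap with find_first_bit() and
 * find_next_bit() (hypothetical helper; later kernels provide a
 * for_each_set_bit-style wrapper for this loop).
 */
#if 0
static void example_walk_bits(const unsigned long *map, unsigned long nbits)
{
        unsigned long bit;

        for (bit = find_first_bit(map, nbits); bit < nbits;
             bit = find_next_bit(map, nbits, bit + 1)) {
                /* ... handle 'bit' ... */
        }
}
#endif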

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
#endif
}

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x)    generic_hweight64(x)
#define hweight32(x)    generic_hweight32(x)
#define hweight16(x)    generic_hweight16(x)
#define hweight8(x)     generic_hweight8(x)
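
/*
 * Worked example: hweight32(0xf0f0) == 8, since eight bits are set.
 * The generic_hweight*() routines from <linux/bitops.h> compute this
 * with shift-and-mask arithmetic rather than a loop over the bits.
 */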

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;

        return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;

        return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
        const unsigned char *ADDR = (const unsigned char *) addr;
        int mask;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = cpu_to_lelongp(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = cpu_to_lelongp(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = cpu_to_lelongp(p);

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */

found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr, addr) \
        __test_and_set_le_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) \
        __test_and_clear_le_bit((nr), (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr)             \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_set_bit((nr), (addr));               \
        spin_unlock(lock);                              \
        ret;                                            \
})

#define ext2_clear_bit_atomic(lock, nr, addr)           \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_clear_bit((nr), (addr));             \
        spin_unlock(lock);                              \
        ret;                                            \
})
#define ext2_test_bit(nr, addr) test_le_bit((nr), (unsigned long *)(addr))
#define ext2_find_first_zero_bit(addr, size) \
        find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
        find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
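
/*
 * Sketch: the ext2_* helpers operate on little-endian on-disk bitmaps,
 * which is what makes them usable on big-endian MIPS. Hypothetical
 * allocator over an in-memory copy of a block bitmap (a real caller
 * would work on a buffer_head's data and take the appropriate lock):
 */
#if 0
static long example_ext2_alloc(unsigned long *bitmap, unsigned long nbits)
{
        unsigned long bit = ext2_find_first_zero_bit(bitmap, nbits);

        if (bit >= nbits)
                return -1;              /* bitmap full */
        ext2_set_bit(bit, bitmap);      /* non-atomic test-and-set */
        return bit;
}
#endif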

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr, addr)        test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)                 set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)      test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)                test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)   find_first_zero_bit(addr, size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */