/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll "
#define __SC		"sc "
#define __SET_MIPS	".set mips2 "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld "
#define __SC		"scd "
#define __SET_MIPS	".set mips3 "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
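
/*
 * Example (illustrative sketch, not part of the original header): because
 * @nr may index past the first word, an array of unsigned long can be
 * treated as one large bitmap.  The array name below is hypothetical.
 *
 *	static unsigned long pending[4];	128 bits on a 32-bit kernel
 *	set_bit(69, pending);			atomically sets bit 5 of pending[2]
 */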

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
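
/*
 * Example (illustrative sketch, not part of the original header): when a
 * cleared bit hands ownership to another CPU, pair clear_bit() with the
 * barrier macros defined above, since clear_bit() itself has no barrier.
 * The structure and flag names are hypothetical.
 *
 *	req->status = RESULT_OK;		made-up fields
 *	smp_mb__before_clear_bit();		order the store before the clear
 *	clear_bit(REQ_BUSY, &req->flags);	other CPUs now see the result
 */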

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	" __SET_MIPS "				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
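
/*
 * Example (illustrative sketch, not part of the original header):
 * test_and_set_bit() is commonly used to claim a resource flag without a
 * separate lock; the returned old value tells the caller whether it won.
 * The variable name is hypothetical.
 *
 *	if (test_and_set_bit(0, &chip_busy))
 *		return -EBUSY;			someone else owns the chip
 *	...program the chip...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &chip_busy);		release the claim
 */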

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	" __SET_MIPS "					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
#ifdef CONFIG_32BIT
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s = 8;  if (word << 24 != 0) s = 0; b += s; word >>= s;
	s = 4;  if (word << 28 != 0) s = 0; b += s; word >>= s;
	s = 2;  if (word << 30 != 0) s = 0; b += s; word >>= s;
	s = 1;  if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_64BIT
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s = 8;  if (word << 56 != 0) s = 0; b += s; word >>= s;
	s = 4;  if (word << 60 != 0) s = 0; b += s; word >>= s;
	s = 2;  if (word << 62 != 0) s = 0; b += s; word >>= s;
	s = 1;  if (word << 63 != 0) s = 0; b += s;
#endif

	return b;
}
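
/*
 * Worked example (added for clarity, not part of the original header):
 * for word == 0x0000ffff on a 32-bit kernel, the function first inverts it
 * to 0xffff0000; the test (word << 16) == 0 then holds, so s = 16 is added
 * to b and the word is shifted right by 16.  Each remaining test finds a
 * set bit in the low half and adds 0, giving ffz(0x0000ffff) == 16, the
 * index of the lowest zero bit.
 */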

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return ffz(~word);
}

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
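
/*
 * Example (illustrative sketch, not part of the original header): a typical
 * allocator searches for a clear bit and then claims it atomically, retrying
 * if another CPU took the same bit first.  The names are hypothetical.
 *
 *	do {
 *		bit = find_first_zero_bit(map, MAP_BITS);
 *		if (bit >= MAP_BITS)
 *			return -ENOSPC;		bitmap is full
 *	} while (test_and_set_bit(bit, map));
 */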

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}
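
/*
 * Worked example (added for clarity, not part of the original header): the
 * 140-bit priority bitmap spans five 32-bit or three 64-bit words, which is
 * why the offsets above step by 32 or 64.  If only priority 100 is runnable,
 * then on a 32-bit kernel b[3] has bit 4 set (100 == 3*32 + 4) and the
 * function returns __ffs(b[3]) + 96 == 100.
 */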

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

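/*
 * Worked example (added for clarity, not part of the original header):
 * hweight8(0xa5) == 4, since 0xa5 == 10100101 in binary has four set bits;
 * likewise hweight32(0xf0f0f0f0) == 16.
 */
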
static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)	\
({						\
	int ret;				\
	spin_lock(lock);			\
	ret = ext2_set_bit((nr), (addr));	\
	spin_unlock(lock);			\
	ret;					\
})

#define ext2_clear_bit_atomic(lock, nr, addr)	\
({						\
	int ret;				\
	spin_lock(lock);			\
	ret = ext2_clear_bit((nr), (addr));	\
	spin_unlock(lock);			\
	ret;					\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)
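
/*
 * Example (illustrative sketch, not part of the original header): the
 * ext2_*_bit_atomic() wrappers expect the caller to pass the spinlock that
 * serializes updates to the little-endian on-disk bitmap.  The lock name
 * below is hypothetical.
 *
 *	static spinlock_t bitmap_lock = SPIN_LOCK_UNLOCKED;
 *
 *	if (ext2_set_bit_atomic(&bitmap_lock, bit, bh->b_data))
 *		...the block was already allocated, try another bit...
 */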

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */