#ifndef __ASM_SH_BITOPS_H
#define __ASM_SH_BITOPS_H

#ifdef __KERNEL__
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>

static __inline__ void set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a |= mask;
        local_irq_restore(flags);
}
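
/*
 * Worked example (illustrative, not part of the original header): for
 * nr = 37, a += 37 >> 5 advances to word 1 of the bitmap and
 * mask = 1 << (37 & 0x1f) selects bit 5, so set_bit(37, map) sets bit 5
 * of the second 32-bit word. The bitmap name is hypothetical:
 *
 *      static unsigned long map[2];
 *      set_bit(37, map);
 */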

static __inline__ void __set_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
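
/*
 * Illustrative pairing (sketch, names hypothetical): clear_bit() is not
 * a compiler barrier, so when the bit publishes earlier stores, order
 * them explicitly:
 *
 *      buf->status = DONE;
 *      smp_mb__before_clear_bit();
 *      clear_bit(BUF_BUSY, &buf->flags);
 */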
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a &= ~mask;
        local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        *a ^= mask;
        local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void * addr)
{
        int mask;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        local_irq_restore(flags);

        return retval;
}
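
/*
 * Usage sketch (hypothetical names): the returned old value makes
 * test_and_set_bit() usable as a simple trylock:
 *
 *      if (!test_and_set_bit(DEV_IN_USE, &dev->flags)) {
 *              do_io(dev);
 *              smp_mb__before_clear_bit();
 *              clear_bit(DEV_IN_USE, &dev->flags);
 *      }
 */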

static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        local_irq_restore(flags);

        return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        int mask, retval;
        volatile unsigned int *a = addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

static __inline__ int test_bit(int nr, const volatile void *addr)
{
        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}

static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result;

        __asm__("1:\n\t"
                "shlr %1\n\t"
                "bt/s 1b\n\t"
                " add #1, %0"
                : "=r" (result), "=r" (word)
                : "0" (~0L), "1" (word)
                : "t");
        return result;
}
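
/*
 * The loop above counts trailing one bits: shlr moves bit 0 into T,
 * the add in the delay slot bumps the count (which starts at ~0L) once
 * per iteration, and bt/s loops while the shifted-out bit was set.
 * An equivalent C sketch, for clarity only:
 *
 *      result = 0;
 *      while (word & 1) {
 *              word >>= 1;
 *              result++;
 *      }
 */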

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
        unsigned long result;

        __asm__("1:\n\t"
                "shlr %1\n\t"
                "bf/s 1b\n\t"
                " add #1, %0"
                : "=r" (result), "=r" (word)
                : "0" (~0L), "1" (word)
                : "t");
        return result;
}
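
/*
 * This is the same loop as ffz() with the branch sense inverted (bf/s
 * instead of bt/s), so it counts trailing zero bits and
 * __ffs(x) == ffz(~x); e.g. __ffs(0x50) == 4.
 */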

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned int *p = ((const unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the
 * byte containing it.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
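
/*
 * Usage sketch (hypothetical names): walking every set bit in a bitmap
 * with this API:
 *
 *      for (bit = find_first_bit(map, NBITS);
 *           bit < NBITS;
 *           bit = find_next_bit(map, NBITS, bit + 1))
 *              handle(bit);
 */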

static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset)
{
        const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
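
/*
 * Usage sketch (hypothetical names): find and claim a free slot. The
 * search itself is not atomic, so the claim is re-checked:
 *
 *      do {
 *              slot = find_first_zero_bit(map, NSLOTS);
 *              if (slot >= NSLOTS)
 *                      return -ENOSPC;
 *      } while (test_and_set_bit(slot, map));
 */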

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
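
/*
 * Examples: hweight32(0xf0f0f0f0) == 16, hweight8(0x05) == 2. The
 * generic_hweight* helpers compute the count with parallel
 * mask-and-add steps rather than a per-bit loop.
 */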

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */

static inline int sched_find_first_bit(const unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
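
/*
 * Usage sketch: the O(1) scheduler keeps one bit per priority level in
 * a 140-bit bitmap and sets a level's bit while its runqueue is
 * non-empty; picking the next level to run looks roughly like this
 * (names illustrative):
 *
 *      idx = sched_find_first_bit(array->bitmap);
 *      queue = array->queue + idx;
 */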

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
        find_next_zero_bit((unsigned long *)(addr), (size), (offset))
#else
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (volatile unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        local_irq_restore(flags);
        return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
        int mask, retval;
        unsigned long flags;
        volatile unsigned char *ADDR = (volatile unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        local_irq_restore(flags);
        return retval;
}

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
        int mask;
        const volatile unsigned char *ADDR = (const volatile unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* tmp holds the little-endian value, so a plain shift
                 * would mask the wrong bits. We could keep a big-endian
                 * value in tmp instead, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but that would cost performance, so we swap the mask
                 * rather than the data:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
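
/*
 * Worked example of the swapped mask above (big-endian CPU,
 * offset = 8): ~0UL >> 24 is 0x000000ff, and __swab32() turns it into
 * 0xff000000, which is exactly where little-endian bit numbers 0..7
 * land in the native word, so those bits are forced to 1 and skipped
 * by the zero-bit search.
 */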
#endif

#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })

#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

/*
 * fls: find last bit set.
 */

#define fls(x) generic_fls(x)

#endif /* __KERNEL__ */

#endif /* __ASM_SH_BITOPS_H */