#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there is no guarantee that this function will not be reordered
 * on non-x86 architectures, so portable code must not rely on any
 * ordering guarantee.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
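
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of using set_bit() on a shared flag word.  The
 * flag word and bit number below are hypothetical names chosen for
 * illustration.
 */
#define DEV_FLAG_READY	0		/* bit number within the flag word */

static unsigned long device_flags;	/* shared between CPUs */

static void mark_device_ready(void)
{
	/* Atomic read-modify-write; safe against concurrent bitops. */
	set_bit(DEV_FLAG_READY, &device_flags);
}
#endif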

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
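
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of clear_bit() used for locking purposes together
 * with the barrier macros defined above, as the clear_bit() kernel-doc
 * suggests.  The flag word and bit number are hypothetical names.
 */
#define WORK_PENDING	0

static unsigned long work_flags;

static void finish_work(void)
{
	/* Order prior stores before the flag becomes visible as clear. */
	smp_mb__before_clear_bit();
	clear_bit(WORK_PENDING, &work_flags);
}
#endif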

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on other architectures.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
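
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of the classic "claim it exactly once" pattern
 * built on test_and_set_bit().  The names are hypothetical.
 */
#define INIT_DONE	0

static unsigned long init_state;

static void init_once(void)
{
	/* Only the first caller sees the old bit value as 0. */
	if (!test_and_set_bit(INIT_DONE, &init_state)) {
		/* ... perform one-time initialisation here ... */
	}
}
#endif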

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on x86.
 */
#define test_and_set_bit_lock	test_and_set_bit
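
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of a bit spinlock built from test_and_set_bit_lock()
 * and clear_bit_unlock() as defined above.  The lock word and bit number
 * are hypothetical; real kernel code would use <linux/bit_spinlock.h>.
 */
#define MY_LOCK_BIT	0

static unsigned long my_lock_word;

static void my_lock(void)
{
	/* Spin until the old bit value is 0, i.e. we took the lock. */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);	/* release */
}
#endif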

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
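
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of consuming a "pending" flag exactly once with
 * test_and_clear_bit().  The names are hypothetical.
 */
#define IRQ_PENDING	0

static unsigned long pending_flags;

static void handle_if_pending(void)
{
	/* Atomically take ownership of the pending event, if any. */
	if (test_and_clear_bit(IRQ_PENDING, &pending_flags)) {
		/* ... handle the event; no other CPU still sees it set ... */
	}
}
#endif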

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))

#undef ADDR
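
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of how the test_bit() macro dispatches.  When the
 * bit number is a compile-time constant, __builtin_constant_p(nr)
 * evaluates to 1 and the pure-C constant_test_bit() is used, letting
 * the compiler fold it down to a mask test; a runtime bit number takes
 * the "btl"-based variable_test_bit() path instead.  The flag word
 * below is a hypothetical name.
 */
static unsigned long status_word;

static int check_bits(int runtime_nr)
{
	int a = test_bit(3, &status_word);		/* constant path */
	int b = test_bit(runtime_nr, &status_word);	/* variable path */

	return a || b;
}
#endif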

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
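
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of allocating a free slot from a fixed-size bitmap
 * with find_first_zero_bit() and test_and_set_bit().  The retry loop
 * covers the race where another CPU claims the same slot first.  All
 * names are hypothetical.
 */
#define MAX_SLOTS	64

static unsigned long slot_map[MAX_SLOTS / 32];	/* 32-bit words on i386 */

static int alloc_slot(void)
{
	int slot;

	do {
		slot = find_first_zero_bit(slot_map, MAX_SLOTS);
		if (slot >= MAX_SLOTS)
			return -1;	/* bitmap full */
		/* Retry if someone else grabbed the slot meanwhile. */
	} while (test_and_set_bit(slot, slot_map));

	return slot;
}
#endif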

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
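
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of __ffs() with the mandatory non-zero check,
 * iterating over the set bits of a word.  __ffs(0x0c) is 2, since
 * bit 2 is the lowest set bit.  The function name is hypothetical.
 */
static void for_each_set_bit_in(unsigned long mask)
{
	while (mask) {
		unsigned long bit = __ffs(mask);	/* mask != 0 here */

		/* ... process `bit' ... */
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}
#endif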

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
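
#if 0	/* Example usage only -- not part of this header. */
/*
 * A minimal sketch of the 1-based ffs()/fls() convention: both return
 * 0 for a zero argument, otherwise bit position + 1, so ffs(0x18) == 4
 * and fls(0x18) == 5.  A common fls() use is computing how many bits
 * a value needs; the function name is hypothetical.
 */
static int bits_needed(unsigned int v)
{
	return fls(v);	/* e.g. bits_needed(1000) == 10 */
}
#endif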

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */