#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The value-returning (test_and_*) operations
 * return 0 if the bit was clear before the operation and != 0 if it
 * was set.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
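
/*
 * Usage sketch (illustrative only, not part of this header; the
 * 'my_flags' variable and DATA_READY bit are hypothetical): setting
 * a flag word that other CPUs may modify concurrently.
 */
#if 0
static unsigned long my_flags;
#define DATA_READY 0

static void mark_data_ready(void)
{
	/* Safe against concurrent bit operations on my_flags. */
	set_bit(DATA_READY, &my_flags);
}
#endif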

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
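
/*
 * Sketch of when the non-atomic variant is appropriate (hypothetical
 * names): if every writer of 'bitmap' already serializes on 'my_lock',
 * the cheaper non-LOCKed __set_bit() suffices.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);
static unsigned long bitmap[2];

static void set_under_lock(int nr)
{
	spin_lock(&my_lock);
	__set_bit(nr, bitmap);	/* serialized by my_lock, no LOCK prefix needed */
	spin_unlock(&my_lock);
}
#endif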

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/*
 * __clear_bit - the non-atomic counterpart of clear_bit(); see
 * __set_bit() for the rules on when the non-atomic form is safe.
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
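
/*
 * Sketch of the barrier usage described above (hypothetical names):
 * when clear_bit() releases a lock-like flag, insert the barrier so
 * stores to the protected data are ordered before the flag drops.
 */
#if 0
static void release_flag(unsigned long *flags, int nr)
{
	/* ... stores to data protected by bit 'nr' ... */
	smp_mb__before_clear_bit();
	clear_bit(nr, flags);
}
#endif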

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
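
/*
 * Sketch of the classic test_and_set_bit() claim pattern (hypothetical
 * names): the returned old value tells the caller whether it won the race.
 */
#if 0
static unsigned long init_done;

static void init_once(void)
{
	if (test_and_set_bit(0, &init_done))
		return;		/* somebody else already claimed it */
	/* ... one-time initialization runs on exactly one CPU ... */
}
#endif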

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
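
/*
 * Sketch (hypothetical names): atomically toggling a state bit while
 * learning the previous state in a single step.
 */
#if 0
static unsigned long led_state;

static int toggle_led(void)
{
	/* Returns non-zero if the LED was on before the toggle. */
	return test_and_change_bit(0, &led_state);
}
#endif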

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

/* Compile-time variant: the compiler can fold this to a constant or a simple test. */
static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

/* Run-time variant: uses the BT instruction for a variable bit number. */
static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
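
/*
 * Sketch of the dispatch above (hypothetical names): a literal bit
 * number resolves to constant_test_bit(), which the compiler can fold;
 * a runtime value goes through the BT instruction in variable_test_bit().
 */
#if 0
static unsigned long my_flags;

static int check(int nr)
{
	int a = test_bit(3, &my_flags);		/* constant_test_bit() */
	int b = test_bit(nr, &my_flags);	/* variable_test_bit() */
	return a && b;
}
#endif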

#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
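
/*
 * Sketch of a simple allocator built on find_first_zero_bit()
 * (hypothetical names): claim the first free slot in a fixed-size
 * bitmap, retrying if another CPU raced us to the same slot.
 */
#if 0
#define NR_SLOTS 64
static unsigned long slot_map[NR_SLOTS / 32];

static int alloc_slot(void)
{
	int slot;

	do {
		slot = find_first_zero_bit(slot_map, NR_SLOTS);
		if (slot >= NR_SLOTS)
			return -1;	/* all slots busy */
	} while (test_and_set_bit(slot, slot_map));
	return slot;
}
#endif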

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
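
/*
 * Sketch (hypothetical name): __ffs() is undefined for 0, so callers
 * must test the word first, as the loop in find_first_bit() below does.
 */
#if 0
static int lowest_set(unsigned long word)
{
	if (!word)
		return -1;	/* no bit set: __ffs() would be undefined */
	return __ffs(word);	/* e.g. __ffs(0x8) == 3 */
}
#endif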

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}
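
/*
 * Usage sketch (hypothetical names): iterate over the set bits of a
 * bitmap by combining find_first_bit() with its find_next_bit()
 * companion declared below.
 */
#if 0
static void walk_bits(const unsigned long *map, unsigned size)
{
	unsigned bit = find_first_bit(map, size);

	while (bit < size) {
		/* ... handle 'bit' ... */
		bit = find_next_bit(map, size, bit + 1);
	}
}
#endif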

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
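
/*
 * Sketch (hypothetical name): ffz() is undefined for ~0UL, so check
 * for a full word first; note how it simply runs bsfl on ~word.
 */
#if 0
static int lowest_clear(unsigned long word)
{
	if (word == ~0UL)
		return -1;	/* every bit set: ffz() would be undefined */
	return ffz(word);	/* e.g. ffz(0x7) == 3 */
}
#endif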

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
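
/*
 * Sketch of the 1-based ffs()/fls() conventions: both return 0 for an
 * all-zero argument, unlike __ffs()/ffz() above, which are undefined
 * in that case.
 */
#if 0
static void ffs_fls_examples(void)
{
	int lo = ffs(0x18);	/* == 4: lowest set bit is bit 3, 1-based */
	int hi = fls(0x18);	/* == 5: highest set bit is bit 4, 1-based */
	/* ffs(0) == 0 and fls(0) == 0; fls(0x80000000) == 32 */
	(void)lo;
	(void)hi;
}
#endif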

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_bit((nr), (unsigned long *)(addr))
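
/*
 * Sketch (hypothetical names): the lock argument is unused here because
 * the LOCKed bit instructions are already atomic on x86; callers still
 * pass a lock for the benefit of architectures that need it.
 */
#if 0
static int ext2_style_alloc(spinlock_t *lock, unsigned long *map, int nr)
{
	return !ext2_set_bit_atomic(lock, nr, map);	/* 1 if we got the bit */
}
#endif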

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */