#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 */
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm/alternative.h>
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}
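/*
 * Illustrative usage, a sketch rather than part of this header ('dev_flags'
 * and DEV_BUSY are hypothetical names):
 *
 *	static unsigned long dev_flags;
 *	#define DEV_BUSY 0
 *
 *	set_bit(DEV_BUSY, &dev_flags);
 *
 * Prefer __set_bit() below only when no other CPU can touch the word, e.g.
 * under a lock or during single-threaded initialization.
 */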
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}
static __inline__ void __clear_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
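/*
 * Illustrative unlock-style sequence (a sketch; LOCK_BIT and 'word' are
 * hypothetical).  Since clear_bit() itself promises no memory barrier,
 * stores made while "holding" the bit must be ordered before the release
 * with the barrier above:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);
 */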
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
#define test_and_set_bit_lock test_and_set_bit
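/*
 * Illustrative bit-lock sketch built on the pair above (hypothetical names;
 * real code would normally use <linux/bit_spinlock.h>).  A return of 0 from
 * test_and_set_bit_lock() means the bit was clear and this caller acquired
 * the "lock":
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &word);
 */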
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
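/*
 * Illustrative usage (a sketch; PENDING_WORK, 'flags' and process_work()
 * are hypothetical).  Among racing consumers, exactly one observes the
 * 1 -> 0 transition:
 *
 *	if (test_and_clear_bit(PENDING_WORK, &flags))
 *		process_work();
 */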
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif
static __inline__ int constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int variable_test_bit(int nr, const volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (*(volatile long *)addr),"dIr" (nr));
	return oldbit;
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

#undef ADDR
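/*
 * test_bit() picks the pure-C version when @nr is a compile-time constant,
 * letting gcc fold the mask, and the btl version otherwise.  Illustrative
 * usage (hypothetical names):
 *
 *	if (test_bit(DEV_BUSY, &dev_flags))
 *		return -EBUSY;
 */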
extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
extern long find_first_bit(const unsigned long *addr, unsigned long size);
extern long find_next_bit(const unsigned long *addr, long size, long offset);
/* return index of first bit set in val or max when no bit is set */
static inline long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}
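/*
 * bsfq leaves ZF set when its source is zero, so the cmovz substitutes
 * @max in that case.  Worked examples:
 *
 *	__scanbit(0x18UL, 64) == 3	(lowest set bit of 11000b)
 *	__scanbit(0UL, 64)    == 64	(no bit set, so max is returned)
 */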
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
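/*
 * Illustrative loop over the set bits of a single-word bitmap (a sketch;
 * 'map' and handle_bit() are hypothetical).  With a constant size <=
 * BITS_PER_LONG the macros above reduce to one bsfq via __scanbit():
 *
 *	unsigned long map = 0x5;	(bits 0 and 2 set)
 *	long i;
 *	for (i = find_first_bit(&map, BITS_PER_LONG);
 *	     i < BITS_PER_LONG;
 *	     i = find_next_bit(&map, BITS_PER_LONG, i + 1))
 *		handle_bit(i);		(runs with i == 0, then i == 2)
 */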
/*
 * Find string of zero bits in a bitmap. -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}
static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
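/*
 * These string helpers back simple range allocators such as IOMMU address
 * bitmaps.  An illustrative allocation sketch, assuming the caller
 * serializes access to 'bitmap' (hypothetical names):
 *
 *	long pos = find_next_zero_string(bitmap, 0, nbits, len);
 *	if (pos >= 0)
 *		set_bit_string(bitmap, pos, len);
 *	...
 *	__clear_bit_string(bitmap, pos, len);
 */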
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
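/*
 * Example: ffz(0xffUL) == 8, since bits 0-7 are set and bit 8 is the first
 * zero.  ffz(~0UL) is undefined, hence the check-against-~0UL advice above.
 */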
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
/*
 * __fls: find last bit set.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
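/*
 * Example values: __ffs(0x18UL) == 3 and __fls(0x18UL) == 4.  Both are
 * undefined for a zero argument, so callers must check against 0 first.
 */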
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}
/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock,nr,addr) \
		test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
		test_and_clear_bit((nr),(unsigned long*)addr)
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */