x86: Cleanup rwsem_count_t typedef
author Thomas Gleixner <tglx@linutronix.de>
Wed, 26 Jan 2011 20:05:53 +0000 (20:05 +0000)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 27 Jan 2011 11:30:38 +0000 (12:30 +0100)
Remove the rwsem_count_t typedef, which has no real reason to be there.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David Miller <davem@davemloft.net>
Cc: Chris Zankel <chris@zankel.net>
LKML-Reference: <20110126195833.580335506@linutronix.de>

arch/x86/include/asm/rwsem.h

index a626cff8604166fdd0a97a56a3c599f7d7668e71..c30206c2bbf95739c2537f84c2c82b643ccbb17b 100644 (file)
@@ -68,10 +68,8 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-typedef signed long rwsem_count_t;
-
 struct rw_semaphore {
-       rwsem_count_t           count;
+       long                    count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -127,7 +125,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t result, tmp;
+       long result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
                     "  mov          %0,%1\n\t"
                     "1:\n\t"
@@ -149,7 +147,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning down_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* adds 0xffff0001, returns the old value */
@@ -174,9 +172,8 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       rwsem_count_t ret = cmpxchg(&sem->count,
-                                   RWSEM_UNLOCKED_VALUE,
-                                   RWSEM_ACTIVE_WRITE_BIAS);
+       long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+                          RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
@@ -187,7 +184,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning __up_read\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* subtracts 1, returns the old value */
@@ -205,7 +202,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp;
+       long tmp;
        asm volatile("# beginning __up_write\n\t"
                     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
                     /* subtracts 0xffff0001, returns the old value */
@@ -241,8 +238,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(rwsem_count_t delta,
-                                   struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
        asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
                     : "+m" (sem->count)
@@ -252,10 +248,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
 /*
  * implement exchange and add functionality
  */
-static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
-                                               struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       rwsem_count_t tmp = delta;
+       long tmp = delta;
 
        asm volatile(LOCK_PREFIX "xadd %0,%1"
                     : "+r" (tmp), "+m" (sem->count)