author    Thomas Gleixner <tglx@linutronix.de>  2009-12-03 20:01:19 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2009-12-14 23:55:32 +0100
commit    fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (patch)
tree      f9dbf8dab23cea6f033a58672ba16abf2ae09ebd /arch
parent    0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (diff)
locking: Convert raw_rwlock to arch_rwlock
Not strictly necessary for -rt as -rt does not have non sleeping
rwlocks, but it's odd to not have a consistent naming convention.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
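For reference, the change in each architecture's spinlock_types.h is the same mechanical rename. A minimal sketch of the resulting shape (the field layout varies per architecture):

typedef struct {
	volatile unsigned int lock;
} arch_rwlock_t;				/* was: raw_rwlock_t */

#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }		/* was: __RAW_RW_LOCK_UNLOCKED */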
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/spinlock.h           | 16
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h     |  4
-rw-r--r--  arch/arm/include/asm/spinlock.h             | 12
-rw-r--r--  arch/arm/include/asm/spinlock_types.h       |  4
-rw-r--r--  arch/blackfin/include/asm/spinlock.h        | 16
-rw-r--r--  arch/blackfin/include/asm/spinlock_types.h  |  4
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h  | 16
-rw-r--r--  arch/ia64/include/asm/spinlock.h            | 16
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h      |  4
-rw-r--r--  arch/m32r/include/asm/spinlock.h            | 12
-rw-r--r--  arch/m32r/include/asm/spinlock_types.h      |  4
-rw-r--r--  arch/mips/include/asm/spinlock.h            | 12
-rw-r--r--  arch/mips/include/asm/spinlock_types.h      |  4
-rw-r--r--  arch/parisc/include/asm/spinlock.h          | 16
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h    |  4
-rw-r--r--  arch/powerpc/include/asm/spinlock.h         | 18
-rw-r--r--  arch/powerpc/include/asm/spinlock_types.h   |  4
-rw-r--r--  arch/powerpc/lib/locks.c                    |  2
-rw-r--r--  arch/s390/include/asm/spinlock.h            | 30
-rw-r--r--  arch/s390/include/asm/spinlock_types.h      |  4
-rw-r--r--  arch/s390/lib/spinlock.c                    | 12
-rw-r--r--  arch/sh/include/asm/spinlock.h              | 12
-rw-r--r--  arch/sh/include/asm/spinlock_types.h        |  4
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h        | 20
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h        | 12
-rw-r--r--  arch/sparc/include/asm/spinlock_types.h     |  4
-rw-r--r--  arch/x86/include/asm/spinlock.h             | 16
-rw-r--r--  arch/x86/include/asm/spinlock_types.h       |  4
28 files changed, 143 insertions(+), 143 deletions(-)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 4dac79f504c..e8b2970f037 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void __raw_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void __raw_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int __raw_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int __raw_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void __raw_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void __raw_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
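The alpha can_lock tests above imply the lock-word encoding: bit 0 is the writer flag and the upper bits count readers. A portable sketch of just those two predicates, assuming C11 atomics in place of the ll/sc assembly:

#include <stdatomic.h>

typedef struct { _Atomic unsigned int lock; } rw_sketch_t;

/* a reader may enter as long as no writer holds bit 0 */
static int read_can_lock_sketch(rw_sketch_t *l)
{
	return (atomic_load(&l->lock) & 1) == 0;
}

/* a writer needs the word fully idle: no writer bit, no readers */
static int write_can_lock_sketch(rw_sketch_t *l)
{
	return atomic_load(&l->lock) == 0;
}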
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 08975ee0a10..54c2afce0a1 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index de62eb098f6..a8671d8bc7d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 9622e126a8d..d14d197ae04 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 62d49540e02..7e1c56b0a57 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
cpu_relax();
}
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int __raw_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int __raw_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
__raw_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
__raw_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__raw_write_unlock_asm(&rw->lock);
}
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index c8a3928a58c..1a33608c958 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -21,8 +21,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index a2e8a394d55..1d7d3a8046c 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int __raw_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int __raw_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
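The cris variant builds the rwlock out of an arch spinlock guarding a biased counter: the counter starts at RW_LOCK_BIAS, each reader takes one unit off it, and a writer claims the whole bias. A userspace sketch of that pattern, with a pthread mutex standing in for arch_spin_lock():

#include <pthread.h>

#define RW_LOCK_BIAS 0x01000000

struct rw_sketch {
	pthread_mutex_t slock;	/* stands in for the guarding arch_spinlock_t */
	int lock;		/* RW_LOCK_BIAS when fully unlocked */
};

static struct rw_sketch rw_demo = { PTHREAD_MUTEX_INITIALIZER, RW_LOCK_BIAS };

static int read_trylock_sketch(struct rw_sketch *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->slock);
	if (rw->lock > 0) {	/* no writer owns the bias */
		rw->lock--;	/* account one more reader */
		ret = 1;
	}
	pthread_mutex_unlock(&rw->slock);
	return ret;
}

Because every path, including the unlock paths above, runs under the same spinlock, plain counter arithmetic is safe here.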
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index b06165f6352..6715b6a8ebc 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
#define __raw_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -190,14 +190,14 @@ do { \
#define __raw_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void __raw_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int __raw_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 6a11b65fa66..e2b42a52a6d 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
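The ia64 type above puts a 31-bit read_counter below a write_lock bit at bit 31, which is why the fetchadd-based __raw_read_lock can test for a writer with a simple sign check. A C11-atomics sketch of that pattern, assuming (as the macro's use suggests) that ia64_fetchadd returns the pre-add value:

#include <stdatomic.h>

static void read_lock_sketch(_Atomic int *word)
{
	/* a negative old value means the write_lock bit (bit 31) was set */
	while (atomic_fetch_add(word, 1) < 0) {
		atomic_fetch_sub(word, 1);	/* back the reader count out */
		while (atomic_load(word) < 0)
			;			/* spin until the writer is gone */
	}
}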
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 8acac950a43..1c76af8c8e1 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -148,7 +148,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 5873a870110..92e27672661 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -13,11 +13,11 @@ typedef struct {
typedef struct {
volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 95edebaaf22..7bf27c8a336 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -256,7 +256,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
*/
#define __raw_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -335,7 +335,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -377,7 +377,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -389,7 +389,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
@@ -433,7 +433,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index b4c5efaadb9..ee197c2f9c9 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -18,8 +18,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 235e7e386e2..1ff3a0a94a4 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
@@ -141,7 +141,7 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,7 +182,7 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 396d2746ca5..8c373aa28a8 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -14,8 +14,8 @@ typedef struct {
typedef struct {
arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index cdcaf6b9708..2fad2c07c59 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,7 +225,7 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
return tmp;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_read_trylock(rw) > 0))
@@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
while (1) {
if (likely(arch_write_trylock(rw) == 0))
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
}
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
return arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
return arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
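The powerpc comments above pin down the return conventions: arch_read_trylock() returns the old count plus one (> 0 on success) and arch_write_trylock() returns the old value (0 on success), so the blocking locks are just trylock in a loop around __rw_yield(). A portable CAS approximation of those conventions, assuming a signed word that is positive while readers hold it and holds a negative token for a writer (WRITE_TOKEN is a placeholder, not the kernel's token):

#include <stdatomic.h>

#define WRITE_TOKEN (-1L)	/* assumption: stand-in writer token */

static long read_trylock_sketch(_Atomic long *lock)
{
	long old = atomic_load(lock);

	if (old < 0 || !atomic_compare_exchange_strong(lock, &old, old + 1))
		return 0;	/* writer present, or we raced: failure */
	return old + 1;		/* > 0: read lock taken */
}

static long write_trylock_sketch(_Atomic long *lock)
{
	long expected = 0;

	atomic_compare_exchange_strong(lock, &expected, WRITE_TOKEN);
	return expected;	/* 0 means we installed the token */
}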
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index f5f39d82711..2351adc4fdc 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index ee395e39211..58e14fba11b 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index a94c146657a..7f98f0e48ac 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
*/
#define __raw_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
-
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
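Every s390 path above funnels through _raw_compare_and_swap() on a word whose bit 31 marks the writer and whose low 31 bits count readers. A C11 sketch of the two fast paths:

#include <stdatomic.h>

static int write_trylock_sketch(_Atomic unsigned int *lock)
{
	unsigned int expected = 0;

	/* like _raw_compare_and_swap(&rw->lock, 0, 0x80000000) */
	return atomic_compare_exchange_strong(lock, &expected, 0x80000000U);
}

static int read_trylock_sketch(_Atomic unsigned int *lock)
{
	unsigned int old = atomic_load(lock) & 0x7fffffffU;	/* drop the writer bit */

	/* succeeds only if no writer appeared and the count is unchanged */
	return atomic_compare_exchange_strong(lock, &old, old + 1);
}

The slow paths (_raw_read_lock_wait() and friends in arch/s390/lib/spinlock.c below) retry the same operation under a bounded spin.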
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index e25c0370f6c..9c76656a0af 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f4596452f07..09fee9a1aa1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lock)
}
EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index da1c6491ed4..7f3626aac86 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -108,7 +108,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
*/
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -142,7 +142,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -160,7 +160,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
"mov.l %1, @%0 ! __raw_write_unlock \n\t"
@@ -170,7 +170,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
@@ -193,7 +193,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return (oldval > 0);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index a3be2db960e..9b7560db06c 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -13,9 +13,9 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 9b0f2f53c81..06d37e588fd 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -65,7 +65,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -96,9 +96,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -116,9 +116,9 @@ do { unsigned long flags; \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int __raw_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
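The sparc32 layout in the comment above (a 24-bit reader count over an 8-bit wlock byte, with __raw_write_lock setting the whole word to ~0U) reduces to two helpers; a sketch:

#include <stdint.h>

static inline uint32_t readers_sketch(uint32_t lock)
{
	return lock >> 8;		/* bits 31..8: 24-bit reader count */
}

static inline int write_locked_sketch(uint32_t lock)
{
	return (lock & 0xffU) != 0;	/* bits 7..0: the wlock byte */
}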
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 7cf58a2fcda..2b22d7f2c2f 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index c145e63a5d6..9c454fdeaad 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index ab9055fd57d..99cb86e843a 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void __raw_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,12 +284,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 696f8364a4f..dcb48b2edc1 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct arch_spinlock {
typedef struct {
unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */