author	Akinobu Mita <akinobu.mita@gmail.com>	2010-10-06 00:55:29 +0900
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-10-05 12:49:35 -0400
commit	de5bbad6770882209b0ac58b0ba9259a98cfb953 (patch)
tree	da258f026fbd5342a61ce858fc2de5ed1e233932 /arch/tile/lib
parent	2bfc96a127bc1cc94d26bfaa40159966064f9c8c (diff)
tile: replace some BUG_ON checks with BUILD_BUG_ON checks
Some BUG_ON checks can be detected at compile time rather than at runtime.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
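The point of the change can be illustrated with a minimal, hypothetical sketch (not the kernel's actual macro; all names below are made up for this example): a classic way to build a compile-time assertion is to let the checked condition produce an illegal negative array size, so a violated assumption stops the build instead of surfacing as a runtime BUG.

/*
 * Hedged illustration only. If cond evaluates to a non-zero constant,
 * the array type becomes char[-1], which is ill-formed, and the
 * compiler rejects the translation unit.
 */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define MY_HASH_SIZE 512	/* hypothetical stand-in for ATOMIC_HASH_SIZE */

void check_assumptions(void)
{
	/* Compiles only if MY_HASH_SIZE is a power of two. */
	MY_BUILD_BUG_ON(MY_HASH_SIZE & (MY_HASH_SIZE - 1));
}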
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--	arch/tile/lib/atomic_32.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 8040b42a8eea..7a5cc706ab62 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -300,7 +300,7 @@ void __init __init_atomic_per_cpu(void)
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* Validate power-of-two and "bigger than cpus" assumption */
- BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
/*
@@ -314,17 +314,17 @@ void __init __init_atomic_per_cpu(void)
BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);
/* The locks must all fit on one page. */
- BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
+ BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);
/*
* We use the page offset of the atomic value's address as
* an index into atomic_locks, excluding the low 3 bits.
* That should not produce more indices than ATOMIC_HASH_SIZE.
*/
- BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
+ BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
/* The futex code makes this assumption, so we validate it here. */
- BUG_ON(sizeof(atomic_t) != sizeof(int));
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
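The last two BUILD_BUG_ON checks in the hunk above guard the lock-hashing scheme described in the comment: the page offset of the atomic value's address, with the low 3 bits dropped, indexes into atomic_locks. A hedged sketch of that indexing (hypothetical helper name, not the kernel's code) shows why (PAGE_SIZE >> 3) must not exceed ATOMIC_HASH_SIZE:

/*
 * Illustration only: the computed index is bounded by
 * (page_size >> 3) - 1, so as long as the lock table holds at least
 * page_size >> 3 entries the index can never run off its end.
 */
static inline unsigned long example_lock_index(const volatile void *v,
					       unsigned long page_size)
{
	return ((unsigned long)v & (page_size - 1)) >> 3;
}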