author     travis@sgi.com <travis@sgi.com>    2008-01-30 13:32:52 +0100
committer  Ingo Molnar <mingo@elte.hu>        2008-01-30 13:32:52 +0100
commit     0af8a5ccc51ee2269712c90ab09c91b0150f4224 (patch)
tree       449771ee3ee0a681712f41378ce6ff8d4433edef
parent     acdac87202a408133ee8f7985076de9d2e0dc5ab (diff)
download   linux-stable-0af8a5ccc51ee2269712c90ab09c91b0150f4224.tar.gz
           linux-stable-0af8a5ccc51ee2269712c90ab09c91b0150f4224.tar.bz2
           linux-stable-0af8a5ccc51ee2269712c90ab09c91b0150f4224.zip
x86_32: use generic percpu.h
x86_32 only provides a special way to obtain the local per cpu area offset
via x86_read_percpu. Otherwise it can fully use the generic handling.

Cc: ak@suse.de
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  include/asm-x86/percpu_32.h  |  30
1 files changed, 9 insertions, 21 deletions
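To make the offset scheme concrete, here is a minimal, stand-alone user-space sketch of what the generic percpu.h implements and what this patch lets x86_32 reuse: every CPU gets a private copy of each per-cpu variable at a fixed distance from a reference copy, per_cpu(var, cpu) shifts the reference address by that CPU's offset, and the only arch-specific piece x86_32 keeps is how the local offset (__my_cpu_offset) is obtained. All demo_/DEMO_ names below are hypothetical and exist only for illustration; in the kernel the local offset comes from x86_read_percpu(this_cpu_off), a %fs-relative load.

/*
 * Hedged sketch of the per-cpu offset scheme; plain C, not kernel code.
 * Assumes area 0 acts as the "reference" copy of the per-cpu data.
 */
#include <stdio.h>

#define DEMO_NR_CPUS 4

/* One private area per CPU; each CPU's copy of every per-cpu variable sits
 * at the same offset inside its own area. */
static unsigned long demo_percpu_area[DEMO_NR_CPUS][8];

/* Counterpart of __per_cpu_offset[]: byte distance from the reference copy
 * (kept in area 0 here) to each CPU's private copy. */
static long demo_per_cpu_offset[DEMO_NR_CPUS];

/* The reference copy of one per-cpu variable. */
static unsigned long *demo_counter_ref = &demo_percpu_area[0][0];

/* Generic accessor: take the reference address and shift it by the chosen
 * CPU's offset - this is what per_cpu(var, cpu) boils down to. */
#define DEMO_PER_CPU(refptr, cpu) \
	(*(unsigned long *)((char *)(refptr) + demo_per_cpu_offset[cpu]))

/* Stand-in for the one hook the patch keeps, __my_cpu_offset: here we just
 * pretend the code always runs on CPU 2. */
static const int demo_this_cpu = 2;
#define DEMO_MY_CPU_OFFSET demo_per_cpu_offset[demo_this_cpu]

/* Counterpart of __get_cpu_var(): same shift, using the local offset hook. */
#define DEMO_GET_CPU_VAR(refptr) \
	(*(unsigned long *)((char *)(refptr) + DEMO_MY_CPU_OFFSET))

int main(void)
{
	int cpu;

	/* Record where each CPU's area sits relative to the reference area. */
	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		demo_per_cpu_offset[cpu] = (char *)demo_percpu_area[cpu] -
					   (char *)demo_percpu_area[0];

	/* Write each CPU's private copy through the generic accessor. */
	for (cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
		DEMO_PER_CPU(demo_counter_ref, cpu) = 100 + cpu;

	/* "This CPU" access resolves through the arch-provided offset hook. */
	printf("cpu %d sees counter = %lu\n",
	       demo_this_cpu, DEMO_GET_CPU_VAR(demo_counter_ref));
	return 0;
}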
diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h
index 77bd0045f331..e62ce2fe2c9c 100644
--- a/include/asm-x86/percpu_32.h
+++ b/include/asm-x86/percpu_32.h
@@ -42,26 +42,7 @@
  */
 #ifdef CONFIG_SMP
-/* This is used for other cpus to find our section. */
-extern unsigned long __per_cpu_offset[];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({				\
-	extern int simple_indentifier_##var(void);	\
-	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-
-#define __raw_get_cpu_var(var) (*({			\
-	extern int simple_indentifier_##var(void);	\
-	RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \
-}))
-
-#define __get_cpu_var(var) __raw_get_cpu_var(var)
+#define __my_cpu_offset		x86_read_percpu(this_cpu_off)

 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)		\
@@ -74,11 +55,18 @@ do {						\
 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
 #define __percpu_seg "%%fs:"
+
 #else /* !SMP */
-#include <asm-generic/percpu.h>
+
 #define __percpu_seg ""
+
 #endif /* SMP */

+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);
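The hunk above keeps __percpu_seg defined to "%%fs:" on SMP and to the empty string on UP. The reason it is a plain string fragment is that the preprocessor pastes it into the inline-asm templates used for the single-instruction per-cpu ops, so SMP builds emit segment-prefixed memory operands and UP builds emit ordinary ones. Below is a small, hedged sketch of that string pasting; the DEMO_* names are illustrative only and the example merely prints the template it would hand to asm(), rather than reproducing the header's actual helper macros.

/*
 * Hedged sketch of __percpu_seg-style string pasting; user-space C, not the
 * kernel's helpers.  Build with -DDEMO_SMP to see the segment prefix appear.
 */
#include <stdio.h>

#ifdef DEMO_SMP
#define DEMO_PERCPU_SEG "%%fs:"
#else
#define DEMO_PERCPU_SEG ""
#endif

/* Adjacent string literals are concatenated at compile time, so the segment
 * prefix is baked into the template the compiler would see inside asm(). */
#define DEMO_ASM_TEMPLATE(op) op "l " DEMO_PERCPU_SEG "%1, %0"

int main(void)
{
	/* Prints "movl %%fs:%1, %0" when built with -DDEMO_SMP,
	 * and "movl %1, %0" otherwise. */
	puts(DEMO_ASM_TEMPLATE("mov"));
	return 0;
}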