From e0c4ae06495494a38843da8445e2b6e1f59b9253 Mon Sep 17 00:00:00 2001
From: Mike Frysinger
Date: Tue, 1 Jan 2008 19:12:15 +0100
Subject: x86: fix asm-x86/byteorder.h for userspace export

Since asm-x86/byteorder.h is exported to userspace, use __asm__
rather than asm in its code.

Signed-off-by: Mike Frysinger
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/asm-x86/byteorder.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index 1f2d6d5bf20..fe2f2e5d51b 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -30,13 +30,13 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val)
 	} v;
 	v.u = val;
 #ifdef CONFIG_X86_BSWAP
-	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
+	__asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
 #else
 	v.s.a = ___arch__swab32(v.s.a);
 	v.s.b = ___arch__swab32(v.s.b);
-	asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
+	__asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b));
 #endif
 	return v.u;
 }
--
cgit v1.2.3

From 56986d4210e5077d67e4eff538a316a6cc4e5158 Mon Sep 17 00:00:00 2001
From: Mike Frysinger
Date: Tue, 1 Jan 2008 19:12:15 +0100
Subject: x86: fix asm-x86/msr.h for user-space export

Use __asm__ and __volatile__ in code that is exported to userspace.
Wrap kernel functions with __KERNEL__ so they get scrubbed.

No code changed:

   text    data     bss      dec     hex filename
9681036 1698924 3407872 14787832  e1a4f8 vmlinux.before
9681036 1698924 3407872 14787832  e1a4f8 vmlinux.after

Signed-off-by: Mike Frysinger
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/asm-x86/msr.h | 74 +++++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 35 deletions(-)

(limited to 'include')

diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index ba4b3143212..664a2fa7adc 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -191,38 +191,6 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
 
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 8\n\t"				\
-		     " .quad 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-	ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-	({ int ret__;						\
-	  asm volatile ("1: rdmsr\n"				\
-			"2:\n"					\
-			".section .fixup,\"ax\"\n"		\
-			"3: movl %4,%0\n"			\
-			" jmp 2b\n"				\
-			".previous\n"				\
-			".section __ex_table,\"a\"\n"		\
-			" .align 8\n"				\
-			" .quad 1b,3b\n"			\
-			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
-			:"c"(msr), "i"(-EIO), "0"(0));		\
-	ret__; })
-
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
@@ -230,17 +198,17 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
      __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 
 #define rdtscp(low,high,aux) \
-	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+	__asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
 
 #define rdtscll(val) do { \
      unsigned int __a,__d; \
-     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
      (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 } while(0)
 
 #define rdtscpll(val, aux) do { \
      unsigned long __a, __d; \
-     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
      (val) = (__d << 32) | __a; \
 } while (0)
 
@@ -253,6 +221,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 		: "=a" (low), "=d" (high) \
 		: "c" (counter))
 
+
 static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
 			 unsigned int *ecx, unsigned int *edx)
 {
@@ -320,6 +289,40 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
+#ifdef __KERNEL__
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;			\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
+		     "1:\n\t"					\
+		     ".section .fixup,\"ax\"\n\t"		\
+		     "3: movl %4,%0 ; jmp 1b\n\t"		\
+		     ".previous\n\t"				\
+		     ".section __ex_table,\"a\"\n"		\
+		     " .align 8\n\t"				\
+		     " .quad 2b,3b\n\t"				\
+		     ".previous"				\
+		     : "=a" (ret__)				\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+	ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__;						\
+	  asm volatile ("1: rdmsr\n"				\
+			"2:\n"					\
+			".section .fixup,\"ax\"\n"		\
+			"3: movl %4,%0\n"			\
+			" jmp 2b\n"				\
+			".previous\n"				\
+			".section __ex_table,\"a\"\n"		\
+			" .align 8\n"				\
+			" .quad 1b,3b\n"			\
+			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+			:"c"(msr), "i"(-EIO), "0"(0));		\
+	ret__; })
+
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
@@ -343,6 +346,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	return wrmsr_safe(msr_no, l, h);
 }
 #endif /* CONFIG_SMP */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #endif /* !__i386__ */
--
cgit v1.2.3
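
Background for both patches, with a minimal standalone sketch (not part of
the patches themselves): headers under include/asm-x86 are copied out to
userspace by the kernel's header export process, where programs may compile
them in strict ISO C mode. GCC's -std=c89/-std=c99 modes disable the bare
"asm" keyword, so exported headers must use the reserved spellings __asm__
and __volatile__, and anything guarded by #ifdef __KERNEL__ is stripped from
the exported copy (scripts/unifdef did this scrubbing at the time). In the
sketch below the file name, build flags, and the demo_swab32 helper are all
hypothetical, chosen only to illustrate the point:

/* byteswap-demo.c: illustrative only; x86-specific (uses bswapl).
 *
 * Hypothetical builds:
 *   gcc -std=gnu99 -Wall -O2 -DUSE_PLAIN_ASM byteswap-demo.c   # ok
 *   gcc -std=c99   -Wall -O2 -DUSE_PLAIN_ASM byteswap-demo.c   # fails
 *   gcc -std=c99   -Wall -O2 byteswap-demo.c                   # ok
 *
 * Under -std=c99, gcc treats the bare "asm" as an ordinary identifier,
 * so the first form fails to parse; __asm__ lives in the
 * implementation-reserved namespace and is accepted in every mode.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t demo_swab32(uint32_t val)
{
#ifdef USE_PLAIN_ASM
	asm("bswapl %0" : "=r" (val) : "0" (val));	/* GNU-C-only keyword */
#else
	__asm__("bswapl %0" : "=r" (val) : "0" (val));	/* always available */
#endif
	return val;
}

#ifdef __KERNEL__
/* Kernel-only helpers belong in a block like this; the export scrubbing
 * removes it, so userspace never sees macros such as wrmsr_safe() that
 * depend on kernel-internal exception fixup tables. */
#endif

int main(void)
{
	printf("%08x\n", (unsigned)demo_swab32(0x12345678));	/* 78563412 */
	return 0;
}

Pairing __asm__ with __volatile__ in the rdtsc/rdtscp macros follows the
same reasoning: it keeps the entire construct in the reserved namespace,
out of reach of userspace macros and strict-mode keyword restrictions.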