| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-23 16:38:03 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-23 16:38:03 -0700 |
| commit | a7aed1c2dc4939d1d61285c738ad32700d791692 | |
| tree | a64cda4c4dd29137a09f06a8c1d5db7cd20e7da5 | /arch |
| parent | 1212663fba7c5e003e05d24f043d5ed57eb18b24 | |
| parent | 1b82ba6e47c13ee369a4808f72d003499f8c7920 | |
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (35 commits)
x86: Add HPET force support for MCP55 (nForce 5) chipsets
x86: Force enable HPET for CK804 (nForce 4) chipsets
x86: clean up setup.h and the boot code
x86: Save registers in saved_context during suspend and hibernation
x86: merge setup_32/64.h
x86: merge signal_32/64.h
x86: merge required-features.h
x86: merge sigcontext_32/64.h
x86: merge msr_32/64.h
x86: merge mtrr_32/64.h
x86: merge statfs_32/64.h
x86: merge stat_32/64.h
x86: merge shmbuf_32/64.h
x86: merge ptrace_32/64.h
x86: merge msgbuf_32/64.h
x86: merge elf_32/64.h
x86: merge byteorder_32/64.h
x86: whitespace cleanup of mce_64.c
x86: consolidate the cpu/ related code usage
x86: prepare consolidation of cpu/ related code usage
...
Diffstat (limited to 'arch')
33 files changed, 368 insertions, 430 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 5bed8be34ba5..b4437ce0f973 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -1270,6 +1270,8 @@ source "drivers/Kconfig" source "fs/Kconfig" +source "kernel/Kconfig.instrumentation" + source "arch/i386/Kconfig.debug" source "security/Kconfig" diff --git a/arch/i386/Makefile b/arch/i386/Makefile index b81cb64d48e5..f5b9a37def8b 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile @@ -20,6 +20,12 @@ # Fill in SRCARCH SRCARCH := x86 +# BITS is used as extension for files which are available in a 32 bit +# and a 64 bit version to simplify shared Makefiles. +# e.g.: obj-y += foo_$(BITS).o +BITS := 32 +export BITS + HAS_BIARCH := $(call cc-option-yn, -m32) ifeq ($(HAS_BIARCH),y) AS := $(AS) --32 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 20bab9431acb..5f9a2e72a731 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h @@ -23,7 +23,7 @@ #include <linux/types.h> #include <linux/edd.h> #include <asm/boot.h> -#include <asm/bootparam.h> +#include <asm/setup.h> /* Useful macros */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index 2d77ee728f92..7a0d00b2cf28 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -38,11 +38,9 @@ static const char* safe_abs_relocs[] = { static int is_safe_abs_reloc(const char* sym_name) { - int i, array_size; - - array_size = sizeof(safe_abs_relocs)/sizeof(char*); + int i; - for(i = 0; i < array_size; i++) { + for(i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) { if (!strcmp(sym_name, safe_abs_relocs[i])) /* Match found */ return 1; diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c index 0eeef3989a17..1f95750ede28 100644 --- a/arch/x86/boot/main.c +++ b/arch/x86/boot/main.c @@ -26,8 +26,6 @@ char *heap_end = _end; /* Default end of heap = no heap */ * screws up the old-style command line protocol, adjust by * filling in the new-style command line pointer instead. */ -#define OLD_CL_MAGIC 0xA33F -#define OLD_CL_ADDRESS 0x20 static void copy_boot_params(void) { diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 18dcdc6fb7aa..46bb609e2444 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -1,5 +1,15 @@ -ifeq ($(CONFIG_X86_32),y) -include ${srctree}/arch/x86/crypto/Makefile_32 -else -include ${srctree}/arch/x86/crypto/Makefile_64 -endif +# +# Arch-specific CryptoAPI modules. +# + +obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o +obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o + +obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o +obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o + +aes-i586-y := aes-i586-asm_32.o aes_32.o +twofish-i586-y := twofish-i586-asm_32.o twofish_32.o + +aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o +twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o diff --git a/arch/x86/crypto/Makefile_32 b/arch/x86/crypto/Makefile_32 deleted file mode 100644 index 2d873a2388ed..000000000000 --- a/arch/x86/crypto/Makefile_32 +++ /dev/null @@ -1,12 +0,0 @@ -# -# x86/crypto/Makefile -# -# Arch-specific CryptoAPI modules. 
-# - -obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o -obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o - -aes-i586-y := aes-i586-asm_32.o aes_32.o -twofish-i586-y := twofish-i586-asm_32.o twofish_32.o - diff --git a/arch/x86/crypto/Makefile_64 b/arch/x86/crypto/Makefile_64 deleted file mode 100644 index b40896276e93..000000000000 --- a/arch/x86/crypto/Makefile_64 +++ /dev/null @@ -1,12 +0,0 @@ -# -# x86/crypto/Makefile -# -# Arch-specific CryptoAPI modules. -# - -obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o -obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o - -aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o -twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o - diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32 index ccea590bbb92..b9d679820306 100644 --- a/arch/x86/kernel/Makefile_32 +++ b/arch/x86/kernel/Makefile_32 @@ -26,7 +26,7 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse_32.o obj-$(CONFIG_X86_LOCAL_APIC) += apic_32.o nmi_32.o obj-$(CONFIG_X86_IO_APIC) += io_apic_32.o obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o -obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash_32.o +obj-$(CONFIG_KEXEC) += machine_kexec_32.o relocate_kernel_32.o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_32.o obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64 index dec06e769281..466337ae9a1e 100644 --- a/arch/x86/kernel/Makefile_64 +++ b/arch/x86/kernel/Makefile_64 @@ -9,25 +9,21 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \ x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \ setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \ pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \ - perfctr-watchdog.o i8253.o + i8253.o obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o -obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o -obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o -obj-$(CONFIG_MTRR) += cpu/mtrr/ -obj-$(CONFIG_ACPI) += acpi/ +obj-y += cpu/ +obj-y += acpi/ obj-$(CONFIG_X86_MSR) += msr.o obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_X86_CPUID) += cpuid.o obj-$(CONFIG_SMP) += smp_64.o smpboot_64.o trampoline_64.o tsc_sync.o obj-y += apic_64.o nmi_64.o obj-y += io_apic_64.o mpparse_64.o genapic_64.o genapic_flat_64.o -obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash_64.o +obj-$(CONFIG_KEXEC) += machine_kexec_64.o relocate_kernel_64.o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_64.o obj-$(CONFIG_PM) += suspend_64.o obj-$(CONFIG_HIBERNATION) += suspend_asm_64.o -obj-$(CONFIG_CPU_FREQ) += cpu/cpufreq/ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_IOMMU) += pci-gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o @@ -42,13 +38,6 @@ obj-$(CONFIG_MODULES) += module_64.o obj-$(CONFIG_PCI) += early-quirks.o obj-y += topology.o -obj-y += intel_cacheinfo.o -obj-y += addon_cpuid_features.o obj-y += pcspeaker.o CFLAGS_vsyscall_64.o := $(PROFILING) -g0 - -therm_throt-y += cpu/mcheck/therm_throt.o -intel_cacheinfo-y += cpu/intel_cacheinfo.o -addon_cpuid_features-y += cpu/addon_cpuid_features.o -perfctr-watchdog-y += cpu/perfctr-watchdog.o diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 3d5671939542..1351c3982ee4 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -1,5 +1,7 @@ -ifeq ($(CONFIG_X86_32),y) -include ${srctree}/arch/x86/kernel/acpi/Makefile_32 -else 
-include ${srctree}/arch/x86/kernel/acpi/Makefile_64 +obj-$(CONFIG_ACPI) += boot.o +obj-$(CONFIG_ACPI_SLEEP) += sleep_$(BITS).o wakeup_$(BITS).o + +ifneq ($(CONFIG_ACPI_PROCESSOR),) +obj-y += cstate.o processor.o endif + diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32 deleted file mode 100644 index 045dd54b33e0..000000000000 --- a/arch/x86/kernel/acpi/Makefile_32 +++ /dev/null @@ -1,7 +0,0 @@ -obj-$(CONFIG_ACPI) += boot.o -obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o - -ifneq ($(CONFIG_ACPI_PROCESSOR),) -obj-y += cstate.o processor.o -endif - diff --git a/arch/x86/kernel/acpi/Makefile_64 b/arch/x86/kernel/acpi/Makefile_64 deleted file mode 100644 index 629425bc002d..000000000000 --- a/arch/x86/kernel/acpi/Makefile_64 +++ /dev/null @@ -1,7 +0,0 @@ -obj-y := boot.o -obj-$(CONFIG_ACPI_SLEEP) += sleep_64.o wakeup_64.o - -ifneq ($(CONFIG_ACPI_PROCESSOR),) -obj-y += processor.o cstate.o -endif - diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 55608ec2ed72..5ed3bc5c61d7 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -4,6 +4,7 @@ #include <asm/pgtable.h> #include <asm/page.h> #include <asm/msr.h> +#include <asm/asm-offsets.h> # Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2 # @@ -342,31 +343,32 @@ do_suspend_lowlevel: xorl %eax, %eax call save_processor_state - movq %rsp, saved_context_esp(%rip) - movq %rax, saved_context_eax(%rip) - movq %rbx, saved_context_ebx(%rip) - movq %rcx, saved_context_ecx(%rip) - movq %rdx, saved_context_edx(%rip) - movq %rbp, saved_context_ebp(%rip) - movq %rsi, saved_context_esi(%rip) - movq %rdi, saved_context_edi(%rip) - movq %r8, saved_context_r08(%rip) - movq %r9, saved_context_r09(%rip) - movq %r10, saved_context_r10(%rip) - movq %r11, saved_context_r11(%rip) - movq %r12, saved_context_r12(%rip) - movq %r13, saved_context_r13(%rip) - movq %r14, saved_context_r14(%rip) - movq %r15, saved_context_r15(%rip) - pushfq ; popq saved_context_eflags(%rip) + movq $saved_context, %rax + movq %rsp, pt_regs_rsp(%rax) + movq %rbp, pt_regs_rbp(%rax) + movq %rsi, pt_regs_rsi(%rax) + movq %rdi, pt_regs_rdi(%rax) + movq %rbx, pt_regs_rbx(%rax) + movq %rcx, pt_regs_rcx(%rax) + movq %rdx, pt_regs_rdx(%rax) + movq %r8, pt_regs_r8(%rax) + movq %r9, pt_regs_r9(%rax) + movq %r10, pt_regs_r10(%rax) + movq %r11, pt_regs_r11(%rax) + movq %r12, pt_regs_r12(%rax) + movq %r13, pt_regs_r13(%rax) + movq %r14, pt_regs_r14(%rax) + movq %r15, pt_regs_r15(%rax) + pushfq + popq pt_regs_eflags(%rax) movq $.L97, saved_rip(%rip) - movq %rsp,saved_rsp - movq %rbp,saved_rbp - movq %rbx,saved_rbx - movq %rdi,saved_rdi - movq %rsi,saved_rsi + movq %rsp, saved_rsp + movq %rbp, saved_rbp + movq %rbx, saved_rbx + movq %rdi, saved_rdi + movq %rsi, saved_rsi addq $8, %rsp movl $3, %edi @@ -377,32 +379,35 @@ do_suspend_lowlevel: .L99: .align 4 movl $24, %eax - movw %ax, %ds - movq saved_context+58(%rip), %rax - movq %rax, %cr4 - movq saved_context+50(%rip), %rax - movq %rax, %cr3 - movq saved_context+42(%rip), %rax - movq %rax, %cr2 - movq saved_context+34(%rip), %rax - movq %rax, %cr0 - pushq saved_context_eflags(%rip) ; popfq - movq saved_context_esp(%rip), %rsp - movq saved_context_ebp(%rip), %rbp - movq saved_context_eax(%rip), %rax - movq saved_context_ebx(%rip), %rbx - movq saved_context_ecx(%rip), %rcx - movq saved_context_edx(%rip), %rdx - movq saved_context_esi(%rip), %rsi - movq saved_context_edi(%rip), %rdi - movq saved_context_r08(%rip), %r8 - movq 
saved_context_r09(%rip), %r9 - movq saved_context_r10(%rip), %r10 - movq saved_context_r11(%rip), %r11 - movq saved_context_r12(%rip), %r12 - movq saved_context_r13(%rip), %r13 - movq saved_context_r14(%rip), %r14 - movq saved_context_r15(%rip), %r15 + movw %ax, %ds + + /* We don't restore %rax, it must be 0 anyway */ + movq $saved_context, %rax + movq saved_context_cr4(%rax), %rbx + movq %rbx, %cr4 + movq saved_context_cr3(%rax), %rbx + movq %rbx, %cr3 + movq saved_context_cr2(%rax), %rbx + movq %rbx, %cr2 + movq saved_context_cr0(%rax), %rbx + movq %rbx, %cr0 + pushq pt_regs_eflags(%rax) + popfq + movq pt_regs_rsp(%rax), %rsp + movq pt_regs_rbp(%rax), %rbp + movq pt_regs_rsi(%rax), %rsi + movq pt_regs_rdi(%rax), %rdi + movq pt_regs_rbx(%rax), %rbx + movq pt_regs_rcx(%rax), %rcx + movq pt_regs_rdx(%rax), %rdx + movq pt_regs_r8(%rax), %r8 + movq pt_regs_r9(%rax), %r9 + movq pt_regs_r10(%rax), %r10 + movq pt_regs_r11(%rax), %r11 + movq pt_regs_r12(%rax), %r12 + movq pt_regs_r13(%rax), %r13 + movq pt_regs_r14(%rax), %r14 + movq pt_regs_r15(%rax), %r15 xorl %eax, %eax addq $8, %rsp diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c index f47bc493dba9..f28ccb588fba 100644 --- a/arch/x86/kernel/apic_64.c +++ b/arch/x86/kernel/apic_64.c @@ -287,6 +287,20 @@ void disable_local_APIC(void) apic_write(APIC_SPIV, value); } +void lapic_shutdown(void) +{ + unsigned long flags; + + if (!cpu_has_apic) + return; + + local_irq_save(flags); + + disable_local_APIC(); + + local_irq_restore(flags); +} + /* * This is to verify that we're looking at a real local APIC. * Check these against your board if the CPUs aren't getting diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 778953bc636c..7e50bda565b4 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -76,6 +76,34 @@ int main(void) DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); DEFINE(pbe_next, offsetof(struct pbe, next)); BLANK(); +#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry)) + ENTRY(rbx); + ENTRY(rbx); + ENTRY(rcx); + ENTRY(rdx); + ENTRY(rsp); + ENTRY(rbp); + ENTRY(rsi); + ENTRY(rdi); + ENTRY(r8); + ENTRY(r9); + ENTRY(r10); + ENTRY(r11); + ENTRY(r12); + ENTRY(r13); + ENTRY(r14); + ENTRY(r15); + ENTRY(eflags); + BLANK(); +#undef ENTRY +#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry)) + ENTRY(cr0); + ENTRY(cr2); + ENTRY(cr3); + ENTRY(cr4); + ENTRY(cr8); + BLANK(); +#undef ENTRY DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); BLANK(); DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 778396c78d65..cfdb2f3bd763 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -2,19 +2,19 @@ # Makefile for x86-compatible CPU details and quirks # -obj-y := common.o proc.o bugs.o +obj-y := intel_cacheinfo.o addon_cpuid_features.o -obj-y += amd.o -obj-y += cyrix.o -obj-y += centaur.o -obj-y += transmeta.o -obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o -obj-y += nexgen.o -obj-y += umc.o +obj-$(CONFIG_X86_32) += common.o proc.o bugs.o +obj-$(CONFIG_X86_32) += amd.o +obj-$(CONFIG_X86_32) += cyrix.o +obj-$(CONFIG_X86_32) += centaur.o +obj-$(CONFIG_X86_32) += transmeta.o +obj-$(CONFIG_X86_32) += intel.o +obj-$(CONFIG_X86_32) += nexgen.o +obj-$(CONFIG_X86_32) += umc.o -obj-$(CONFIG_X86_MCE) += mcheck/ - -obj-$(CONFIG_MTRR) += mtrr/ -obj-$(CONFIG_CPU_FREQ) += cpufreq/ 
+obj-$(CONFIG_X86_MCE) += mcheck/ +obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index f1ebe1c1c17a..d7d2323bbb69 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -1,2 +1,6 @@ -obj-y = mce.o k7.o p4.o p5.o p6.o winchip.o therm_throt.o -obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o +obj-y = mce_$(BITS).o therm_throt.o + +obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o +obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o +obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o +obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index 34c781eddee4..34c781eddee4 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 07bbfe7aa7f7..b9f802e35209 100644 --- a/arch/x86/kernel/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c @@ -1,8 +1,8 @@ /* * Machine check handler. * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. - * Rest from unknown author(s). - * 2004 Andi Kleen. Rewrote most of it. + * Rest from unknown author(s). + * 2004 Andi Kleen. Rewrote most of it. */ #include <linux/init.h> @@ -23,7 +23,7 @@ #include <linux/ctype.h> #include <linux/kmod.h> #include <linux/kdebug.h> -#include <asm/processor.h> +#include <asm/processor.h> #include <asm/msr.h> #include <asm/mce.h> #include <asm/uaccess.h> @@ -63,10 +63,10 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait); * separate MCEs from kernel messages to avoid bogus bug reports. */ -struct mce_log mcelog = { +struct mce_log mcelog = { MCE_LOG_SIGNATURE, MCE_LOG_LEN, -}; +}; void mce_log(struct mce *mce) { @@ -111,42 +111,42 @@ static void print_mce(struct mce *m) "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", m->cpu, m->mcgstatus, m->bank, m->status); if (m->rip) { - printk(KERN_EMERG - "RIP%s %02x:<%016Lx> ", + printk(KERN_EMERG "RIP%s %02x:<%016Lx> ", !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" 
: "", m->cs, m->rip); if (m->cs == __KERNEL_CS) print_symbol("{%s}", m->rip); printk("\n"); } - printk(KERN_EMERG "TSC %Lx ", m->tsc); + printk(KERN_EMERG "TSC %Lx ", m->tsc); if (m->addr) printk("ADDR %Lx ", m->addr); if (m->misc) - printk("MISC %Lx ", m->misc); + printk("MISC %Lx ", m->misc); printk("\n"); printk(KERN_EMERG "This is not a software problem!\n"); - printk(KERN_EMERG - "Run through mcelog --ascii to decode and contact your hardware vendor\n"); + printk(KERN_EMERG "Run through mcelog --ascii to decode " + "and contact your hardware vendor\n"); } static void mce_panic(char *msg, struct mce *backup, unsigned long start) -{ +{ int i; oops_begin(); for (i = 0; i < MCE_LOG_LEN; i++) { unsigned long tsc = mcelog.entry[i].tsc; + if (time_before(tsc, start)) continue; - print_mce(&mcelog.entry[i]); + print_mce(&mcelog.entry[i]); if (backup && mcelog.entry[i].tsc == backup->tsc) backup = NULL; } if (backup) print_mce(backup); panic(msg); -} +} static int mce_available(struct cpuinfo_x86 *c) { @@ -170,10 +170,9 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) } } -/* +/* * The actual machine check handler */ - void do_machine_check(struct pt_regs * regs, long error_code) { struct mce m, panicm; @@ -194,7 +193,8 @@ void do_machine_check(struct pt_regs * regs, long error_code) atomic_inc(&mce_entry); if (regs) - notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); + notify_die(DIE_NMI, "machine check", regs, error_code, 18, + SIGKILL); if (!banks) goto out2; @@ -204,15 +204,15 @@ void do_machine_check(struct pt_regs * regs, long error_code) /* if the restart IP is not valid, we're done for */ if (!(m.mcgstatus & MCG_STATUS_RIPV)) no_way_out = 1; - + rdtscll(mcestart); barrier(); for (i = 0; i < banks; i++) { if (!bank[i]) continue; - - m.misc = 0; + + m.misc = 0; m.addr = 0; m.bank = i; m.tsc = 0; @@ -372,7 +372,7 @@ static void mcheck_timer(struct work_struct *work) if (mce_notify_user()) { next_interval = max(next_interval/2, HZ/100); } else { - next_interval = min(next_interval*2, + next_interval = min(next_interval * 2, (int)round_jiffies_relative(check_interval*HZ)); } @@ -423,18 +423,18 @@ static struct notifier_block mce_idle_notifier = { }; static __init int periodic_mcheck_init(void) -{ +{ next_interval = check_interval * HZ; if (next_interval) schedule_delayed_work(&mcheck_work, round_jiffies_relative(next_interval)); idle_notifier_register(&mce_idle_notifier); return 0; -} +} __initcall(periodic_mcheck_init); -/* +/* * Initialize Machine Checks for a CPU. */ static void mce_init(void *dummy) @@ -444,9 +444,9 @@ static void mce_init(void *dummy) rdmsrl(MSR_IA32_MCG_CAP, cap); banks = cap & 0xff; - if (banks > NR_BANKS) { + if (banks > NR_BANKS) { printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); - banks = NR_BANKS; + banks = NR_BANKS; } /* Use accurate RIP reporting if available. */ if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) @@ -464,15 +464,15 @@ static void mce_init(void *dummy) for (i = 0; i < banks; i++) { wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); - } + } } /* Add per CPU specific workarounds here */ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) -{ +{ /* This should be disabled by the BIOS, but isn't always */ if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { - /* disable GART TBL walk error reporting, which trips off + /* disable GART TBL walk error reporting, which trips off incorrectly with the IOMMU & 3ware & Cerberus. 
*/ clear_bit(10, &bank[4]); /* Lots of broken BIOS around that don't clear them @@ -480,7 +480,7 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) mce_bootlog = 0; } -} +} static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) { @@ -496,15 +496,15 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) } } -/* +/* * Called for each booted CPU to set up machine checks. - * Must be called with preempt off. + * Must be called with preempt off. */ void __cpuinit mcheck_init(struct cpuinfo_x86 *c) { static cpumask_t mce_cpus = CPU_MASK_NONE; - mce_cpu_quirks(c); + mce_cpu_quirks(c); if (mce_dont_init || cpu_test_and_set(smp_processor_id(), mce_cpus) || @@ -553,13 +553,15 @@ static int mce_release(struct inode *inode, struct file *file) return 0; } -static void collect_tscs(void *data) -{ +static void collect_tscs(void *data) +{ unsigned long *cpu_tsc = (unsigned long *)data; + rdtscll(cpu_tsc[smp_processor_id()]); -} +} -static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) +static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, + loff_t *off) { unsigned long *cpu_tsc; static DECLARE_MUTEX(mce_read_sem); @@ -571,19 +573,20 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff if (!cpu_tsc) return -ENOMEM; - down(&mce_read_sem); + down(&mce_read_sem); next = rcu_dereference(mcelog.next); /* Only supports full reads right now */ - if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { + if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { up(&mce_read_sem); kfree(cpu_tsc); return -EINVAL; } err = 0; - for (i = 0; i < next; i++) { + for (i = 0; i < next; i++) { unsigned long start = jiffies; + while (!mcelog.entry[i].finished) { if (time_after_eq(jiffies, start + 2)) { memset(mcelog.entry + i,0, sizeof(struct mce)); @@ -593,31 +596,34 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff } smp_rmb(); err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); - buf += sizeof(struct mce); + buf += sizeof(struct mce); timeout: ; - } + } memset(mcelog.entry, 0, next * sizeof(struct mce)); mcelog.next = 0; synchronize_sched(); - /* Collect entries that were still getting written before the synchronize. */ - + /* + * Collect entries that were still getting written before the + * synchronize. + */ on_each_cpu(collect_tscs, cpu_tsc, 1, 1); - for (i = next; i < MCE_LOG_LEN; i++) { - if (mcelog.entry[i].finished && - mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { - err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); + for (i = next; i < MCE_LOG_LEN; i++) { + if (mcelog.entry[i].finished && + mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { + err |= copy_to_user(buf, mcelog.entry+i, + sizeof(struct mce)); smp_rmb(); buf += sizeof(struct mce); memset(&mcelog.entry[i], 0, sizeof(struct mce)); } - } + } up(&mce_read_sem); kfree(cpu_tsc); - return err ? -EFAULT : buf - ubuf; + return err ? 
-EFAULT : buf - ubuf; } static unsigned int mce_poll(struct file *file, poll_table *wait) @@ -628,26 +634,29 @@ static unsigned int mce_poll(struct file *file, poll_table *wait) return 0; } -static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) +static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, + unsigned long arg) { int __user *p = (int __user *)arg; + if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + return -EPERM; switch (cmd) { - case MCE_GET_RECORD_LEN: + case MCE_GET_RECORD_LEN: return put_user(sizeof(struct mce), p); case MCE_GET_LOG_LEN: - return put_user(MCE_LOG_LEN, p); + return put_user(MCE_LOG_LEN, p); case MCE_GETCLEAR_FLAGS: { unsigned flags; - do { + + do { flags = mcelog.flags; - } while (cmpxchg(&mcelog.flags, flags, 0) != flags); - return put_user(flags, p); + } while (cmpxchg(&mcelog.flags, flags, 0) != flags); + return put_user(flags, p); } default: - return -ENOTTY; - } + return -ENOTTY; + } } static const struct file_operations mce_chrdev_ops = { @@ -678,10 +687,9 @@ void __init restart_mce(void) set_in_cr4(X86_CR4_MCE); } -/* - * Old style boot options parsing. Only for compatibility. +/* + * Old style boot options parsing. Only for compatibility. */ - static int __init mcheck_disable(char *str) { mce_dont_init = 1; @@ -702,16 +710,16 @@ static int __init mcheck_enable(char *str) else if (isdigit(str[0])) get_option(&str, &tolerant); else - printk("mce= argument %s ignored. Please use /sys", str); + printk("mce= argument %s ignored. Please use /sys", str); return 1; } __setup("nomce", mcheck_disable); __setup("mce=", mcheck_enable); -/* +/* * Sysfs support - */ + */ /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. Only one CPU is active at this time, the others get readded later using @@ -723,12 +731,12 @@ static int mce_resume(struct sys_device *dev) } /* Reinit MCEs after user configuration changes */ -static void mce_restart(void) -{ +static void mce_restart(void) +{ if (next_interval) cancel_delayed_work(&mcheck_work); /* Timer race is harmless here */ - on_each_cpu(mce_init, NULL, 1, 1); + on_each_cpu(mce_init, NULL, 1, 1); next_interval = check_interval * HZ; if (next_interval) schedule_delayed_work(&mcheck_work, @@ -744,17 +752,17 @@ DEFINE_PER_CPU(struct sys_device, device_mce); /* Why are there no generic functions for this? 
*/ #define ACCESSOR(name, var, start) \ - static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ - return sprintf(buf, "%lx\n", (unsigned long)var); \ - } \ + static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ + return sprintf(buf, "%lx\n", (unsigned long)var); \ + } \ static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ - char *end; \ - unsigned long new = simple_strtoul(buf, &end, 0); \ - if (end == buf) return -EINVAL; \ - var = new; \ - start; \ - return end-buf; \ - } \ + char *end; \ + unsigned long new = simple_strtoul(buf, &end, 0); \ + if (end == buf) return -EINVAL; \ + var = new; \ + start; \ + return end-buf; \ + } \ static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); /* TBD should generate these dynamically based on number of available banks */ diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 752fb16a817d..752fb16a817d 100644 --- a/arch/x86/kernel/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c diff --git a/arch/x86/kernel/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index c17eaf5dd6dd..c17eaf5dd6dd 100644 --- a/arch/x86/kernel/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c diff --git a/arch/x86/kernel/crash_32.c b/arch/x86/kernel/crash.c index 53589d1b1a05..af0253f94a9a 100644 --- a/arch/x86/kernel/crash_32.c +++ b/arch/x86/kernel/crash.c @@ -1,5 +1,5 @@ /* - * Architecture specific (i386) functions for kexec based crash dumps. + * Architecture specific (i386/x86_64) functions for kexec based crash dumps. * * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) * @@ -25,8 +25,11 @@ #include <linux/kdebug.h> #include <asm/smp.h> +#ifdef X86_32 #include <mach_ipi.h> - +#else +#include <asm/mach_apic.h> +#endif /* This keeps a track of which one is crashing cpu. */ static int crashing_cpu; @@ -38,7 +41,9 @@ static int crash_nmi_callback(struct notifier_block *self, unsigned long val, void *data) { struct pt_regs *regs; +#ifdef X86_32 struct pt_regs fixed_regs; +#endif int cpu; if (val != DIE_NMI_IPI) @@ -55,10 +60,12 @@ static int crash_nmi_callback(struct notifier_block *self, return NOTIFY_STOP; local_irq_disable(); +#ifdef X86_32 if (!user_mode_vm(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } +#endif crash_save_cpu(regs, cpu); disable_local_APIC(); atomic_dec(&waiting_for_crash_ipi); diff --git a/arch/x86/kernel/crash_64.c b/arch/x86/kernel/crash_64.c deleted file mode 100644 index 13432a1ae904..000000000000 --- a/arch/x86/kernel/crash_64.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Architecture specific (x86_64) functions for kexec based crash dumps. - * - * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) - * - * Copyright (C) IBM Corporation, 2004. All rights reserved. - * - */ - -#include <linux/init.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/smp.h> -#include <linux/irq.h> -#include <linux/reboot.h> -#include <linux/kexec.h> -#include <linux/delay.h> -#include <linux/elf.h> -#include <linux/elfcore.h> -#include <linux/kdebug.h> - -#include <asm/processor.h> -#include <asm/hardirq.h> -#include <asm/nmi.h> -#include <asm/hw_irq.h> -#include <asm/mach_apic.h> - -/* This keeps a track of which one is crashing cpu. 
*/ -static int crashing_cpu; - -#ifdef CONFIG_SMP -static atomic_t waiting_for_crash_ipi; - -static int crash_nmi_callback(struct notifier_block *self, - unsigned long val, void *data) -{ - struct pt_regs *regs; - int cpu; - - if (val != DIE_NMI_IPI) - return NOTIFY_OK; - - regs = ((struct die_args *)data)->regs; - cpu = raw_smp_processor_id(); - - /* - * Don't do anything if this handler is invoked on crashing cpu. - * Otherwise, system will completely hang. Crashing cpu can get - * an NMI if system was initially booted with nmi_watchdog parameter. - */ - if (cpu == crashing_cpu) - return NOTIFY_STOP; - local_irq_disable(); - - crash_save_cpu(regs, cpu); - disable_local_APIC(); - atomic_dec(&waiting_for_crash_ipi); - /* Assume hlt works */ - for(;;) - halt(); - - return 1; -} - -static void smp_send_nmi_allbutself(void) -{ - send_IPI_allbutself(NMI_VECTOR); -} - -/* - * This code is a best effort heuristic to get the - * other cpus to stop executing. So races with - * cpu hotplug shouldn't matter. - */ - -static struct notifier_block crash_nmi_nb = { - .notifier_call = crash_nmi_callback, -}; - -static void nmi_shootdown_cpus(void) -{ - unsigned long msecs; - - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - if (register_die_notifier(&crash_nmi_nb)) - return; /* return what? */ - - /* - * Ensure the new callback function is set before sending - * out the NMI - */ - wmb(); - - smp_send_nmi_allbutself(); - - msecs = 1000; /* Wait at most a second for the other cpus to stop */ - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { - mdelay(1); - msecs--; - } - /* Leave the nmi callback set */ - disable_local_APIC(); -} -#else -static void nmi_shootdown_cpus(void) -{ - /* There are no cpus to shootdown */ -} -#endif - -void machine_crash_shutdown(struct pt_regs *regs) -{ - /* - * This function is only called after the system - * has panicked or is otherwise in a critical state. - * The minimum amount of code to allow a kexec'd kernel - * to run successfully needs to happen here. - * - * In practice this means shooting down the other cpus in - * an SMP system. - */ - /* The kernel is broken so disable interrupts */ - local_irq_disable(); - - /* Make a note of crashing cpu. 
Will be used in NMI callback.*/ - crashing_cpu = smp_processor_id(); - nmi_shootdown_cpus(); - - if(cpu_has_apic) - disable_local_APIC(); - - disable_IO_APIC(); - - crash_save_cpu(regs, smp_processor_id()); -} diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 00b1c2c56454..374b7ece8961 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -124,12 +124,7 @@ ENTRY(startup_32) movsl movl boot_params - __PAGE_OFFSET + NEW_CL_POINTER,%esi andl %esi,%esi - jnz 2f # New command line protocol - cmpw $(OLD_CL_MAGIC),OLD_CL_MAGIC_ADDR - jne 1f - movzwl OLD_CL_OFFSET,%esi - addl $(OLD_CL_BASE_ADDR),%esi -2: + jz 1f # No comand line movl $(boot_command_line - __PAGE_OFFSET),%edi movl $(COMMAND_LINE_SIZE/4),%ecx rep diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index a4ce1911efdf..fab30e134836 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -60,7 +60,8 @@ static enum { NONE_FORCE_HPET_RESUME, OLD_ICH_FORCE_HPET_RESUME, ICH_FORCE_HPET_RESUME, - VT8237_FORCE_HPET_RESUME + VT8237_FORCE_HPET_RESUME, + NVIDIA_FORCE_HPET_RESUME, } force_hpet_resume_type; static void __iomem *rcba_base; @@ -321,6 +322,55 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, vt8237_force_enable_hpet); +/* + * Undocumented chipset feature taken from LinuxBIOS. + */ +static void nvidia_force_hpet_resume(void) +{ + pci_write_config_dword(cached_dev, 0x44, 0xfed00001); + printk(KERN_DEBUG "Force enabled HPET at resume\n"); +} + +static void nvidia_force_enable_hpet(struct pci_dev *dev) +{ + u32 uninitialized_var(val); + + if (!hpet_force_user || hpet_address || force_hpet_address) + return; + + pci_write_config_dword(dev, 0x44, 0xfed00001); + pci_read_config_dword(dev, 0x44, &val); + force_hpet_address = val & 0xfffffffe; + force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME; + printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n", + force_hpet_address); + cached_dev = dev; + return; +} + +/* ISA Bridges */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051, + nvidia_force_enable_hpet); + +/* LPC bridges */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366, + nvidia_force_enable_hpet); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367, + nvidia_force_enable_hpet); void force_hpet_resume(void) { @@ -334,6 +384,9 @@ void force_hpet_resume(void) case VT8237_FORCE_HPET_RESUME: return vt8237_force_hpet_resume(); + case NVIDIA_FORCE_HPET_RESUME: + return nvidia_force_hpet_resume(); + default: break; } diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c index b7e768dd87c9..500670c93d81 100644 --- a/arch/x86/kernel/smpboot_64.c +++ b/arch/x86/kernel/smpboot_64.c @@ -388,7 +388,7 @@ static void inquire_remote_apic(int apicid) printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid); - for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) { + for (i 
= 0; i < ARRAY_SIZE(regs); i++) { printk("... APIC #%d %s: ", apicid, names[i]); /* diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c index bc9f59c246fd..db284ef44d53 100644 --- a/arch/x86/kernel/suspend_64.c +++ b/arch/x86/kernel/suspend_64.c @@ -19,12 +19,6 @@ extern const void __nosave_begin, __nosave_end; struct saved_context saved_context; -unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx; -unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi; -unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11; -unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15; -unsigned long saved_context_eflags; - void __save_processor_state(struct saved_context *ctxt) { kernel_fpu_begin(); diff --git a/arch/x86/kernel/suspend_asm_64.S b/arch/x86/kernel/suspend_asm_64.S index 48344b666d2c..72f952103e50 100644 --- a/arch/x86/kernel/suspend_asm_64.S +++ b/arch/x86/kernel/suspend_asm_64.S @@ -17,24 +17,24 @@ #include <asm/asm-offsets.h> ENTRY(swsusp_arch_suspend) - - movq %rsp, saved_context_esp(%rip) - movq %rax, saved_context_eax(%rip) - movq %rbx, saved_context_ebx(%rip) - movq %rcx, saved_context_ecx(%rip) - movq %rdx, saved_context_edx(%rip) - movq %rbp, saved_context_ebp(%rip) - movq %rsi, saved_context_esi(%rip) - movq %rdi, saved_context_edi(%rip) - movq %r8, saved_context_r08(%rip) - movq %r9, saved_context_r09(%rip) - movq %r10, saved_context_r10(%rip) - movq %r11, saved_context_r11(%rip) - movq %r12, saved_context_r12(%rip) - movq %r13, saved_context_r13(%rip) - movq %r14, saved_context_r14(%rip) - movq %r15, saved_context_r15(%rip) - pushfq ; popq saved_context_eflags(%rip) + movq $saved_context, %rax + movq %rsp, pt_regs_rsp(%rax) + movq %rbp, pt_regs_rbp(%rax) + movq %rsi, pt_regs_rsi(%rax) + movq %rdi, pt_regs_rdi(%rax) + movq %rbx, pt_regs_rbx(%rax) + movq %rcx, pt_regs_rcx(%rax) + movq %rdx, pt_regs_rdx(%rax) + movq %r8, pt_regs_r8(%rax) + movq %r9, pt_regs_r9(%rax) + movq %r10, pt_regs_r10(%rax) + movq %r11, pt_regs_r11(%rax) + movq %r12, pt_regs_r12(%rax) + movq %r13, pt_regs_r13(%rax) + movq %r14, pt_regs_r14(%rax) + movq %r15, pt_regs_r15(%rax) + pushfq + popq pt_regs_eflags(%rax) /* save the address of restore_registers */ movq $restore_registers, %rax @@ -113,23 +113,25 @@ ENTRY(restore_registers) movq %rcx, %cr3 movq %rax, %cr4; # turn PGE back on - movq saved_context_esp(%rip), %rsp - movq saved_context_ebp(%rip), %rbp - /* restore GPRs (we don't restore %rax, it must be 0 anyway) */ - movq saved_context_ebx(%rip), %rbx - movq saved_context_ecx(%rip), %rcx - movq saved_context_edx(%rip), %rdx - movq saved_context_esi(%rip), %rsi - movq saved_context_edi(%rip), %rdi - movq saved_context_r08(%rip), %r8 - movq saved_context_r09(%rip), %r9 - movq saved_context_r10(%rip), %r10 - movq saved_context_r11(%rip), %r11 - movq saved_context_r12(%rip), %r12 - movq saved_context_r13(%rip), %r13 - movq saved_context_r14(%rip), %r14 - movq saved_context_r15(%rip), %r15 - pushq saved_context_eflags(%rip) ; popfq + /* We don't restore %rax, it must be 0 anyway */ + movq $saved_context, %rax + movq pt_regs_rsp(%rax), %rsp + movq pt_regs_rbp(%rax), %rbp + movq pt_regs_rsi(%rax), %rsi + movq pt_regs_rdi(%rax), %rdi + movq pt_regs_rbx(%rax), %rbx + movq pt_regs_rcx(%rax), %rcx + movq pt_regs_rdx(%rax), %rdx + movq pt_regs_r8(%rax), %r8 + movq pt_regs_r9(%rax), %r9 + movq pt_regs_r10(%rax), %r10 + movq pt_regs_r11(%rax), %r11 + movq 
pt_regs_r12(%rax), %r12 + movq pt_regs_r13(%rax), %r13 + movq pt_regs_r14(%rax), %r14 + movq pt_regs_r15(%rax), %r15 + pushq pt_regs_eflags(%rax) + popfq xorq %rax, %rax diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c index d78444c788a3..9ebc0dab66b4 100644 --- a/arch/x86/kernel/tsc_32.c +++ b/arch/x86/kernel/tsc_32.c @@ -131,38 +131,43 @@ unsigned long native_calculate_cpu_khz(void) { unsigned long long start, end; unsigned long count; - u64 delta64; + u64 delta64 = (u64)ULLONG_MAX; int i; unsigned long flags; local_irq_save(flags); - /* run 3 times to ensure the cache is warm */ + /* run 3 times to ensure the cache is warm and to get an accurate reading */ for (i = 0; i < 3; i++) { mach_prepare_counter(); rdtscll(start); mach_countup(&count); rdtscll(end); - } - /* - * Error: ECTCNEVERSET - * The CTC wasn't reliable: we got a hit on the very first read, - * or the CPU was so fast/slow that the quotient wouldn't fit in - * 32 bits.. - */ - if (count <= 1) - goto err; - delta64 = end - start; + /* + * Error: ECTCNEVERSET + * The CTC wasn't reliable: we got a hit on the very first read, + * or the CPU was so fast/slow that the quotient wouldn't fit in + * 32 bits.. + */ + if (count <= 1) + continue; + + /* cpu freq too slow: */ + if ((end - start) <= CALIBRATE_TIME_MSEC) + continue; + + /* + * We want the minimum time of all runs in case one of them + * is inaccurate due to SMI or other delay + */ + delta64 = min(delta64, (end - start)); + } - /* cpu freq too fast: */ + /* cpu freq too fast (or every run was bad): */ if (delta64 > (1ULL<<32)) goto err; - /* cpu freq too slow: */ - if (delta64 <= CALIBRATE_TIME_MSEC) - goto err; - delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */ do_div(delta64,CALIBRATE_TIME_MSEC); diff --git a/arch/x86/oprofile/Kconfig b/arch/x86/oprofile/Kconfig deleted file mode 100644 index d8a84088471a..000000000000 --- a/arch/x86/oprofile/Kconfig +++ /dev/null @@ -1,17 +0,0 @@ -config PROFILING - bool "Profiling support (EXPERIMENTAL)" - help - Say Y here to enable the extended profiling support mechanisms used - by profilers such as OProfile. - - -config OPROFILE - tristate "OProfile system profiling (EXPERIMENTAL)" - depends on PROFILING - help - OProfile is a profiling system capable of profiling the - whole system, include the kernel, kernel modules, libraries, - and applications. - - If unsure, say N. - diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index c2d24991bb2b..308970aa5382 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig @@ -833,6 +833,8 @@ source "drivers/firmware/Kconfig" source fs/Kconfig +source "kernel/Kconfig.instrumentation" + source "arch/x86_64/Kconfig.debug" source "security/Kconfig" diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 6d89ab762ffc..20eb69bd5a6d 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile @@ -24,6 +24,12 @@ # Fill in SRCARCH SRCARCH := x86 +# BITS is used as extension for files which are available in a 32 bit +# and a 64 bit version to simplify shared Makefiles. +# e.g.: obj-y += foo_$(BITS).o +BITS := 64 +export BITS + LDFLAGS := -m elf_x86_64 OBJCOPYFLAGS := -O binary -R .note -R .comment -S LDFLAGS_vmlinux := |