author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-27 21:11:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-27 21:11:26 -0800
commit     1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
tree       d223c79cc33ca1d890d264a202a1dd9c29655039
parent     d12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent     7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "So here's my next branch for powerpc. A bit late as I was on vacation
  last week. It's mostly the same stuff that was in next already, I just
  added two patches today which are the wiring up of lockref for powerpc,
  which for some reason fell through the cracks last time and is trivial.

  The highlights are, in addition to a bunch of bug fixes:

   - Reworked Machine Check handling on kernels running without a
     hypervisor (or acting as a hypervisor). Provides hooks to handle
     some errors in real mode such as TLB errors, handle SLB errors,
     etc...

   - Support for retrieving memory error information from the service
     processor on IBM servers running without a hypervisor and routing
     them to the memory poison infrastructure.

   - _PAGE_NUMA support on server processors

   - 32-bit BookE relocatable kernel support

   - FSL e6500 hardware tablewalk support

   - A bunch of new/revived board support

   - FSL e6500 deeper idle states and altivec powerdown support

  You'll notice a generic mm change here, it has been acked by the
  relevant authorities and is a pre-req for our _PAGE_NUMA support"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
  powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
  powerpc: Add support for the optimised lockref implementation
  powerpc/powernv: Call OPAL sync before kexec'ing
  powerpc/eeh: Escalate error on non-existing PE
  powerpc/eeh: Handle multiple EEH errors
  powerpc: Fix transactional FP/VMX/VSX unavailable handlers
  powerpc: Don't corrupt transactional state when using FP/VMX in kernel
  powerpc: Reclaim two unused thread_info flag bits
  powerpc: Fix races with irq_work
  Move precessing of MCE queued event out from syscall exit path.
  pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
  powerpc: Make add_system_ram_resources() __init
  powerpc: add SATA_MV to ppc64_defconfig
  powerpc/powernv: Increase candidate fw image size
  powerpc: Add debug checks to catch invalid cpu-to-node mappings
  powerpc: Fix the setup of CPU-to-Node mappings during CPU online
  powerpc/iommu: Don't detach device without IOMMU group
  powerpc/eeh: Hotplug improvement
  powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
  powerpc/eeh: Add restore_config operation
  ...
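The two late lockref patches mentioned above boil down to letting generic code test a spinlock's value without taking it, which is what the ARCH_USE_CMPXCHG_LOCKREF select further down in this diff relies on. A minimal, self-contained sketch of the idea (the field name and the zero-means-unlocked convention are illustrative only; the actual powerpc implementation lives in arch/powerpc/include/asm/spinlock.h in this series):

	/* Sketch only: assumes a lock word of zero means "unlocked". */
	typedef struct {
		volatile unsigned int slock;
	} arch_spinlock_t;

	static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
	{
		return lock.slock == 0;
	}

	/* arch_spin_is_locked() expressed via the value-based helper. */
	static inline int arch_spin_is_locked(arch_spinlock_t *lock)
	{
		return !arch_spin_value_unlocked(*lock);
	}

With a value-based test available, the generic lockref code can read the lock word and the reference count as a single unit and attempt a cmpxchg on the pair, falling back to actually taking the spinlock only when that cmpxchg fails.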
-rw-r--r--  Documentation/devicetree/bindings/video/ssd1289fb.txt | 13
-rw-r--r--  arch/powerpc/Kconfig | 15
-rw-r--r--  arch/powerpc/boot/.gitignore | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 7
-rw-r--r--  arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi | 82
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1020si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1021si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1022si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1023si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/kilauea.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/mvme5100.dts | 185
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa.dts | 23
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa.dtsi | 85
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pa_36b.dts (renamed from arch/powerpc/boot/dts/p1010rdb_36b.dts) | 47
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pb.dts | 35
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb-pb_36b.dts | 58
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dts | 66
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dtsi | 43
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb_32b.dtsi | 79
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb_36b.dtsi | 79
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/p1025twr.dts | 95
-rw-r--r--  arch/powerpc/boot/dts/p1025twr.dtsi | 280
-rw-r--r--  arch/powerpc/boot/dts/virtex440-ml507.dts | 2
-rw-r--r--  arch/powerpc/boot/mvme5100.c | 27
-rwxr-xr-x  arch/powerpc/boot/wrapper | 4
-rw-r--r--  arch/powerpc/configs/85xx/p1023_defconfig | 188
-rw-r--r--  arch/powerpc/configs/adder875_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ep88xc_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc866_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc885_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mvme5100_defconfig | 144
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/tqm8xx_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 5
-rw-r--r--  arch/powerpc/include/asm/cache.h | 14
-rw-r--r--  arch/powerpc/include/asm/cmpxchg.h | 1
-rw-r--r--  arch/powerpc/include/asm/code-patching.h | 7
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 12
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 14
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 21
-rw-r--r--  arch/powerpc/include/asm/fsl_lbc.h | 2
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 3
-rw-r--r--  arch/powerpc/include/asm/io.h | 16
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 54
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 2
-rw-r--r--  arch/powerpc/include/asm/mce.h | 197
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 13
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 21
-rw-r--r--  arch/powerpc/include/asm/opal.h | 108
-rw-r--r--  arch/powerpc/include/asm/paca.h | 16
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 66
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 14
-rw-r--r--  arch/powerpc/include/asm/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 1
-rw-r--r--  arch/powerpc/include/asm/pte-hash64.h | 8
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 10
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 12
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 9
-rw-r--r--  arch/powerpc/include/asm/tm.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 10
-rw-r--r--  arch/powerpc/include/asm/vio.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 13
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S | 54
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 38
-rw-r--r--  arch/powerpc/kernel/cputable.c | 16
-rw-r--r--  arch/powerpc/kernel/crash.c | 1
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c | 4
-rw-r--r--  arch/powerpc/kernel/eeh.c | 17
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 166
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 12
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 27
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 242
-rw-r--r--  arch/powerpc/kernel/fpu.S | 16
-rw-r--r--  arch/powerpc/kernel/fsl_booke_entry_mapping.S | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S | 1
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 266
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 1
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 1
-rw-r--r--  arch/powerpc/kernel/iomap.c | 1
-rw-r--r--  arch/powerpc/kernel/iommu.c | 145
-rw-r--r--  arch/powerpc/kernel/irq.c | 12
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 1
-rw-r--r--  arch/powerpc/kernel/mce.c | 352
-rw-r--r--  arch/powerpc/kernel/mce_power.c | 284
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 4
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 6
-rw-r--r--  arch/powerpc/kernel/paca.c | 37
-rw-r--r--  arch/powerpc/kernel/process.c | 177
-rw-r--r--  arch/powerpc/kernel/prom.c | 41
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 47
-rw-r--r--  arch/powerpc/kernel/signal.c | 3
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 21
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 14
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 1
-rw-r--r--  arch/powerpc/kernel/smp.c | 9
-rw-r--r--  arch/powerpc/kernel/swsusp_booke.S | 32
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 1
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 388
-rw-r--r--  arch/powerpc/kernel/time.c | 14
-rw-r--r--  arch/powerpc/kernel/traps.c | 72
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32_wrapper.S | 1
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64_wrapper.S | 1
-rw-r--r--  arch/powerpc/kernel/vector.S | 10
-rw-r--r--  arch/powerpc/kernel/vio.c | 31
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c | 50
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 2
-rw-r--r--  arch/powerpc/lib/code-patching.c | 15
-rw-r--r--  arch/powerpc/lib/crtsavres.S | 186
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 316
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 80
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 7
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage-book3e.c | 54
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 4
-rw-r--r--  arch/powerpc/mm/mem.c | 8
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 96
-rw-r--r--  arch/powerpc/mm/pgtable.c | 3
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 15
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 1
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S | 174
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 112
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 4
-rw-r--r--  arch/powerpc/oprofile/op_model_7450.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_fsl_emb.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_pa6t.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 1
-rw-r--r--  arch/powerpc/oprofile/op_model_rs64.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx.h | 6
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 29
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 25
-rw-r--r--  arch/powerpc/platforms/85xx/sgy_cts1000.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 17
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c | 147
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 1
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 14
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 13
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mvme5100.c | 221
-rw-r--r--  arch/powerpc/platforms/pasemi/dma_lib.c | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_core.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 223
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 24
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 35
-rw-r--r--  arch/powerpc/platforms/powernv/opal-memory-errors.c | 146
-rw-r--r--  arch/powerpc/platforms/powernv/opal-rtc.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 267
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 230
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 3
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 167
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 11
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 2
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_ifc.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_lbc.c | 31
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 5
-rw-r--r--  arch/powerpc/sysdev/ge/ge_pic.h | 1
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 1
-rw-r--r--  arch/powerpc/sysdev/indirect_pci.c | 6
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 1
-rw-r--r--  arch/powerpc/sysdev/mpic_timer.c | 10
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c | 1
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c | 1
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 1
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c | 1
-rw-r--r--  arch/powerpc/xmon/xmon.c | 4
-rw-r--r--  drivers/macintosh/windfarm_lm75_sensor.c | 2
-rw-r--r--  drivers/macintosh/windfarm_max6690_sensor.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 12
-rw-r--r--  drivers/tty/Kconfig | 2
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 28
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/of_fdt.h | 1
-rw-r--r--  include/math-emu/op-common.h | 9
-rw-r--r--  mm/mempolicy.c | 5
-rw-r--r--  scripts/mod/modpost.c | 8
214 files changed, 6171 insertions, 1649 deletions
diff --git a/Documentation/devicetree/bindings/video/ssd1289fb.txt b/Documentation/devicetree/bindings/video/ssd1289fb.txt
new file mode 100644
index 000000000000..4fcd5e68cb6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/ssd1289fb.txt
@@ -0,0 +1,13 @@
+* Solomon SSD1289 Framebuffer Driver
+
+Required properties:
+ - compatible: Should be "solomon,ssd1289fb". The only supported bus for
+ now is lbc.
+ - reg: Should contain address of the controller on the LBC bus. The detail
+ was described in Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
+
+Examples:
+display@2,0 {
+ compatible = "solomon,ssd1289fb";
+ reg = <0x2 0x0000 0x0004>;
+};
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1695b6ab503d..25493a0b174c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
select OLD_SIGACTION if PPC32
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -214,9 +215,6 @@ config DEFAULT_UIMAGE
Used to allow a board to specify it wants a uImage built by default
default n
-config REDBOOT
- bool
-
config ARCH_HIBERNATION_POSSIBLE
bool
default y
@@ -384,6 +382,12 @@ config ARCH_HAS_WALK_MEMORY
config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y
+config PPC64_SUPPORTS_MEMORY_FAILURE
+ bool "Add support for memory hwpoison"
+ depends on PPC_BOOK3S_64
+ default "y" if PPC_POWERNV
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -404,8 +408,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if PPC64 || 44x
- select DYNAMIC_MEMSTART if FSL_BOOKE
+ select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -886,7 +889,7 @@ config DYNAMIC_MEMSTART
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && 44x
+ depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE)
select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running at the
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 554734ff302e..d61c03525777 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -16,6 +16,7 @@ mktree
uImage
cuImage.*
dtbImage.*
+*.dtb
treeImage.*
zImage
zImage.initrd
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ca7f08cc4afd..90e9d9548660 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -71,9 +71,9 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
uartlite.c mpc52xx-psc.c
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
-src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
+src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
-src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
+src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c
src-plat-y := of.c epapr.c
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
@@ -95,7 +95,7 @@ src-plat-$(CONFIG_FSL_SOC_BOOKE) += cuboot-85xx.c cuboot-85xx-cpm2.c
src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
cuboot-c2k.c gamecube-head.S \
gamecube.c wii-head.S wii.c holly.c \
- prpmc2800.c
+ prpmc2800.c fixed-head.S mvme5100.c
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
@@ -286,6 +286,7 @@ image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
image-$(CONFIG_PPC_C2K) += cuImage.c2k
image-$(CONFIG_GAMECUBE) += dtbImage.gamecube
image-$(CONFIG_WII) += dtbImage.wii
+image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
new file mode 100644
index 000000000000..d3cc8d0f7c25
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x102300 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma2: dma@102300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elo3-dma";
+ reg = <0x102300 0x4>,
+ <0x102600 0x4>;
+ ranges = <0x0 0x102100 0x500>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <464 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <465 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <466 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <467 2 0 0>;
+ };
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <468 2 0 0>;
+ };
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <469 2 0 0>;
+ };
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <470 2 0 0>;
+ };
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <471 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index 68cc5e7f6477..642dc3a83d0e 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index adb82fd9057f..407cb5fd0f5b 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index e179803a81ef..ebf202234549 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -40,7 +40,8 @@
* pin muxing when the DIU is enabled.
*/
compatible = "fsl,p1022-elbc", "fsl,elbc";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index f1105bffa915..81437fdf1db4 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0xa000 */
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 1613d6e4049e..5ba7f01e2a29 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -406,7 +406,7 @@
MSI: ppc4xx-msi@C10000000 {
compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x0 0xEF620000 0x100>;
+ reg = <0xEF620000 0x100>;
sdr-base = <0x4B0>;
msi-data = <0x00000000>;
msi-mask = <0x44440000>;
diff --git a/arch/powerpc/boot/dts/mvme5100.dts b/arch/powerpc/boot/dts/mvme5100.dts
new file mode 100644
index 000000000000..1ecb341a232a
--- /dev/null
+++ b/arch/powerpc/boot/dts/mvme5100.dts
@@ -0,0 +1,185 @@
+/*
+ * Device Tree Source for Motorola/Emerson MVME5100.
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "MVME5100";
+ compatible = "MVME5100";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ serial0 = &serial0;
+ pci0 = &pci0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,7410 {
+ device_type = "cpu";
+ reg = <0x0>;
+ /* Following required by dtc but not used */
+ d-cache-line-size = <32>;
+ i-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ timebase-frequency = <25000000>;
+ clock-frequency = <500000000>;
+ bus-frequency = <100000000>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x20000000>;
+ };
+
+ hawk@fef80000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "hawk-bridge", "simple-bus";
+ ranges = <0x0 0xfef80000 0x10000>;
+ reg = <0xfef80000 0x10000>;
+
+ serial0: serial@8000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x8000 0x80>;
+ reg-shift = <4>;
+ clock-frequency = <1843200>;
+ current-speed = <9600>;
+ interrupts = <1 1>; // IRQ1 Level Active Low.
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@8200 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x8200 0x80>;
+ reg-shift = <4>;
+ clock-frequency = <1843200>;
+ current-speed = <9600>;
+ interrupts = <1 1>; // IRQ1 Level Active Low.
+ interrupt-parent = <&mpic>;
+ };
+
+ mpic: interrupt-controller@f3f80000 {
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ device_type = "open-pic";
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ reg = <0xf3f80000 0x40000>;
+ };
+ };
+
+ pci0: pci@feff0000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ compatible = "hawk-pci";
+ reg = <0xfec00000 0x400000>;
+ 8259-interrupt-acknowledge = <0xfeff0030>;
+ ranges = <0x1000000 0x0 0x0 0xfe000000 0x0 0x800000
+ 0x2000000 0x0 0x80000000 0x80000000 0x0 0x74000000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /*
+ * This definition (IDSEL 11) duplicates the
+ * interrupts definition in the i8259
+ * interrupt controller below.
+ *
+ * Do not change the interrupt sense/polarity from
+ * 0x2 to anything else, doing so will cause endless
+ * "spurious" i8259 interrupts to be fielded.
+ */
+ // IDSEL 11 - iPMC712 PCI/ISA Bridge
+ 0x5800 0x0 0x0 0x1 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x2 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x3 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x4 &mpic 0x0 0x2
+
+ /* IDSEL 12 - Not Used */
+
+ /* IDSEL 13 - Universe VME Bridge */
+ 0x6800 0x0 0x0 0x1 &mpic 0x5 0x1
+ 0x6800 0x0 0x0 0x2 &mpic 0x6 0x1
+ 0x6800 0x0 0x0 0x3 &mpic 0x7 0x1
+ 0x6800 0x0 0x0 0x4 &mpic 0x8 0x1
+
+ /* IDSEL 14 - ENET 1 */
+ 0x7000 0x0 0x0 0x1 &mpic 0x2 0x1
+
+ /* IDSEL 15 - Not Used */
+
+ /* IDSEL 16 - PMC Slot 1 */
+ 0x8000 0x0 0x0 0x1 &mpic 0x9 0x1
+ 0x8000 0x0 0x0 0x2 &mpic 0xa 0x1
+ 0x8000 0x0 0x0 0x3 &mpic 0xb 0x1
+ 0x8000 0x0 0x0 0x4 &mpic 0xc 0x1
+
+ /* IDSEL 17 - PMC Slot 2 */
+ 0x8800 0x0 0x0 0x1 &mpic 0xc 0x1
+ 0x8800 0x0 0x0 0x2 &mpic 0x9 0x1
+ 0x8800 0x0 0x0 0x3 &mpic 0xa 0x1
+ 0x8800 0x0 0x0 0x4 &mpic 0xb 0x1
+
+ /* IDSEL 18 - Not Used */
+
+ /* IDSEL 19 - ENET 2 */
+ 0x9800 0x0 0x0 0x1 &mpic 0xd 0x1
+
+ /* IDSEL 20 - PMCSPAN (PCI-X) */
+ 0xa000 0x0 0x0 0x1 &mpic 0x9 0x1
+ 0xa000 0x0 0x0 0x2 &mpic 0xa 0x1
+ 0xa000 0x0 0x0 0x3 &mpic 0xb 0x1
+ 0xa000 0x0 0x0 0x4 &mpic 0xc 0x1
+
+ >;
+
+ isa {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ device_type = "isa";
+ compatible = "isa";
+ ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupts = <0 2>;
+ device_type = "interrupt-controller";
+ compatible = "chrp,iic";
+ interrupt-controller;
+ reg = <1 0x00000020 0x00000002
+ 1 0x000000a0 0x00000002
+ 1 0x000004d0 0x00000002>;
+ interrupt-parent = <&mpic>;
+ };
+
+ };
+
+ };
+
+ chosen {
+ linux,stdout-path = &serial0;
+ };
+
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dts b/arch/powerpc/boot/dts/p1010rdb-pa.dts
new file mode 100644
index 000000000000..767d4c032857
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dts
@@ -0,0 +1,23 @@
+/*
+ * P1010 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB";
+ compatible = "fsl,P1010RDB";
+
+ /include/ "p1010rdb_32b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+/include/ "p1010rdb-pa.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dtsi b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
new file mode 100644
index 000000000000..434fb2d58575
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
@@ -0,0 +1,85 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc_nand {
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00f00000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1900000 {
+ /* 7MB for User Area */
+ reg = <0x01900000 0x00700000>;
+ label = "NAND User area";
+ };
+};
+
+&phy0 {
+ interrupts = <1 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <4 1 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
index 64776f4a4651..3033371bc007 100644
--- a/arch/powerpc/boot/dts/p1010rdb_36b.dts
+++ b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
@@ -38,52 +38,9 @@
model = "fsl,P1010RDB";
compatible = "fsl,P1010RDB";
- memory {
- device_type = "memory";
- };
-
- board_ifc: ifc: ifc@fffe1e000 {
- /* NOR, NAND Flashes and CPLD on board */
- ranges = <0x0 0x0 0xf 0xee000000 0x02000000
- 0x1 0x0 0xf 0xff800000 0x00010000
- 0x3 0x0 0xf 0xffb00000 0x00000020>;
- reg = <0xf 0xffe1e000 0 0x2000>;
- };
-
- board_soc: soc: soc@fffe00000 {
- ranges = <0x0 0xf 0xffe00000 0x100000>;
- };
-
- pci0: pcie@fffe09000 {
- reg = <0xf 0xffe09000 0 0x1000>;
- ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@fffe0a000 {
- reg = <0xf 0xffe0a000 0 0x1000>;
- ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
+ /include/ "p1010rdb_36b.dtsi"
};
/include/ "p1010rdb.dtsi"
+/include/ "p1010rdb-pa.dtsi"
/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb.dts b/arch/powerpc/boot/dts/p1010rdb-pb.dts
new file mode 100644
index 000000000000..6eeb7d3185be
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb.dts
@@ -0,0 +1,35 @@
+/*
+ * P1010 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB-PB";
+ compatible = "fsl,P1010RDB-PB";
+
+ /include/ "p1010rdb_32b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+
+&phy0 {
+ interrupts = <0 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <1 1 0 0>;
+};
+
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
new file mode 100644
index 000000000000..7ab3c907b326
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
@@ -0,0 +1,58 @@
+/*
+ * P1010 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB-PB";
+ compatible = "fsl,P1010RDB-PB";
+
+ /include/ "p1010rdb_36b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+
+&phy0 {
+ interrupts = <0 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <1 1 0 0>;
+};
+
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
deleted file mode 100644
index b868d22984e9..000000000000
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * P1010 RDB Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/include/ "fsl/p1010si-pre.dtsi"
-
-/ {
- model = "fsl,P1010RDB";
- compatible = "fsl,P1010RDB";
-
- memory {
- device_type = "memory";
- };
-
- board_ifc: ifc: ifc@ffe1e000 {
- /* NOR, NAND Flashes and CPLD on board */
- ranges = <0x0 0x0 0x0 0xee000000 0x02000000
- 0x1 0x0 0x0 0xff800000 0x00010000
- 0x3 0x0 0x0 0xffb00000 0x00000020>;
- reg = <0x0 0xffe1e000 0 0x2000>;
- };
-
- board_soc: soc: soc@ffe00000 {
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- };
-
- pci0: pcie@ffe09000 {
- reg = <0 0xffe09000 0 0x1000>;
- ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@ffe0a000 {
- reg = <0 0xffe0a000 0 0x1000>;
- ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-};
-
-/include/ "p1010rdb.dtsi"
-/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index ec7c27a64671..ea534efa790d 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -69,49 +69,11 @@
};
};
- nand@1,0 {
+ ifc_nand: nand@1,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,ifc-nand";
reg = <0x1 0x0 0x10000>;
-
- partition@0 {
- /* This location must not be altered */
- /* 1MB for u-boot Bootloader Image */
- reg = <0x0 0x00100000>;
- label = "NAND U-Boot Image";
- read-only;
- };
-
- partition@100000 {
- /* 1MB for DTB Image */
- reg = <0x00100000 0x00100000>;
- label = "NAND DTB Image";
- };
-
- partition@200000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00200000 0x00400000>;
- label = "NAND Linux Kernel Image";
- };
-
- partition@600000 {
- /* 4MB for Compressed Root file System Image */
- reg = <0x00600000 0x00400000>;
- label = "NAND Compressed RFS Image";
- };
-
- partition@a00000 {
- /* 15MB for JFFS2 based Root file System */
- reg = <0x00a00000 0x00f00000>;
- label = "NAND JFFS2 Root File System";
- };
-
- partition@1900000 {
- /* 7MB for User Area */
- reg = <0x01900000 0x00700000>;
- label = "NAND User area";
- };
};
cpld@3,0 {
@@ -193,17 +155,14 @@
mdio@24000 {
phy0: ethernet-phy@0 {
- interrupts = <3 1 0 0>;
reg = <0x1>;
};
phy1: ethernet-phy@1 {
- interrupts = <2 1 0 0>;
reg = <0x0>;
};
phy2: ethernet-phy@2 {
- interrupts = <2 1 0 0>;
reg = <0x2>;
};
diff --git a/arch/powerpc/boot/dts/p1010rdb_32b.dtsi b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
new file mode 100644
index 000000000000..fdc19aab2f70
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
@@ -0,0 +1,79 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+memory {
+ device_type = "memory";
+};
+
+board_ifc: ifc: ifc@ffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0x0 0xee000000 0x02000000
+ 0x1 0x0 0x0 0xff800000 0x00010000
+ 0x3 0x0 0x0 0xffb00000 0x00000020>;
+ reg = <0x0 0xffe1e000 0 0x2000>;
+};
+
+board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+};
+
+pci0: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
+
+pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dtsi b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
new file mode 100644
index 000000000000..de2fceed4f79
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
@@ -0,0 +1,79 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+memory {
+ device_type = "memory";
+};
+
+board_ifc: ifc: ifc@fffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0xf 0xee000000 0x02000000
+ 0x1 0x0 0xf 0xff800000 0x00010000
+ 0x3 0x0 0xf 0xffb00000 0x00000020>;
+ reg = <0xf 0xffe1e000 0 0x2000>;
+};
+
+board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+};
+
+pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
+
+pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
index 873da350d01b..957e0dc1dc0f 100644
--- a/arch/powerpc/boot/dts/p1022ds.dtsi
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -146,8 +146,9 @@
*/
};
rtc@68 {
- compatible = "dallas,ds1339";
+ compatible = "dallas,ds3232";
reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
};
adt7461@4c {
compatible = "adi,adt7461";
diff --git a/arch/powerpc/boot/dts/p1025twr.dts b/arch/powerpc/boot/dts/p1025twr.dts
new file mode 100644
index 000000000000..9036a4987905
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dts
@@ -0,0 +1,95 @@
+/*
+ * P1025 TWR Device Tree Source (32-bit address map)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1025";
+ compatible = "fsl,TWR-P1025";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR Flash and SSD1289 */
+ ranges = <0x0 0x0 0x0 0xec000000 0x04000000
+ 0x2 0x0 0x0 0xe0000000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ qe: qe@ffe80000 {
+ ranges = <0x0 0x0 0xffe80000 0x40000>;
+ reg = <0 0xffe80000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ };
+};
+
+/include/ "p1025twr.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025twr.dtsi b/arch/powerpc/boot/dts/p1025twr.dtsi
new file mode 100644
index 000000000000..8453501c256e
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dtsi
@@ -0,0 +1,280 @@
+/*
+ * P1025 TWR Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/{
+ aliases {
+ ethernet3 = &enet3;
+ ethernet4 = &enet4;
+ };
+};
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x4000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 5.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00580000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 56.75MB for Root file System */
+ reg = <0x00600000 0x038c0000>;
+ label = "NOR Root File System";
+ };
+
+ partition@ec0000 {
+ /* This location must not be altered */
+ /* 256KB for QE ucode firmware*/
+ reg = <0x03ec0000 0x00040000>;
+ label = "NOR QE microcode firmware";
+ read-only;
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x03f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ /* CS2 for Display */
+ display@2,0 {
+ compatible = "solomon,ssd1289fb";
+ reg = <0x2 0x0000 0x0004>;
+ };
+
+};
+
+&soc {
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@2 {
+ interrupt-parent = <&mpic>;
+ interrupts = <1 1 0 0>;
+ reg = <0x2>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ par_io@e0100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe0100 0x60>;
+ ranges = <0x0 0xe0100 0x60>;
+ device_type = "par_io";
+ num-ports = <3>;
+ pio1: ucc_pin@01 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
+ 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
+ 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
+ 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
+ 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
+ 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
+ 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
+ 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
+ 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
+ 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
+ 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
+ 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
+ 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
+ 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
+ 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
+ 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
+ };
+
+ pio2: ucc_pin@02 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
+ 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
+ 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
+ 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
+ 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
+ 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
+ 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
+ 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
+ };
+
+ pio3: ucc_pin@03 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x0 0x16 0x2 0x0 0x2 0x0 /* SER7_CD_B*/
+ 0x0 0x12 0x2 0x0 0x2 0x0 /* SER7_CTS_B*/
+ 0x0 0x13 0x1 0x0 0x2 0x0 /* SER7_RTS_B*/
+ 0x0 0x14 0x2 0x0 0x2 0x0 /* SER7_RXD0*/
+ 0x0 0x15 0x1 0x0 0x2 0x0>; /* SER7_TXD0*/
+ };
+
+ pio4: ucc_pin@04 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x0 0x2 0x0 0x2 0x0 /* SER3_CD_B*/
+ 0x0 0x1c 0x2 0x0 0x2 0x0 /* SER3_CTS_B*/
+ 0x0 0x1d 0x1 0x0 0x2 0x0 /* SER3_RTS_B*/
+ 0x0 0x1e 0x2 0x0 0x2 0x0 /* SER3_RXD0*/
+ 0x0 0x1f 0x1 0x0 0x2 0x0>; /* SER3_TXD0*/
+ };
+ };
+};
+
+&qe {
+ enet3: ucc@2000 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "clk12";
+ tx-clock-name = "clk9";
+ pio-handle = <&pio1>;
+ phy-handle = <&qe_phy0>;
+ phy-connection-type = "mii";
+ };
+
+ mdio@2120 {
+ qe_phy0: ethernet-phy@18 {
+ interrupt-parent = <&mpic>;
+ interrupts = <4 1 0 0>;
+ reg = <0x18>;
+ device_type = "ethernet-phy";
+ };
+ qe_phy1: ethernet-phy@19 {
+ interrupt-parent = <&mpic>;
+ interrupts = <5 1 0 0>;
+ reg = <0x19>;
+ device_type = "ethernet-phy";
+ };
+ tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet4: ucc@2400 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "none";
+ tx-clock-name = "clk13";
+ pio-handle = <&pio2>;
+ phy-handle = <&qe_phy1>;
+ phy-connection-type = "rmii";
+ };
+
+ serial2: ucc@2600 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <0>;
+ rx-clock-name = "brg6";
+ tx-clock-name = "brg6";
+ pio-handle = <&pio3>;
+ };
+
+ serial3: ucc@2200 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <1>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ pio-handle = <&pio4>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
index fc7073bc547e..391a4e299783 100644
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ b/arch/powerpc/boot/dts/virtex440-ml507.dts
@@ -257,6 +257,8 @@
#size-cells = <1>;
compatible = "xlnx,compound";
ethernet@81c00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "xlnx,xps-ll-temac-1.01.b";
device_type = "network";
interrupt-parent = <&xps_intc_0>;
diff --git a/arch/powerpc/boot/mvme5100.c b/arch/powerpc/boot/mvme5100.c
new file mode 100644
index 000000000000..cb865f83c60b
--- /dev/null
+++ b/arch/powerpc/boot/mvme5100.c
@@ -0,0 +1,27 @@
+/*
+ * Motorola/Emerson MVME5100 with PPCBug firmware.
+ *
+ * Author: Stephen Chivers <schivers@csc.com>
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+#include "types.h"
+#include "ops.h"
+#include "io.h"
+
+BSS_STACK(4096);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ u32 heapsize;
+
+ heapsize = 0x8000000 - (u32)_end; /* 128M */
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 2e1af74a64be..d27a25518b01 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -265,6 +265,10 @@ epapr)
link_address='0x20000000'
pie=-pie
;;
+mvme5100)
+ platformo="$object/fixed-head.o $object/mvme5100.o"
+ binary=y
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/85xx/p1023_defconfig b/arch/powerpc/configs/85xx/p1023_defconfig
deleted file mode 100644
index b06d37da44f4..000000000000
--- a/arch/powerpc/configs/85xx/p1023_defconfig
+++ /dev/null
@@ -1,188 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=2
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_RCU_FANOUT=32
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_PHYSICAL_START=0x00000000
-CONFIG_P1023_RDB=y
-CONFIG_P1023_RDS=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_CPM2=y
-CONFIG_HIGHMEM=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_SWIOTLB=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEAER is not set
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_E1000E=y
-CONFIG_PHYLIB=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_GPIO_MPC8XXX=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_STORAGE=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-# CONFIG_NET_DMA is not set
-CONFIG_STAGING=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FRAME_WARN=8092
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_STRICT_DEVMEM=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 69128740c14d..15b1ff5d96e7 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -70,3 +70,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 219fd470ed22..b8a79d7ee89f 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -72,3 +72,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d2e0fab5ee5b..83d3550fdb54 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -31,6 +31,7 @@ CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
@@ -113,6 +114,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -211,6 +213,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 4cb7b59e98bd..4b686294feb4 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -34,6 +34,7 @@ CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
@@ -116,6 +117,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -212,6 +214,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5c258823e694..d954e80c286a 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -55,3 +55,4 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_CCITT=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 9e146cdf63de..3f47d00a10c0 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -78,3 +78,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
new file mode 100644
index 000000000000..93c7752e2dbb
--- /dev/null
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -0,0 +1,144 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_PMAC is not set
+CONFIG_EMBEDDED6xx=y
+CONFIG_MVME5100=y
+CONFIG_KVM_GUEST=y
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs"
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_LAPB=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_EEPROM_LEGACY=m
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_E100=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=10
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MPC=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_VME_BUS=m
+CONFIG_VME_CA91CX42=m
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=20
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 581a3bcae728..e015896b7e5c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -186,6 +186,7 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
CONFIG_SATA_SIL24=y
+CONFIG_SATA_MV=y
CONFIG_SATA_SVW=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 8616fde0896f..4b6f8bf104e0 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -84,3 +84,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 910194e9a1e2..a5e9a7d494d8 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -46,6 +46,11 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+
/*
* clear_bit doesn't imply a memory barrier
*/
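The PPC_BIT macros added above translate IBM big-endian bit numbering (bit 0 is the most significant bit) into ordinary shift masks; for instance, P7_SRR1_MC_IFETCH in the new mce.h later in this patch is PPC_BITMASK(43, 45). A minimal user-space sketch, re-declaring the same macros under an assumed 64-bit BITS_PER_LONG, shows the values they produce:

#include <stdio.h>

#define BITS_PER_LONG 64                /* assumption: 64-bit build */
#define PPC_BITLSHIFT(be)   (BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)        (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
	printf("%016lx\n", PPC_BIT(0));          /* 8000000000000000: IBM bit 0 is the MSB  */
	printf("%016lx\n", PPC_BIT(63));         /* 0000000000000001: IBM bit 63 is the LSB */
	printf("%016lx\n", PPC_BITMASK(43, 45)); /* 00000000001c0000: IBM bits 43..45 set   */
	return 0;
}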
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 9e495c9a6a88..ed0afc1e44a4 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -41,8 +41,20 @@ struct ppc64_caches {
extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
-#if !defined(__ASSEMBLY__)
+#if defined(__ASSEMBLY__)
+/*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+#define PURGE_PREFETCHED_INS \
+ sync; \
+ icbi 0,r3; \
+ sync; \
+ isync
+#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_6xx
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index e245aab7f191..d463c68fe7f0 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
+#define cmpxchg64_relaxed cmpxchg64_local
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
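Defining cmpxchg64_relaxed as cmpxchg64_local gives 64-bit powerpc a barrier-free 64-bit compare-and-swap, the primitive that generic retry loops (such as lib/lockref.c's CMPXCHG_LOOP) build on. A hedged kernel-context sketch of that pattern; the counter and helper name are hypothetical, not from this patch:

#include <linux/types.h>

/* Hypothetical helper: increment a 64-bit counter without implied barriers,
 * retrying if another CPU raced with us. */
static inline void relaxed_inc64(u64 *counter)
{
	u64 old = ACCESS_ONCE(*counter);

	for (;;) {
		/* Returns the value that was actually found in *counter. */
		u64 seen = cmpxchg64_relaxed(counter, old, old + 1);
		if (seen == old)
			break;		/* our update landed */
		old = seen;		/* lost the race; retry from the new value */
	}
}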
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a6f8c7a5cbb7..97e02f985df8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -34,6 +34,13 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
const unsigned int *src);
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+#endif
static inline unsigned long ppc_function_entry(void *func)
{
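patch_exception() takes an exception-vector offset and an assembly label and re-points that vector's branch at the label (64-bit Book3E only). A hedged illustration of a caller; both the 0x1c0 offset and the handler label below are placeholders for this sketch, not values taken from the patch:

#ifdef CONFIG_PPC_BOOK3E_64
/* Hypothetical replacement handler defined in assembly. */
extern unsigned int exc_my_dtlb_miss_handler;

static void use_alternate_dtlb_miss_handler(void)
{
	/* Re-point the vector at offset 0x1c0 to the alternate handler. */
	patch_exception(0x1c0, exc_my_dtlb_miss_handler);
}
#endif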
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 0d4939ba48e7..617cc767c076 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -90,6 +90,18 @@ struct cpu_spec {
* if the error is fatal, 1 if it was fully recovered and 0 to
* pass up (not CPU originated) */
int (*machine_check)(struct pt_regs *regs);
+
+ /*
+ * Processor specific early machine check handler which is
+ * called in real mode to handle SLB and TLB errors.
+ */
+ long (*machine_check_early)(struct pt_regs *regs);
+
+ /*
+ * Processor specific routine to flush tlbs.
+ */
+ void (*flush_tlb)(unsigned long inval_selector);
+
};
extern struct cpu_spec *cur_cpu_spec;
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d3e5e9bc8f94..9e39ceb1d19f 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -90,7 +90,8 @@ struct eeh_pe {
#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
-#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */
+#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */
+#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */
struct eeh_dev {
int mode; /* EEH mode */
@@ -117,6 +118,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
return edev ? edev->pdev : NULL;
}
+/* Return values from eeh_ops::next_error */
+enum {
+ EEH_NEXT_ERR_NONE = 0,
+ EEH_NEXT_ERR_INF,
+ EEH_NEXT_ERR_FROZEN_PE,
+ EEH_NEXT_ERR_FENCED_PHB,
+ EEH_NEXT_ERR_DEAD_PHB,
+ EEH_NEXT_ERR_DEAD_IOC
+};
+
/*
* The struct is used to trace the registered EEH operation
* callback functions. Actually, those operation callback
@@ -157,6 +168,7 @@ struct eeh_ops {
int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
int (*write_config)(struct device_node *dn, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
+ int (*restore_config)(struct device_node *dn);
};
extern struct eeh_ops *eeh_ops;
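The EEH_NEXT_ERR_* values are what eeh_ops->next_error() hands back, in rough order of severity. A hedged sketch of the dispatch loop a platform error handler might run; the handle_*() helpers are placeholders, not functions from this patch:

static void eeh_drain_errors(void)
{
	struct eeh_pe *pe;
	int rc;

	while ((rc = eeh_ops->next_error(&pe)) != EEH_NEXT_ERR_NONE) {
		switch (rc) {
		case EEH_NEXT_ERR_FROZEN_PE:
			handle_frozen_pe(pe);		/* placeholder */
			break;
		case EEH_NEXT_ERR_FENCED_PHB:
			handle_fenced_phb(pe);		/* placeholder */
			break;
		case EEH_NEXT_ERR_DEAD_PHB:
		case EEH_NEXT_ERR_DEAD_IOC:
			handle_fatal_error(pe, rc);	/* placeholder */
			break;
		case EEH_NEXT_ERR_INF:
		default:
			/* Informational only, nothing to recover. */
			break;
		}
	}
}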
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 243ce69ad685..66830618cc19 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -301,9 +301,12 @@ do_kvm_##n: \
beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r9, r10); \
SAVE_PPR(area, r9, r10); \
-4: std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+4: EXCEPTION_PROLOG_COMMON_2(area) \
+ EXCEPTION_PROLOG_COMMON_3(n) \
+ ACCOUNT_STOLEN_TIME
+
+/* Save the original register values from the save area to the stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area) \
ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
ld r10,area+EX_R10(r13); \
std r9,GPR9(r1); \
@@ -318,11 +321,16 @@ do_kvm_##n: \
ld r10,area+EX_CFAR(r13); \
std r10,ORIG_GPR3(r1); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_CTR(r10, area); \
+ std r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(n) \
+ std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
+ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \
- GET_CTR(r10, area); \
- std r10,_CTR(r1); \
lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r10,SOFTE(r1); \
@@ -332,8 +340,7 @@ do_kvm_##n: \
li r10,0; \
ld r11,exception_marker@toc(r2); \
std r10,RESULT(r1); /* clear regs->result */ \
- std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
- ACCOUNT_STOLEN_TIME
+ std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
/*
* Exception vectors.
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 420b45368fcf..067fb0dca549 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -285,7 +285,7 @@ struct fsl_lbc_ctrl {
/* device info */
struct device *dev;
struct fsl_lbc_regs __iomem *regs;
- int irq;
+ int irq[2];
wait_queue_head_t irq_wait;
spinlock_t lock;
void *nand;
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3bdcfce2c42a..418fb654370d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,7 +6,8 @@
typedef struct {
unsigned int __softirq_pending;
- unsigned int timer_irqs;
+ unsigned int timer_irqs_event;
+ unsigned int timer_irqs_others;
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 575fbf81fad0..97d3869991ca 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -191,8 +191,24 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);
#endif /* __BIG_ENDIAN */
+/*
+ * Cache inhibited accessors for use in real mode; you don't want to use these
+ * unless you know what you're doing.
+ *
+ * NB. These use the CPU byte ordering.
+ */
+DEF_MMIO_OUT_X(out_rm8, 8, stbcix);
+DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
+DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
+DEF_MMIO_IN_X(in_rm8, 8, lbzcix);
+DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
+DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
+
#ifdef __powerpc64__
+DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
+DEF_MMIO_IN_X(in_rm64, 64, ldcix);
+
#ifdef __BIG_ENDIAN__
DEF_MMIO_OUT_D(out_be64, 64, std);
DEF_MMIO_IN_D(in_be64, 64, ld);
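The DEF_MMIO_*_X lines above generate in_rmNN()/out_rmNN() accessors built on the cache-inhibited indexed load/store forms (lbzcix, stdcix, ...), so MMIO can be touched while the MMU is off. A hedged sketch of a real-mode caller; the doorbell register is purely illustrative:

/* Sketch only: 'doorbell' stands in for some real-mode-reachable MMIO word. */
static void rm_ring_doorbell(volatile u64 __iomem *doorbell, u64 val)
{
	/*
	 * The usual MMIO helpers assume a cache-inhibited MMU mapping;
	 * in real mode the *cix forms must be used instead.
	 */
	out_rm64(doorbell, val);
	(void)in_rm64(doorbell);	/* read back (illustrative) */
}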
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index c34656a8925e..f7a8036579b5 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,22 +30,19 @@
#include <asm/machdep.h>
#include <asm/types.h>
-#define IOMMU_PAGE_SHIFT 12
-#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
- return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
-}
-
-
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -76,11 +73,20 @@ struct iommu_table {
struct iommu_pool large_pool;
struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
+ unsigned long it_page_shift; /* table iommu page size */
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
};
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+ return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
struct scatterlist;
static inline void set_iommu_table_base(struct device *dev, void *base)
@@ -101,8 +107,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
+#ifdef CONFIG_IOMMU_API
extern void iommu_register_group(struct iommu_table *tbl,
int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct device *dev);
+extern void iommu_del_device(struct device *dev);
+#else
+static inline void iommu_register_group(struct iommu_table *tbl,
+ int pci_domain_number,
+ unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+#endif /* !CONFIG_IOMMU_API */
+
+static inline void set_iommu_table_base_and_group(struct device *dev,
+ void *base)
+{
+ set_iommu_table_base(dev, base);
+ iommu_add_device(dev);
+}
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
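IOMMU_PAGE_SIZE(), IOMMU_PAGE_ALIGN() and get_iommu_order() now key off it_page_shift stored in the table, so an IOMMU table is no longer assumed to use 4K pages. A small worked example, assuming a table configured for 64K IOMMU pages (the shift value is mine, for illustration):

static void iommu_page_size_example(void)
{
	struct iommu_table tbl = { .it_page_shift = 16 };	/* 64K IOMMU pages (assumed) */

	BUG_ON(IOMMU_PAGE_SIZE(&tbl) != 0x10000);		/* 64K                   */
	BUG_ON(IOMMU_PAGE_ALIGN(0x12345, &tbl) != 0x20000);	/* rounded up to 64K     */
	BUG_ON(get_iommu_order(1UL << 20, &tbl) != 4);		/* 2^4 = 16 pages = 1MB  */
}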
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1bd92fd43cfb..1503d8c7c41b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -74,6 +74,7 @@
#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
/* book3s */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 844c28de7ec0..d0a2a2f99564 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -132,8 +132,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-extern struct slb_shadow slb_shadow[];
-
/*
* Layout of entries in the hypervisor's dispatch trace log buffer.
*/
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 000000000000..8e99edf6d966
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,197 @@
+/*
+ * Machine check exception header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+/*
+ * Machine Check bits on power7 and power8
+ */
+#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */
+
+/* SRR1 bits for machine check (On Power7 and Power8) */
+#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
+
+#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
+#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
+
+/* SRR1 bits for machine check (On Power8) */
+#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45))
+
+/* DSISR bits for machine check (On Power7 and Power8) */
+#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */
+#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */
+#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */
+#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */
+#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */
+
+/*
+ * DSISR bits for machine check (Power8) in addition to above.
+ * Secondary DERAT Multihit
+ */
+#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54))
+
+/* SLB error bits */
+#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \
+ P7_DSISR_MC_SLB_PARITY_MFSLB | \
+ P7_DSISR_MC_SLB_MULTIHIT | \
+ P7_DSISR_MC_SLB_MULTIHIT_PARITY)
+
+#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
+ P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+enum MCE_Version {
+ MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+ MCE_SEV_NO_ERROR = 0,
+ MCE_SEV_WARNING = 1,
+ MCE_SEV_ERROR_SYNC = 2,
+ MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+ MCE_DISPOSITION_RECOVERED = 0,
+ MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+ MCE_INITIATOR_UNKNOWN = 0,
+ MCE_INITIATOR_CPU = 1,
+};
+
+enum MCE_ErrorType {
+ MCE_ERROR_TYPE_UNKNOWN = 0,
+ MCE_ERROR_TYPE_UE = 1,
+ MCE_ERROR_TYPE_SLB = 2,
+ MCE_ERROR_TYPE_ERAT = 3,
+ MCE_ERROR_TYPE_TLB = 4,
+};
+
+enum MCE_UeErrorType {
+ MCE_UE_ERROR_INDETERMINATE = 0,
+ MCE_UE_ERROR_IFETCH = 1,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_UE_ERROR_LOAD_STORE = 3,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+ MCE_SLB_ERROR_INDETERMINATE = 0,
+ MCE_SLB_ERROR_PARITY = 1,
+ MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+ MCE_ERAT_ERROR_INDETERMINATE = 0,
+ MCE_ERAT_ERROR_PARITY = 1,
+ MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+ MCE_TLB_ERROR_INDETERMINATE = 0,
+ MCE_TLB_ERROR_PARITY = 1,
+ MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+struct machine_check_event {
+ enum MCE_Version version:8; /* 0x00 */
+ uint8_t in_use; /* 0x01 */
+ enum MCE_Severity severity:8; /* 0x02 */
+ enum MCE_Initiator initiator:8; /* 0x03 */
+ enum MCE_ErrorType error_type:8; /* 0x04 */
+ enum MCE_Disposition disposition:8; /* 0x05 */
+ uint8_t reserved_1[2]; /* 0x06 */
+ uint64_t gpr3; /* 0x08 */
+ uint64_t srr0; /* 0x10 */
+ uint64_t srr1; /* 0x18 */
+ union { /* 0x20 */
+ struct {
+ enum MCE_UeErrorType ue_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t physical_address_provided;
+ uint8_t reserved_1[5];
+ uint64_t effective_address;
+ uint64_t physical_address;
+ uint8_t reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum MCE_SlbErrorType slb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum MCE_EratErrorType erat_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum MCE_TlbErrorType tlb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } tlb_error;
+ } u;
+};
+
+struct mce_error_info {
+ enum MCE_ErrorType error_type:8;
+ union {
+ enum MCE_UeErrorType ue_error_type:8;
+ enum MCE_SlbErrorType slb_error_type:8;
+ enum MCE_EratErrorType erat_error_type:8;
+ enum MCE_TlbErrorType tlb_error_type:8;
+ } u;
+ uint8_t reserved[2];
+};
+
+#define MAX_MC_EVT 100
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE true
+#define MCE_EVENT_DONTRELEASE false
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err, uint64_t addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt);
+extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
+
+#endif /* __ASM_PPC64_MCE_H__ */
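The header above ends with the small API the rest of the series uses: save_mce_event() records an event from the real-mode handler, get_mce_event() retrieves it (optionally releasing its slot), and machine_check_print_event_info() formats it. A hedged kernel-context sketch of a consumer, assuming get_mce_event() returns non-zero when an event was found; where exactly this runs is not shown here:

static void report_pending_mce(void)
{
	struct machine_check_event evt;

	/* Fetch the most recent event and release its slot. */
	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	machine_check_print_event_info(&evt);

	if (evt.error_type == MCE_ERROR_TYPE_UE)
		pr_err("MCE: fault address 0x%016llx\n",
		       (unsigned long long)get_mce_fault_addr(&evt));
}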
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 936db360790a..89b785d16846 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;
+struct tlb_core_data {
+ /* For software way selection, as on Freescale TLB1 */
+ u8 esel_next, esel_max, esel_first;
+
+ /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
+ u8 lock;
+};
+
#ifdef CONFIG_PPC64
extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE 0
+#define PPC_HTW_IBM 1
+#define PPC_HTW_E6500 2
/*
* 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 691fd8aca939..f8d1d6dcf7db 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K 4
#define MMU_PAGE_1M 5
-#define MMU_PAGE_4M 6
-#define MMU_PAGE_8M 7
-#define MMU_PAGE_16M 8
-#define MMU_PAGE_64M 9
-#define MMU_PAGE_256M 10
-#define MMU_PAGE_1G 11
-#define MMU_PAGE_16G 12
-#define MMU_PAGE_64G 13
-
-#define MMU_PAGE_COUNT 14
+#define MMU_PAGE_2M 6
+#define MMU_PAGE_4M 7
+#define MMU_PAGE_8M 8
+#define MMU_PAGE_16M 9
+#define MMU_PAGE_64M 10
+#define MMU_PAGE_256M 11
+#define MMU_PAGE_1G 12
+#define MMU_PAGE_16G 13
+#define MMU_PAGE_64G 14
+
+#define MMU_PAGE_COUNT 15
#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 7bdcf340016c..40157e2ca691 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -33,6 +33,28 @@ struct opal_takeover_args {
u64 rd_loc; /* r11 */
};
+/*
+ * SG entry
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned, *and* every block
+ * size except the last one in the list must be 4k aligned as well.
+ */
+struct opal_sg_entry {
+ void *data;
+ long length;
+};
+
+/* sg list */
+struct opal_sg_list {
+ unsigned long num_entries;
+ struct opal_sg_list *next;
+ struct opal_sg_entry entry[];
+};
+
+/* We calculate the number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
extern long opal_do_takeover(struct opal_takeover_args *args);
@@ -132,6 +154,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_FLASH_VALIDATE 76
#define OPAL_FLASH_MANAGE 77
#define OPAL_FLASH_UPDATE 78
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
#ifndef __ASSEMBLY__
@@ -211,7 +236,16 @@ enum OpalPendingState {
OPAL_EVENT_ERROR_LOG = 0x40,
OPAL_EVENT_EPOW = 0x80,
OPAL_EVENT_LED_STATUS = 0x100,
- OPAL_EVENT_PCI_ERROR = 0x200
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+enum OpalMessageType {
+ OPAL_MSG_ASYNC_COMP = 0,
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_TYPE_MAX,
};
/* Machine check related definitions */
@@ -311,12 +345,16 @@ enum OpalMveEnableAction {
OPAL_ENABLE_MVE = 1
};
-enum OpalPciResetAndReinitScope {
+enum OpalPciResetScope {
OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
OPAL_PCI_IODA_TABLE_RESET = 6,
};
+enum OpalPciReinitScope {
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
enum OpalPciResetState {
OPAL_DEASSERT_RESET = 0,
OPAL_ASSERT_RESET = 1
@@ -356,6 +394,12 @@ enum OpalLPCAddressType {
OPAL_LPC_FW = 2,
};
+struct opal_msg {
+ uint32_t msg_type;
+ uint32_t reserved;
+ uint64_t params[8];
+};
+
struct opal_machine_check_event {
enum OpalMCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -404,6 +448,58 @@ struct opal_machine_check_event {
} u;
};
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+ OPAL_MEM_ERR_TYPE_SCRUB,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+/* OpalMemoryErrorData->flags */
+#define OPAL_MEM_CORRECTED_ERROR 0x0001
+#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
+#define OPAL_MEM_ACK_REQUIRED 0x8000
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ uint16_t flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
enum {
OPAL_P7IOC_DIAG_TYPE_NONE = 0,
OPAL_P7IOC_DIAG_TYPE_RGC = 1,
@@ -710,7 +806,7 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t opal_pci_fence_phb(uint64_t phb_id);
-int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
int64_t opal_get_epow_status(__be64 *status);
@@ -731,6 +827,10 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
+int64_t opal_get_msg(uint64_t buffer, size_t size);
+int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -744,6 +844,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int opal_notifier_register(struct notifier_block *nb);
+extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+ struct notifier_block *nb);
extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
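The opal_sg_list/opal_sg_entry structures added earlier in this header describe buffers handed to firmware (the flash-update path); per the warning there, each block must be 4k aligned. A hedged sketch of building a single-entry list for one contiguous, already-aligned buffer; the function name is made up and error handling is minimal:

#include <linux/gfp.h>

static struct opal_sg_list *make_single_entry_sg(void *image, long len)
{
	struct opal_sg_list *sg;

	sg = (struct opal_sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!sg)
		return NULL;

	sg->num_entries = 1;
	sg->next = NULL;
	sg->entry[0].data = image;	/* must be 4k aligned, see header warning */
	sg->entry[0].length = len;
	return sg;
}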
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b6ea9e068c13..9c5dbc3833fb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PPC64
-#include <linux/init.h>
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
@@ -113,6 +112,10 @@ struct paca_struct {
/* Keep pgd in the same cacheline as the start of extlb */
pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
+
+ /* Shared by all threads of a core -- points to tcd of first thread */
+ struct tlb_core_data *tcd_ptr;
+
/* We can have up to 3 levels of reentrancy in the TLB miss handler */
u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
@@ -123,6 +126,8 @@ struct paca_struct {
void *mc_kstack;
void *crit_kstack;
void *dbg_kstack;
+
+ struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */
mm_context_t context;
@@ -152,6 +157,15 @@ struct paca_struct {
*/
struct opal_machine_check_event *opal_mc_evt;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Exclusive emergency stack pointer for machine check exception. */
+ void *mc_emergency_sp;
+ /*
+ * Flag to check whether we are in machine check early handler
+ * and already using emergency stack.
+ */
+ u16 in_mce;
+#endif
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 7d6eacf249cf..b999ca318985 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
@@ -33,10 +34,73 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+#ifdef CONFIG_NUMA_BALANCING
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
+}
+
+#define pte_numa pte_numa
+static inline int pte_numa(pte_t pte)
+{
+ return (pte_val(pte) &
+ (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+
+#define pte_mknonnuma pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_NUMA;
+ pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
+ return pte;
+}
+
+#define pte_mknuma pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ /*
+ * We should not set _PAGE_NUMA on non-present ptes. Also clear the
+ * present bit so that hash_page will return 1 and we collect this
+ * as a NUMA fault.
+ */
+ if (pte_present(pte)) {
+ pte_val(pte) |= _PAGE_NUMA;
+ pte_val(pte) &= ~_PAGE_PRESENT;
+ } else
+ VM_BUG_ON(1);
+ return pte;
+}
+
+#define pmd_numa pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+ return pte_numa(pmd_pte(pmd));
+}
+
+#define pmd_mknonnuma pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
+}
+
+#define pmd_mknuma pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknuma(pmd_pte(pmd)));
+}
+
+# else
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
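With CONFIG_NUMA_BALANCING, a pte carrying a NUMA hinting fault is encoded as _PAGE_NUMA set with _PAGE_PRESENT clear, while pte_present() accepts either bit so the entry is never mistaken for a swap pte. A compact summary of the transitions the helpers above implement (restating the code, nothing new):

/*
 *   ordinary mapped pte : _PAGE_PRESENT=1, _PAGE_NUMA=0
 *        pte_mknuma()  -> _PAGE_PRESENT=0, _PAGE_NUMA=1   (hinting fault armed)
 *     pte_mknonnuma()  -> _PAGE_PRESENT=1, _PAGE_NUMA=0, _PAGE_ACCESSED=1
 *
 * pte_present() is true in both states; pte_numa() is true only while the
 * fault is armed, and pte_mknuma() VM_BUG_ON()s if asked to arm a
 * non-present pte.
 */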
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f595b98079ee..6586a40a46ce 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -4,7 +4,6 @@
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H
-#include <linux/init.h>
#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
@@ -295,6 +294,11 @@ n:
* you want to access various offsets within it). On ppc32 this is
* identical to LOAD_REG_IMMEDIATE.
*
+ * LOAD_REG_ADDR_PIC(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * the kernel doesn't run at the linked or relocated address. Please
+ * note that this macro will clobber the lr register.
+ *
* LOAD_REG_ADDRBASE(rn, name)
* ADDROFF(name)
* LOAD_REG_ADDRBASE loads part of the address of label 'name' into
@@ -305,6 +309,14 @@ n:
* LOAD_REG_ADDRBASE(rX, name)
* ld rY,ADDROFF(name)(rX)
*/
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name) \
+ bl 0f; \
+0: mflr reg; \
+ addis reg,reg,(name - 0b)@ha; \
+ addi reg,reg,(name - 0b)@l;
+
#ifdef __powerpc64__
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis reg,(expr)@highest; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index fc14a38c7ccf..8ca20ac28dc2 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -256,6 +256,8 @@ struct thread_struct {
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
unsigned long spefscr; /* SPE & eFP status */
+ unsigned long spefscr_last; /* SPEFSCR value on last prctl
+ call or trap return */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -317,7 +319,9 @@ struct thread_struct {
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
#ifdef CONFIG_SPE
-#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#define SPEFSCR_INIT \
+ .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+ .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif
@@ -373,6 +377,8 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+extern void fp_enable(void);
+extern void vec_enable(void);
extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 678a7c1d9cb8..a1bc7e758422 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -21,7 +21,6 @@
#if !defined(_ASM_POWERPC_PS3_H)
#define _ASM_POWERPC_PS3_H
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <asm/cell-pmu.h>
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 0419eeb53274..2505d8eab15c 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -19,7 +19,7 @@
#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
#define _PAGE_DIRTY 0x0080 /* C: page changed */
@@ -27,6 +27,12 @@
#define _PAGE_RW 0x0200 /* software: user write access allowed */
#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+/*
+ * Used for tracking numa faults
+ */
+#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */
+
+
/* No separate kernel read-only */
#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fa8388ed94c5..62b114e079cf 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1075,6 +1075,8 @@
#define PVR_8560 0x80200000
#define PVR_VER_E500V1 0x8020
#define PVR_VER_E500V2 0x8021
+#define PVR_VER_E6500 0x8040
+
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2e31aacd8acc..163c3b05a76e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -101,6 +101,7 @@
#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */
#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
@@ -170,6 +171,7 @@
#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
#define SPRN_SVR 0x3FF /* System Version Register */
/*
@@ -216,6 +218,14 @@
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT 8
+#define PWRMGTCR0_PW20_ENT 0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
+#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
+
/* Bit definitions for the MCSR. */
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f6e78d63fb6a..35aa339410bd 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -30,8 +30,6 @@
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
-#define arch_spin_is_locked(x) ((x)->slock != 0)
-
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
@@ -56,6 +54,16 @@
#define SYNC_IO
#endif
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(*lock);
+}
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
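arch_spin_value_unlocked() deliberately takes a lock value rather than a pointer: generic code can snapshot a larger word that embeds the lock (lib/lockref.c copies its {lock, count} pair this way) and test the copy without touching the live lock, and arch_spin_is_locked() is then just the negation applied to a dereferenced pointer. A trivial hedged sketch of the value-based usage:

#include <linux/types.h>

static bool snapshot_says_unlocked(arch_spinlock_t *lock)
{
	arch_spinlock_t snap = *lock;	/* copy by value; no further dereference */

	return arch_spin_value_unlocked(snap);
}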
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9854c564ac52..b034ecdb7c74 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -91,8 +91,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
-#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
+#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
@@ -115,8 +114,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
-#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
-#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW)
+#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -132,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)
_TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 9dfbc34bdbf5..0c9f8b74dd97 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -15,6 +15,7 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread);
extern void tm_enable(void);
extern void tm_reclaim(struct thread_struct *thread,
unsigned long orig_msr, uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
extern void tm_recheckpoint(struct thread_struct *thread,
unsigned long orig_msr);
extern void tm_abort(uint8_t cause);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 89e3ef2496ac..d0b5fca6b077 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
static inline int cpu_to_node(int cpu)
{
- return numa_cpu_lookup_table[cpu];
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * During early boot, the numa-cpu lookup table might not have been
+ * set up for all CPUs yet. In such cases, default to node 0.
+ */
+ return (nid < 0) ? 0 : nid;
}
#define parent_node(node) (node)
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 68d0cc998b1b..4f9b7ca0710f 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -15,7 +15,6 @@
#define _ASM_POWERPC_VIO_H
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 445cb6e39d5b..904d713366ff 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
+obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d3de01066f7d..8d1d94d9c649 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -203,6 +203,15 @@ int main(void)
DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
+ DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));
+
+ DEFINE(TCD_ESEL_NEXT,
+ offsetof(struct tlb_core_data, esel_next));
+ DEFINE(TCD_ESEL_MAX,
+ offsetof(struct tlb_core_data, esel_max));
+ DEFINE(TCD_ESEL_FIRST,
+ offsetof(struct tlb_core_data, esel_first));
+ DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_STD_MMU_64
@@ -232,6 +241,10 @@ int main(void)
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+#ifdef CONFIG_PPC_BOOK3S_64
+ DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
+ DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
+#endif
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 654932727873..abfa011344d9 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -12,7 +12,6 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index bfb18c7290b7..cc2d8962e090 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,11 +53,57 @@ _GLOBAL(__e500_dcache_setup)
isync
blr
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for PW20_WAIT_IDLE_BIT.
+ */
+#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
+_GLOBAL(setup_pw20_idle)
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Set PW20_WAIT bit, enable pw20 state */
+ ori r3, r3, PWRMGTCR0_PW20_WAIT
+ li r11, PW20_WAIT_IDLE_BIT
+
+ /* Set Automatic PW20 Core Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
+
+ mtspr SPRN_PWRMGTCR0, r3
+
+ blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for AV_WAIT_IDLE_BIT.
+ */
+#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHz */
+_GLOBAL(setup_altivec_idle)
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Enable Altivec Idle */
+ oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
+ li r11, AV_WAIT_IDLE_BIT
+
+ /* Set Automatic AltiVec Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
+
+ mtspr SPRN_PWRMGTCR0, r3
+
+ blr
+
_GLOBAL(__setup_cpu_e6500)
mflr r6
#ifdef CONFIG_PPC64
bl .setup_altivec_ivors
+ /* Touch IVOR42 only if the CPU supports E.HV category */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_lrat_ivor
+1:
#endif
+ bl setup_pw20_idle
+ bl setup_altivec_idle
bl __setup_cpu_e5500
mtlr r6
blr
@@ -119,6 +165,14 @@ _GLOBAL(__setup_cpu_e5500)
_GLOBAL(__restore_cpu_e6500)
mflr r5
bl .setup_altivec_ivors
+ /* Touch IVOR42 only if the CPU supports E.HV category */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_lrat_ivor
+1:
+ bl .setup_pw20_idle
+ bl .setup_altivec_idle
bl __restore_cpu_e5500
mtlr r5
blr
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 18b5b9cf8e37..37d1bb002aa9 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -29,7 +29,7 @@ _GLOBAL(__setup_cpu_power7)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
- bl __init_TLB
+ bl __init_tlb_power7
mtlr r11
blr
@@ -42,7 +42,7 @@ _GLOBAL(__restore_cpu_power7)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
- bl __init_TLB
+ bl __init_tlb_power7
mtlr r11
blr
@@ -59,7 +59,7 @@ _GLOBAL(__setup_cpu_power8)
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
- bl __init_TLB
+ bl __init_tlb_power8
bl __init_PMU_HV
mtlr r11
blr
@@ -78,7 +78,7 @@ _GLOBAL(__restore_cpu_power8)
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
- bl __init_TLB
+ bl __init_tlb_power8
bl __init_PMU_HV
mtlr r11
blr
@@ -134,15 +134,31 @@ __init_HFSCR:
mtspr SPRN_HFSCR,r3
blr
-__init_TLB:
- /*
- * Clear the TLB using the "IS 3" form of tlbiel instruction
- * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
- * so we just always do 512
- */
+/*
+ * Clear the TLB using the specified IS form of tlbiel instruction
+ * (invalidate by congruence class). P7 has 128 CCs, P8 has 512.
+ *
+ * r3 = IS field
+ */
+__init_tlb_power7:
+ li r3,0xc00 /* IS field = 0b11 */
+_GLOBAL(__flush_tlb_power7)
+ li r6,128
+ mtctr r6
+ mr r7,r3 /* IS field */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1: blr
+
+__init_tlb_power8:
+ li r3,0xc00 /* IS field = 0b11 */
+_GLOBAL(__flush_tlb_power8)
li r6,512
mtctr r6
- li r7,0xc00 /* IS field = 0b11 */
+ mr r7,r3 /* IS field */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
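For reference, the reworked routines are reached from C through the cpu_spec->flush_tlb hook wired up in the cputable.c hunk below, with the tlbiel "IS" selector passed in as the argument. A minimal sketch of such a caller; the TLBIEL_INVAL_PAGE selector name is the one used by the mce_power.c hunk later in this series:

	/* CPU-specific TLB flush with the requested IS selector */
	if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
		cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);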
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 597d954e5860..6c8dd5da4de5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,6 +71,10 @@ extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
+extern void __flush_tlb_power7(unsigned long inval_selector);
+extern void __flush_tlb_power8(unsigned long inval_selector);
+extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -440,6 +444,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7",
},
{ /* 2.07-compliant processor, i.e. Power8 "architected" mode */
@@ -456,6 +462,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power7 */
@@ -474,6 +482,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7",
},
{ /* Power7+ */
@@ -492,6 +502,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7+",
},
{ /* Power8E */
@@ -510,6 +522,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 */
@@ -528,6 +542,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Cell Broadband Engine */
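The .flush_tlb and .machine_check_early members populated above are assumed to be new fields of struct cpu_spec in asm/cputable.h, matching the extern declarations at the top of this hunk; a hedged sketch:

	/* assumed additions to struct cpu_spec (asm/cputable.h) */
	void (*flush_tlb)(unsigned long inval_selector);
	long (*machine_check_early)(struct pt_regs *regs);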
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index fdcd8f551aff..18d7c80ddeb9 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -17,7 +17,6 @@
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e4897523de41..54d0116256f7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
- mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
+ mask, tbl->it_offset << tbl->it_page_shift);
return 0;
} else
return 1;
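The window check above now scales with the table's own page size instead of the fixed IOMMU_PAGE_SHIFT. A worked example with hypothetical numbers:

	/* illustrative only: 64K IOMMU pages, table starting at entry 0x1000 */
	unsigned long it_page_shift = 16, it_offset = 0x1000;
	u64 mask = DMA_BIT_MASK(32);			/* 0xffffffff */
	bool ok = it_offset <= (mask >> it_page_shift);	/* 0x1000 <= 0xffff: supported */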
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 4bd687d5e7aa..148db72a8c43 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -84,7 +84,7 @@
#define EEH_MAX_FAILS 2100000
/* Time to wait for a PCI slot to report status, in milliseconds */
-#define PCI_BUS_RESET_WAIT_MSEC (60*1000)
+#define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
@@ -921,6 +921,13 @@ void eeh_add_device_late(struct pci_dev *dev)
eeh_sysfs_remove_device(edev->pdev);
edev->mode &= ~EEH_DEV_SYSFS;
+ /*
+	 * The PCI device should have been removed already, even
+	 * though that removal didn't complete cleanly, so we don't
+	 * need to call into the error handler afterwards.
+ */
+ edev->mode |= EEH_DEV_NO_HANDLER;
+
edev->pdev = NULL;
dev->dev.archdata.edev = NULL;
}
@@ -1023,6 +1030,14 @@ void eeh_remove_device(struct pci_dev *dev)
else
edev->mode |= EEH_DEV_DISCONNECTED;
+ /*
+	 * We're removing the device from the PCI subsystem, which
+	 * means its driver either doesn't support EEH or doesn't
+	 * support it well. So we rely entirely on hotplug to do the
+	 * recovery for this particular PCI device.
+ */
+ edev->mode |= EEH_DEV_NO_HANDLER;
+
eeh_addr_cache_rmv_dev(dev);
eeh_sysfs_remove_device(dev);
edev->mode &= ~EEH_DEV_SYSFS;
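Both hunks above set a new EEH_DEV_NO_HANDLER bit in edev->mode so the recovery path in eeh_driver.c below can skip driver error-handler callbacks for devices that are being (or should already have been) removed. The flag is assumed to be defined alongside the other EEH_DEV_* mode bits; the value here is purely illustrative and not taken from this diff:

	#define EEH_DEV_NO_HANDLER	(1 << 10)	/* skip driver error handlers */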
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index c17f90d0f73c..7bb30dca4e19 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -217,7 +217,8 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
if (!driver) return NULL;
if (!driver->err_handler ||
- !driver->err_handler->mmio_enabled) {
+ !driver->err_handler->mmio_enabled ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
eeh_pcid_put(dev);
return NULL;
}
@@ -258,7 +259,8 @@ static void *eeh_report_reset(void *data, void *userdata)
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->slot_reset) {
+ !driver->err_handler->slot_reset ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
eeh_pcid_put(dev);
return NULL;
}
@@ -297,7 +299,9 @@ static void *eeh_report_resume(void *data, void *userdata)
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->resume) {
+ !driver->err_handler->resume ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
eeh_pcid_put(dev);
return NULL;
}
@@ -476,7 +480,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
/* The longest amount of time to wait for a pci device
* to come back on line, in seconds.
*/
-#define MAX_WAIT_FOR_RECOVERY 150
+#define MAX_WAIT_FOR_RECOVERY 300
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
@@ -637,86 +641,92 @@ static void eeh_handle_special_event(void)
{
struct eeh_pe *pe, *phb_pe;
struct pci_bus *bus;
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
unsigned long flags;
- int rc = 0;
+ int rc;
- /*
- * The return value from next_error() has been classified as follows.
- * It might be good to enumerate them. However, next_error() is only
- * supported by PowerNV platform for now. So it would be fine to use
- * integer directly:
- *
- * 4 - Dead IOC 3 - Dead PHB
- * 2 - Fenced PHB 1 - Frozen PE
- * 0 - No error found
- *
- */
- rc = eeh_ops->next_error(&pe);
- if (rc <= 0)
- return;
- switch (rc) {
- case 4:
- /* Mark all PHBs in dead state */
- eeh_serialize_lock(&flags);
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe) continue;
-
- eeh_pe_state_mark(phb_pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ do {
+ rc = eeh_ops->next_error(&pe);
+
+ switch (rc) {
+ case EEH_NEXT_ERR_DEAD_IOC:
+ /* Mark all PHBs in dead state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events */
+ eeh_remove_event(NULL);
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe) continue;
+
+ eeh_pe_state_mark(phb_pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ }
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_FROZEN_PE:
+ case EEH_NEXT_ERR_FENCED_PHB:
+ case EEH_NEXT_ERR_DEAD_PHB:
+ /* Mark the PE in fenced state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events of the PHB */
+ eeh_remove_event(pe);
+
+ if (rc == EEH_NEXT_ERR_DEAD_PHB)
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ else
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_RECOVERING);
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_NONE:
+ return;
+ default:
+ pr_warn("%s: Invalid value %d from next_error()\n",
+ __func__, rc);
+ return;
}
- eeh_serialize_unlock(flags);
-
- /* Purge all events */
- eeh_remove_event(NULL);
- break;
- case 3:
- case 2:
- case 1:
- /* Mark the PE in fenced state */
- eeh_serialize_lock(&flags);
- if (rc == 3)
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
- else
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
- eeh_serialize_unlock(flags);
-
- /* Purge all events of the PHB */
- eeh_remove_event(pe);
- break;
- default:
- pr_err("%s: Invalid value %d from next_error()\n",
- __func__, rc);
- return;
- }
- /*
- * For fenced PHB and frozen PE, it's handled as normal
- * event. We have to remove the affected PHBs for dead
- * PHB and IOC
- */
- if (rc == 2 || rc == 1)
- eeh_handle_normal_event(pe);
- else {
- pci_lock_rescan_remove();
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
- continue;
-
- bus = eeh_pe_bus_get(phb_pe);
- /* Notify all devices that they're about to go down. */
- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
- pcibios_remove_pci_devices(bus);
+ /*
+		 * A fenced PHB or a frozen PE is handled as a normal
+		 * event. For a dead PHB or a dead IOC we have to
+		 * remove the affected PHBs.
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
+ eeh_handle_normal_event(pe);
+ } else {
+ pci_lock_rescan_remove();
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe ||
+ !(phb_pe->state & EEH_PE_PHB_DEAD))
+ continue;
+
+ /* Notify all devices to be down */
+ bus = eeh_pe_bus_get(phb_pe);
+ eeh_pe_dev_traverse(pe,
+ eeh_report_failure, NULL);
+ pcibios_remove_pci_devices(bus);
+ }
+ pci_unlock_rescan_remove();
}
- pci_unlock_rescan_remove();
- }
+
+ /*
+		 * If we have detected a dead IOC there is no need to
+		 * proceed any further, since all PHBs have already
+		 * been removed.
+ */
+ if (rc == EEH_NEXT_ERR_DEAD_IOC)
+ break;
+ } while (rc != EEH_NEXT_ERR_NONE);
}
/**
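The rewritten loop above replaces the bare 0-4 return values of eeh_ops->next_error() with named constants. A hedged sketch of those names, ordered to mirror the numbering documented in the removed comment (4 = dead IOC, 3 = dead PHB, 2 = fenced PHB, 1 = frozen PE, 0 = no error); the actual definition lives in asm/eeh.h and is not part of this excerpt:

	enum {
		EEH_NEXT_ERR_NONE = 0,
		EEH_NEXT_ERR_FROZEN_PE,
		EEH_NEXT_ERR_FENCED_PHB,
		EEH_NEXT_ERR_DEAD_PHB,
		EEH_NEXT_ERR_DEAD_IOC,
	};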
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index f9450537e335..f0c353fa655a 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -737,6 +736,9 @@ static void *eeh_restore_one_device_bars(void *data, void *flag)
else
eeh_restore_device_bars(edev, dn);
+ if (eeh_ops->restore_config)
+ eeh_ops->restore_config(dn);
+
return NULL;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bbfb0294b354..662c6dd98072 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -664,8 +664,16 @@ _GLOBAL(ret_from_except_lite)
bl .restore_interrupts
SCHEDULE_USER
b .ret_from_except_lite
-
-2: bl .save_nvgprs
+2:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
+ bne 3f /* only restore TM if nothing else to do */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .restore_tm_state
+ b restore
+3:
+#endif
+ bl .save_nvgprs
bl .restore_interrupts
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_notify_resume
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e7751561fd1d..063b65dd4f27 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -308,6 +308,7 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
EXCEPTION_STUB(0x300, hypercall)
EXCEPTION_STUB(0x320, ehpriv)
+ EXCEPTION_STUB(0x340, lrat_error)
.globl interrupt_end_book3e
interrupt_end_book3e:
@@ -677,6 +678,17 @@ kernel_dbg_exc:
bl .unknown_exception
b .ret_from_except
+/* LRAT Error interrupt */
+ START_EXCEPTION(lrat_error);
+ NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
+ PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x340, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
+
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
@@ -859,6 +871,7 @@ BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
+BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
@@ -1055,12 +1068,9 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_MAS0,r3
tlbre
mfspr r6,SPRN_MAS1
- rlwinm r6,r6,0,2,0 /* clear IPROT */
+ rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */
mtspr SPRN_MAS1,r6
tlbwe
-
- /* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1114,12 +1124,9 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_MAS0,r4
tlbre
mfspr r5,SPRN_MAS1
- rlwinm r5,r5,0,2,0 /* clear IPROT */
+ rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */
mtspr SPRN_MAS1,r5
tlbwe
-
- /* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1414,3 +1421,7 @@ _GLOBAL(setup_ehv_ivors)
SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
+
+_GLOBAL(setup_lrat_ivor)
+ SET_IVOR(42, 0x340) /* LRAT Error */
+ blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f905e40922e..38d507306a11 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -155,8 +155,30 @@ machine_check_pSeries_1:
*/
HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
+#ifdef CONFIG_PPC_P7_NAP
+BEGIN_FTR_SECTION
+ /* Running native on arch 2.06 or later, check if we are
+ * waking up from nap. We only handle no state loss and
+ * supervisor state loss. We do -not- handle hypervisor
+ * state loss at this time.
+ */
+ mfspr r13,SPRN_SRR1
+ rlwinm. r13,r13,47-31,30,31
+ beq 9f
+
+ /* waking up from powersave (nap) state */
+ cmpwi cr1,r13,2
+	/* Total loss of HV state is fatal. Let's just stay stuck here */
+ bgt cr1,.
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_0(PACA_EXMC)
+BEGIN_FTR_SECTION
+ b machine_check_pSeries_early
+FTR_SECTION_ELSE
b machine_check_pSeries_0
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
. = 0x300
.globl data_access_pSeries
@@ -405,6 +427,64 @@ denorm_exception_hv:
.align 7
/* moved from 0x200 */
+machine_check_pSeries_early:
+BEGIN_FTR_SECTION
+ EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
+ /*
+ * Register contents:
+ * R13 = PACA
+ * R9 = CR
+ * Original R9 to R13 is saved on PACA_EXMC
+ *
+ * Switch to mc_emergency stack and handle re-entrancy (though we
+ * currently don't test for overflow). Save MCE registers srr1,
+ * srr0, dar and dsisr and then set ME=1
+ *
+ * We use paca->in_mce to check whether this is the first entry or
+ * nested machine check. We increment paca->in_mce to track nested
+ * machine checks.
+ *
+ * If this is the first entry then set stack pointer to
+ * paca->mc_emergency_sp, otherwise r1 is already pointing to
+ * stack frame on mc_emergency stack.
+ *
+ * NOTE: We are here with MSR_ME=0 (off), which means we risk a
+ * checkstop if we get another machine check exception before we do
+ * rfid with MSR_ME=1.
+ */
+ mr r11,r1 /* Save r1 */
+ lhz r10,PACA_IN_MCE(r13)
+ cmpwi r10,0 /* Are we in nested machine check */
+ bne 0f /* Yes, we are. */
+ /* First machine check entry */
+ ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
+0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ addi r10,r10,1 /* increment paca->in_mce */
+ sth r10,PACA_IN_MCE(r13)
+ std r11,GPR1(r1) /* Save r1 on the stack. */
+ std r11,0(r1) /* make stack chain pointer */
+ mfspr r11,SPRN_SRR0 /* Save SRR0 */
+ std r11,_NIP(r1)
+ mfspr r11,SPRN_SRR1 /* Save SRR1 */
+ std r11,_MSR(r1)
+ mfspr r11,SPRN_DAR /* Save DAR */
+ std r11,_DAR(r1)
+ mfspr r11,SPRN_DSISR /* Save DSISR */
+ std r11,_DSISR(r1)
+ std r9,_CCR(r1) /* Save CR in stackframe */
+ /* Save r9 through r13 from EXMC save area to stack frame. */
+ EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
+ mfmsr r11 /* get MSR value */
+ ori r11,r11,MSR_ME /* turn on ME bit */
+ ori r11,r11,MSR_RI /* turn on RI bit */
+ ld r12,PACAKBASE(r13) /* get high part of &label */
+ LOAD_HANDLER(r12, machine_check_handle_early)
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r11
+ rfid
+ b . /* prevent speculative execution */
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
machine_check_pSeries:
.globl machine_check_fwnmi
machine_check_fwnmi:
@@ -688,30 +768,6 @@ kvmppc_skip_Hinterrupt:
STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
- /*
- * Machine check is different because we use a different
- * save area: PACA_EXMC instead of PACA_EXGEN.
- */
- .align 7
- .globl machine_check_common
-machine_check_common:
-
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
- FINISH_NAP
- DISABLE_INTS
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .machine_check_exception
- b .ret_from_except
-
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
@@ -1080,6 +1136,30 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
+ /*
+ * Machine check is different because we use a different
+ * save area: PACA_EXMC instead of PACA_EXGEN.
+ */
+ .align 7
+ .globl machine_check_common
+machine_check_common:
+
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ FINISH_NAP
+ DISABLE_INTS
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_exception
+ b .ret_from_except
+
.align 7
.globl alignment_common
alignment_common:
@@ -1263,6 +1343,120 @@ _GLOBAL(opal_mc_secondary_handler)
#endif /* CONFIG_PPC_POWERNV */
+#define MACHINE_CHECK_HANDLER_WINDUP \
+ /* Clear MSR_RI before setting SRR0 and SRR1. */\
+ li r0,MSR_RI; \
+ mfmsr r9; /* get MSR value */ \
+ andc r9,r9,r0; \
+ mtmsrd r9,1; /* Clear MSR_RI */ \
+ /* Move original SRR0 and SRR1 into the respective regs */ \
+ ld r9,_MSR(r1); \
+ mtspr SPRN_SRR1,r9; \
+ ld r3,_NIP(r1); \
+ mtspr SPRN_SRR0,r3; \
+ ld r9,_CTR(r1); \
+ mtctr r9; \
+ ld r9,_XER(r1); \
+ mtxer r9; \
+ ld r9,_LINK(r1); \
+ mtlr r9; \
+ REST_GPR(0, r1); \
+ REST_8GPRS(2, r1); \
+ REST_GPR(10, r1); \
+ ld r11,_CCR(r1); \
+ mtcr r11; \
+ /* Decrement paca->in_mce. */ \
+ lhz r12,PACA_IN_MCE(r13); \
+ subi r12,r12,1; \
+ sth r12,PACA_IN_MCE(r13); \
+ REST_GPR(11, r1); \
+ REST_2GPRS(12, r1); \
+ /* restore original r1. */ \
+ ld r1,GPR1(r1)
+
+ /*
+ * Handle machine check early in real mode. We come here with
+ * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
+ */
+ .align 7
+ .globl machine_check_handle_early
+machine_check_handle_early:
+ std r0,GPR0(r1) /* Save r0 */
+ EXCEPTION_PROLOG_COMMON_3(0x200)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_early
+ ld r12,_MSR(r1)
+#ifdef CONFIG_PPC_P7_NAP
+ /*
+ * Check if thread was in power saving mode. We come here when any
+ * of the following is true:
+ * a. thread wasn't in power saving mode
+ * b. thread was in power saving mode with no state loss or
+ * supervisor state loss
+ *
+ * Go back to nap again if (b) is true.
+ */
+ rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
+	beq	4f		/* No, it wasn't */
+ /* Thread was in power saving mode. Go back to nap again. */
+ cmpwi r11,2
+ bne 3f
+ /* Supervisor state loss */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
+3: bl .machine_check_queue_event
+ MACHINE_CHECK_HANDLER_WINDUP
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ b .power7_enter_nap_mode
+4:
+#endif
+ /*
+ * Check if we are coming from hypervisor userspace. If yes then we
+ * continue in host kernel in V mode to deliver the MC event.
+ */
+ rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
+ beq 5f
+ andi. r11,r12,MSR_PR /* See if coming from user. */
+ bne 9f /* continue in V mode if we are. */
+
+5:
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ /*
+ * We are coming from kernel context. Check if we are coming from
+	 * guest. If yes, then we can continue. We will fall through
+ * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
+ */
+ lbz r11,HSTATE_IN_GUEST(r13)
+ cmpwi r11,0 /* Check if coming from guest */
+ bne 9f /* continue if we are. */
+#endif
+ /*
+	 * At this point we are not sure which context we came from.
+ * Queue up the MCE event and return from the interrupt.
+	 * But before that, check if this is an unrecoverable exception.
+ * If yes, then stay on emergency stack and panic.
+ */
+ andi. r11,r12,MSR_RI
+ bne 2f
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+2:
+ /*
+ * Return from MC interrupt.
+ * Queue up the MCE event so that we can log it later, while
+ * returning from kernel or opal call.
+ */
+ bl .machine_check_queue_event
+ MACHINE_CHECK_HANDLER_WINDUP
+ rfid
+9:
+ /* Deliver the machine check to host kernel in V mode. */
+ MACHINE_CHECK_HANDLER_WINDUP
+ b machine_check_pSeries
+
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
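machine_check_handle_early above enters C through the .machine_check_early call; a hedged sketch of what that entry point is assumed to do, namely dispatch to the per-CPU hook installed in the cputable.c hunk (the real implementation lives outside this excerpt):

	long machine_check_early(struct pt_regs *regs)
	{
		long handled = 0;

		/* run the CPU-specific real-mode handler, if one is registered */
		if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
			handled = cur_cpu_spec->machine_check_early(regs);
		return handled;
	}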
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index f7f5b8bed68f..9ad236e5d2c9 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -81,6 +81,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
+ * Enable use of the FPU, and VSX if possible, for the caller.
+ */
+_GLOBAL(fp_enable)
+ mfmsr r3
+ ori r3,r3,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r3,r3,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ SYNC
+ MTMSRD(r3)
+ isync /* (not necessary for arch 2.02 and later) */
+ blr
+
+/*
* Load state from memory into FP registers including FPSCR.
* Assumes the caller has enabled FP in the MSR.
*/
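fp_enable() gives kernel code a way to turn on MSR_FP (and MSR_VSX where available) before touching the floating-point register file. A minimal, hypothetical caller:

	fp_enable();		/* enable FP/VSX in the MSR for this CPU */
	/* ... FP/VSX registers may now be loaded or stored ... */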
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index a92c79be2728..f22e7e44fbf3 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -176,6 +176,8 @@ skpinv: addi r6,r6,1 /* Increment */
/* 7. Jump to KERNELBASE mapping */
lis r6,(KERNELBASE & ~0xfff)@h
ori r6,r6,(KERNELBASE & ~0xfff)@l
+ rlwinm r7,r25,0,0x03ffffff
+ add r6,r7,r6
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4f0946de2d5c..b7363bd42452 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -23,6 +23,7 @@
*/
#include <linux/threads.h>
+#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index f45726a1d963..b497188a94a1 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -65,29 +65,78 @@ _ENTRY(_start);
nop
/* Translate device tree address to physical, save in r30/r31 */
- mfmsr r16
- mfspr r17,SPRN_PID
- rlwinm r17,r17,16,0x3fff0000 /* turn PID into MAS6[SPID] */
- rlwimi r17,r16,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
- mtspr SPRN_MAS6,r17
-
- tlbsx 0,r3 /* must succeed */
-
- mfspr r16,SPRN_MAS1
- mfspr r20,SPRN_MAS3
- rlwinm r17,r16,25,0x1f /* r17 = log2(page size) */
- li r18,1024
- slw r18,r18,r17 /* r18 = page size */
- addi r18,r18,-1
- and r19,r3,r18 /* r19 = page offset */
- andc r31,r20,r18 /* r31 = page base */
- or r31,r31,r19 /* r31 = devtree phys addr */
- mfspr r30,SPRN_MAS7
+ bl get_phys_addr
+ mr r30,r3
+ mr r31,r4
li r25,0 /* phys kernel start (low) */
li r24,0 /* CPU number */
li r23,0 /* phys kernel start (high) */
+#ifdef CONFIG_RELOCATABLE
+ LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */
+
+ /* Translate _stext address to physical, save in r23/r25 */
+ bl get_phys_addr
+ mr r23,r3
+ mr r25,r4
+
+ bl 0f
+0: mflr r8
+ addis r3,r8,(is_second_reloc - 0b)@ha
+ lwz r19,(is_second_reloc - 0b)@l(r3)
+
+ /* Check if this is the second relocation. */
+ cmpwi r19,1
+ bne 1f
+
+ /*
+	 * For the second relocation we have already obtained the real
+	 * memstart_addr from the device tree, so we will map PAGE_OFFSET
+	 * to memstart_addr and the virtual start address of the kernel
+	 * becomes:
+	 *	PAGE_OFFSET + (kernstart_addr - memstart_addr)
+	 * Since the offset between kernstart_addr and memstart_addr is
+	 * never beyond 1G, we can just use their lower 32 bits for the
+	 * calculation.
+ */
+ lis r3,PAGE_OFFSET@h
+
+ addis r4,r8,(kernstart_addr - 0b)@ha
+ addi r4,r4,(kernstart_addr - 0b)@l
+ lwz r5,4(r4)
+
+ addis r6,r8,(memstart_addr - 0b)@ha
+ addi r6,r6,(memstart_addr - 0b)@l
+ lwz r7,4(r6)
+
+ subf r5,r7,r5
+ add r3,r3,r5
+ b 2f
+
+1:
+ /*
+	 * We have the runtime (virtual) address of our base.
+	 * We calculate our offset from the start of a 64M page.
+ * We could map the 64M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */
+ rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+ add r3,r4,r3 /* Required Virtual Address */
+
+2: bl relocate
+
+ /*
+	 * For the second relocation we have already set up the right TLB
+	 * entries for the kernel space, so skip the code in fsl_booke_entry_mapping.S
+ */
+ cmpwi r19,1
+ beq set_ivor
+#endif
+
/* We try to not make any assumptions about how the boot loader
* setup or used the TLBs. We invalidate all mappings from the
* boot loader and load a single entry in TLB1[0] to map the
@@ -113,6 +162,7 @@ _ENTRY(__early_start)
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
+set_ivor:
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
SET_IVOR(1, MachineCheck);
@@ -166,8 +216,7 @@ _ENTRY(__early_start)
/* Check to see if we're the second processor, and jump
* to the secondary_start code if so
*/
- lis r24, boot_cpuid@h
- ori r24, r24, boot_cpuid@l
+ LOAD_REG_ADDR_PIC(r24, boot_cpuid)
lwz r24, 0(r24)
cmpwi r24, -1
mfspr r24,SPRN_PIR
@@ -197,6 +246,18 @@ _ENTRY(__early_start)
bl early_init
+#ifdef CONFIG_RELOCATABLE
+ mr r3,r30
+ mr r4,r31
+#ifdef CONFIG_PHYS_64BIT
+ mr r5,r23
+ mr r6,r25
+#else
+ mr r5,r25
+#endif
+ bl relocate_init
+#endif
+
#ifdef CONFIG_DYNAMIC_MEMSTART
lis r3,kernstart_addr@ha
la r3,kernstart_addr@l(r3)
@@ -856,6 +917,33 @@ KernelSPE:
#endif /* CONFIG_SPE */
/*
+ * Translate the effective address in r3 to a physical address. The physical
+ * address is returned in r3 (upper 32 bits) and r4 (lower 32 bits).
+ */
+get_phys_addr:
+ mfmsr r8
+ mfspr r9,SPRN_PID
+ rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
+ rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
+ mtspr SPRN_MAS6,r9
+
+ tlbsx 0,r3 /* must succeed */
+
+ mfspr r8,SPRN_MAS1
+ mfspr r12,SPRN_MAS3
+ rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */
+ li r10,1024
+ slw r10,r10,r9 /* r10 = page size */
+ addi r10,r10,-1
+ and r11,r3,r10 /* r11 = page offset */
+ andc r4,r12,r10 /* r4 = page base */
+ or r4,r4,r11 /* r4 = devtree phys addr */
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r3,SPRN_MAS7
+#endif
+ blr
+
+/*
* Global functions
*/
@@ -1057,24 +1145,36 @@ _GLOBAL(__flush_disable_L1)
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
__secondary_start:
- lis r3,__secondary_hold_acknowledge@h
- ori r3,r3,__secondary_hold_acknowledge@l
- stw r24,0(r3)
-
- li r3,0
- mr r4,r24 /* Why? */
- bl call_setup_cpu
-
- lis r3,tlbcam_index@ha
- lwz r3,tlbcam_index@l(r3)
+ LOAD_REG_ADDR_PIC(r3, tlbcam_index)
+ lwz r3,0(r3)
mtctr r3
li r26,0 /* r26 safe? */
+ bl switch_to_as1
+ mr r27,r3 /* tlb entry */
/* Load each CAM entry */
1: mr r3,r26
bl loadcam_entry
addi r26,r26,1
bdnz 1b
+ mr r3,r27 /* tlb entry */
+ LOAD_REG_ADDR_PIC(r4, memstart_addr)
+ lwz r4,0(r4)
+ mr r5,r25 /* phys kernel start */
+ rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
+ subf r4,r5,r4 /* memstart_addr - phys kernel start */
+ li r5,0 /* no device tree */
+ li r6,0 /* not boot cpu */
+ bl restore_to_as0
+
+
+ lis r3,__secondary_hold_acknowledge@h
+ ori r3,r3,__secondary_hold_acknowledge@l
+ stw r24,0(r3)
+
+ li r3,0
+ mr r4,r24 /* Why? */
+ bl call_setup_cpu
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -1111,6 +1211,112 @@ __secondary_hold_acknowledge:
#endif
/*
+ * Create a TLB entry with the same effective and physical address as the
+ * TLB entry used by the currently running code, but set its TS to 1.
+ * Then switch to address space 1. It returns with r3 set to the ESEL of
+ * the newly created TLB entry.
+ */
+_GLOBAL(switch_to_as1)
+ mflr r5
+
+	/* Find an unused entry */
+ mfspr r3,SPRN_TLB1CFG
+ andi. r3,r3,0xfff
+ mfspr r4,SPRN_PID
+ rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */
+ mtspr SPRN_MAS6,r4
+1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ addi r3,r3,-1
+ rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r4
+ tlbre
+ mfspr r4,SPRN_MAS1
+ andis. r4,r4,MAS1_VALID@h
+ bne 1b
+
+ /* Get the tlb entry used by the current running code */
+ bl 0f
+0: mflr r4
+ tlbsx 0,r4
+
+ mfspr r4,SPRN_MAS1
+ ori r4,r4,MAS1_TS /* Set the TS = 1 */
+ mtspr SPRN_MAS1,r4
+
+ mfspr r4,SPRN_MAS0
+ rlwinm r4,r4,0,~MAS0_ESEL_MASK
+ rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r4
+ tlbwe
+ isync
+ sync
+
+ mfmsr r4
+ ori r4,r4,MSR_IS | MSR_DS
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r4
+ sync
+ rfi
+
+/*
+ * Return to address space 0 and invalidate the TLB entry created
+ * by switch_to_as1.
+ * r3 - the tlb entry which should be invalidated
+ * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
+ * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
+ * r6 - boot cpu
+*/
+_GLOBAL(restore_to_as0)
+ mflr r0
+
+ bl 0f
+0: mflr r9
+ addi r9,r9,1f - 0b
+
+ /*
+ * We may map the PAGE_OFFSET in AS0 to a different physical address,
+	 * so we need to calculate the right jump target and device tree
+	 * address based on the offset passed in r4.
+ */
+ add r9,r9,r4
+ add r5,r5,r4
+ add r0,r0,r4
+
+2: mfmsr r7
+ li r8,(MSR_IS | MSR_DS)
+ andc r7,r7,r8
+
+ mtspr SPRN_SRR0,r9
+ mtspr SPRN_SRR1,r7
+ sync
+ rfi
+
+ /* Invalidate the temporary tlb entry for AS1 */
+1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r9
+ tlbre
+ mfspr r9,SPRN_MAS1
+	rlwinm	r9,r9,0,2,31	/* Clear MAS1 Valid and IPROT */
+ mtspr SPRN_MAS1,r9
+ tlbwe
+ isync
+
+ cmpwi r4,0
+ cmpwi cr1,r6,0
+ cror eq,4*cr1+eq,eq
+ bne 3f /* offset != 0 && is_boot_cpu */
+ mtlr r0
+ blr
+
+ /*
+	 * PAGE_OFFSET will map to a different physical address, so
+	 * jump to _start to relocate again.
+ */
+3: mr r3,r5
+ bl _start
+
+/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
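On the relocatable path above, early_init is followed by a call into C with the device-tree physical address in r3/r4 and the kernel's physical start in r5 (and r6 for 64-bit physical addresses). A hedged sketch of the assumed prototype; the definition is not part of this excerpt:

	/* dt_ptr: physical address of the device tree,
	 * start:  physical start address of the kernel image */
	void __init relocate_init(u64 dt_ptr, phys_addr_t start);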
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f0b47d1a6b0e..b0a1792279bb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 847e40e62fce..3fdef0f0c67f 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,6 +84,7 @@ _GLOBAL(power7_nap)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+_GLOBAL(power7_enter_nap_mode)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 97a3715ac8bd..b82227e7e21b 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -3,7 +3,6 @@
*
* (C) Copyright 2004 Linus Torvalds
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/export.h>
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 572bb5b95f35..d773dd440a45 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,13 @@ again:
if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IOMMU_PAGE_SHIFT);
+ 1 << tbl->it_page_shift);
else
- boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+ boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- n = iommu_area_alloc(tbl->it_map, limit, start, npages,
- tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
- align_mask);
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+ boundary_size >> tbl->it_page_shift, align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
return DMA_ERROR_CODE;
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
+ ret = entry << tbl->it_page_shift; /* Set the return dma address */
/* Put the TCEs in the HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- (unsigned long)page & IOMMU_PAGE_MASK,
- direction, attrs);
+ (unsigned long)page &
+ IOMMU_PAGE_MASK(tbl), direction, attrs);
/* ppc_md.tce_build() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
{
unsigned long entry, free_entry;
- entry = dma_addr >> IOMMU_PAGE_SHIFT;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
struct iommu_pool *pool;
- entry = dma_addr >> IOMMU_PAGE_SHIFT;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
- npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
align = 0;
- if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+ align = PAGE_SHIFT - tbl->it_page_shift;
entry = iommu_range_alloc(dev, tbl, npages, &handle,
- mask >> IOMMU_PAGE_SHIFT, align);
+ mask >> tbl->it_page_shift, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << IOMMU_PAGE_SHIFT;
- dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+ dma_addr = entry << tbl->it_page_shift;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- vaddr & IOMMU_PAGE_MASK,
- direction, attrs);
+ vaddr & IOMMU_PAGE_MASK(tbl),
+ direction, attrs);
if(unlikely(build_fail))
goto failure;
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & IOMMU_PAGE_MASK;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
npages = iommu_num_pages(s->dma_address, s->dma_length,
- IOMMU_PAGE_SIZE);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sg->dma_length == 0)
break;
npages = iommu_num_pages(dma_handle, sg->dma_length,
- IOMMU_PAGE_SIZE);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, dma_handle, npages);
sg = sg_next(sg);
}
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
set_bit(0, tbl->it_map);
/* We only split the IOMMU table if we have 1GB or more of space */
- if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+ if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
tbl->nr_pools = IOMMU_NR_POOLS;
else
tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
if (tbl) {
align = 0;
- if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+ align = PAGE_SHIFT - tbl->it_page_shift;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
- mask >> IOMMU_PAGE_SHIFT, align,
+ mask >> tbl->it_page_shift, align,
attrs);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
npages);
}
} else
- dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
}
return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
BUG_ON(direction == DMA_NONE);
if (tbl) {
- npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(dma_handle, size,
+ IOMMU_PAGE_SIZE(tbl));
iommu_free(tbl, dma_handle, npages);
}
}
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- nio_pages = size >> IOMMU_PAGE_SHIFT;
- io_order = get_iommu_order(size);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+ mask >> tbl->it_page_shift, io_order, NULL);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
unsigned int nio_pages;
size = PAGE_ALIGN(size);
- nio_pages = size >> IOMMU_PAGE_SHIFT;
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
if (tce_value)
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
return -EINVAL;
- if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+ if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
/* if (unlikely(ret))
pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
- __func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+ __func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
hwaddr, ret); */
return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
{
int ret;
struct page *page = NULL;
- unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+ unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
enum dma_data_direction direction = iommu_tce_direction(tce);
ret = get_user_pages_fast(tce & PAGE_MASK, 1,
direction != DMA_TO_DEVICE, &page);
if (unlikely(ret != 1)) {
/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
- tce, entry << IOMMU_PAGE_SHIFT, ret); */
+ tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
return -EFAULT;
}
hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
if (ret < 0)
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
- __func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+ __func__, entry << tbl->it_page_shift, tce, ret);
return ret;
}
@@ -1105,7 +1105,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
-static int iommu_add_device(struct device *dev)
+int iommu_add_device(struct device *dev)
{
struct iommu_table *tbl;
int ret = 0;
@@ -1127,6 +1127,12 @@ static int iommu_add_device(struct device *dev)
pr_debug("iommu_tce: adding %s to iommu group %d\n",
dev_name(dev), iommu_group_id(tbl->it_group));
+ if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+ pr_err("iommu_tce: unsupported iommu page size.");
+ pr_err("%s has not been added\n", dev_name(dev));
+ return -EINVAL;
+ }
+
ret = iommu_group_add_device(tbl->it_group, dev);
if (ret < 0)
pr_err("iommu_tce: %s has not been added, ret=%d\n",
@@ -1134,52 +1140,23 @@ static int iommu_add_device(struct device *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(iommu_add_device);
-static void iommu_del_device(struct device *dev)
-{
- iommu_group_remove_device(dev);
-}
-
-static int iommu_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+void iommu_del_device(struct device *dev)
{
- struct device *dev = data;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- return iommu_add_device(dev);
- case BUS_NOTIFY_DEL_DEVICE:
- iommu_del_device(dev);
- return 0;
- default:
- return 0;
+ /*
+	 * Some devices might not have an IOMMU table and group,
+	 * so we don't need to detach them from any associated
+	 * IOMMU group.
+ */
+ if (!dev->iommu_group) {
+ pr_debug("iommu_tce: skipping device %s with no tbl\n",
+ dev_name(dev));
+ return;
}
-}
-static struct notifier_block tce_iommu_bus_nb = {
- .notifier_call = iommu_bus_notifier,
-};
-
-static int __init tce_iommu_init(void)
-{
- struct pci_dev *pdev = NULL;
-
- BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
-
- for_each_pci_dev(pdev)
- iommu_add_device(&pdev->dev);
-
- bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
- return 0;
-}
-
-subsys_initcall_sync(tce_iommu_init);
-
-#else
-
-void iommu_register_group(struct iommu_table *tbl,
- int pci_domain_number, unsigned long pe_num)
-{
+ iommu_group_remove_device(dev);
}
+EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */
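Throughout this hunk the fixed IOMMU_PAGE_SIZE/IOMMU_PAGE_MASK constants are replaced by per-table variants keyed off tbl->it_page_shift. A hedged sketch of what the new helpers are assumed to expand to (defined in asm/iommu.h, not shown here):

	#define IOMMU_PAGE_SIZE(tbl)	(1UL << (tbl)->it_page_shift)
	#define IOMMU_PAGE_MASK(tbl)	(~(IOMMU_PAGE_SIZE(tbl) - 1))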
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ba0165615215..9729b23bfb0a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -354,8 +354,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
- seq_printf(p, " Local timer interrupts\n");
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
+ seq_printf(p, " Local timer interrupts for timer event device\n");
+
+ seq_printf(p, "%*s: ", prec, "LOC");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
+ seq_printf(p, " Local timer interrupts for others\n");
seq_printf(p, "%*s: ", prec, "SPU");
for_each_online_cpu(j)
@@ -389,11 +394,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
u64 arch_irq_stat_cpu(unsigned int cpu)
{
- u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+ u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
+ sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_DOORBELL
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif
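The /proc/interrupts and arch_irq_stat_cpu() changes above split the old timer_irqs counter in two. A hedged sketch of the per-CPU counters the code now reads from irq_cpustat_t (asm/hardirq.h; field order and any other members are not shown in this excerpt):

	unsigned int timer_irqs_event;	/* decrementer fired for a timer event */
	unsigned int timer_irqs_others;	/* decrementer fired for other reasons */
	unsigned int pmu_irqs;
	unsigned int mce_exceptions;
	unsigned int spurious_irqs;
	unsigned int doorbell_irqs;	/* CONFIG_PPC_DOORBELL only */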
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 83e89d310734..8504657379f1 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
new file mode 100644
index 000000000000..cadef7e64e42
--- /dev/null
+++ b/arch/powerpc/kernel/mce.c
@@ -0,0 +1,352 @@
+/*
+ * Machine check exception handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+#define pr_fmt(fmt) "mce: " fmt
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <linux/export.h>
+#include <linux/irq_work.h>
+#include <asm/mce.h>
+
+static DEFINE_PER_CPU(int, mce_nest_count);
+static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
+
+/* Queue for delayed MCE events. */
+static DEFINE_PER_CPU(int, mce_queue_count);
+static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
+
+static void machine_check_process_queued_event(struct irq_work *work);
+struct irq_work mce_event_process_work = {
+ .func = machine_check_process_queued_event,
+};
+
+static void mce_set_error_info(struct machine_check_event *mce,
+ struct mce_error_info *mce_err)
+{
+ mce->error_type = mce_err->error_type;
+ switch (mce_err->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
+ break;
+ case MCE_ERROR_TYPE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/*
+ * Decode and save high-level MCE information into the per-CPU buffer, which
+ * is an array of machine_check_event structures.
+ */
+void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err,
+ uint64_t addr)
+{
+ uint64_t srr1;
+ int index = __get_cpu_var(mce_nest_count)++;
+ struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+
+ /*
+ * Return if we don't have enough space to log mce event.
+ * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
+ * the check below will stop buffer overrun.
+ */
+ if (index >= MAX_MC_EVT)
+ return;
+
+ /* Populate generic machine check info */
+ mce->version = MCE_V1;
+ mce->srr0 = regs->nip;
+ mce->srr1 = regs->msr;
+ mce->gpr3 = regs->gpr[3];
+ mce->in_use = 1;
+
+ mce->initiator = MCE_INITIATOR_CPU;
+ if (handled)
+ mce->disposition = MCE_DISPOSITION_RECOVERED;
+ else
+ mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
+ mce->severity = MCE_SEV_ERROR_SYNC;
+
+ srr1 = regs->msr;
+
+ /*
+ * Populate the mce error_type and type-specific error_type.
+ */
+ mce_set_error_info(mce, mce_err);
+
+ if (!addr)
+ return;
+
+ if (mce->error_type == MCE_ERROR_TYPE_TLB) {
+ mce->u.tlb_error.effective_address_provided = true;
+ mce->u.tlb_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
+ mce->u.slb_error.effective_address_provided = true;
+ mce->u.slb_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
+ mce->u.erat_error.effective_address_provided = true;
+ mce->u.erat_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
+ mce->u.ue_error.effective_address_provided = true;
+ mce->u.ue_error.effective_address = addr;
+ }
+ return;
+}
+
+/*
+ * get_mce_event:
+ * mce Pointer to machine_check_event structure to be filled.
+ * release Flag to indicate whether to free the event slot or not.
+ * 0 <= do not release the mce event. Caller will invoke
+ * release_mce_event() once event has been consumed.
+ * 1 <= release the slot.
+ *
+ * return 1 = success
+ * 0 = failure
+ *
+ * get_mce_event() will be called by the platform-specific machine check
+ * handling routine and by KVM.
+ * When we call get_mce_event() we are still in interrupt context, and
+ * preemption is not scheduled until the ret_from_except() routine
+ * is called.
+ */
+int get_mce_event(struct machine_check_event *mce, bool release)
+{
+ int index = __get_cpu_var(mce_nest_count) - 1;
+ struct machine_check_event *mc_evt;
+ int ret = 0;
+
+ /* Sanity check */
+ if (index < 0)
+ return ret;
+
+ /* Check if we have MCE info to process. */
+ if (index < MAX_MC_EVT) {
+ mc_evt = &__get_cpu_var(mce_event[index]);
+ /* Copy the event structure and release the original */
+ if (mce)
+ *mce = *mc_evt;
+ if (release)
+ mc_evt->in_use = 0;
+ ret = 1;
+ }
+ /* Decrement the count to free the slot. */
+ if (release)
+ __get_cpu_var(mce_nest_count)--;
+
+ return ret;
+}
+
+void release_mce_event(void)
+{
+ get_mce_event(NULL, true);
+}
+
+/*
+ * Queue up the MCE event so that it can be handled later.
+ */
+void machine_check_queue_event(void)
+{
+ int index;
+ struct machine_check_event evt;
+
+ if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ return;
+
+ index = __get_cpu_var(mce_queue_count)++;
+ /* If queue is full, just return for now. */
+ if (index >= MAX_MC_EVT) {
+ __get_cpu_var(mce_queue_count)--;
+ return;
+ }
+ __get_cpu_var(mce_event_queue[index]) = evt;
+
+ /* Queue irq work to process this event later. */
+ irq_work_queue(&mce_event_process_work);
+}
+
+/*
+ * Process pending MCE events from the MCE event queue. This function will be
+ * called during syscall exit.
+ */
+static void machine_check_process_queued_event(struct irq_work *work)
+{
+ int index;
+
+ /*
+ * For now just print it to console.
+ * TODO: log this error event to FSP or nvram.
+ */
+ while (__get_cpu_var(mce_queue_count) > 0) {
+ index = __get_cpu_var(mce_queue_count) - 1;
+ machine_check_print_event_info(
+ &__get_cpu_var(mce_event_queue[index]));
+ __get_cpu_var(mce_queue_count)--;
+ }
+}
+
+void machine_check_print_event_info(struct machine_check_event *evt)
+{
+ const char *level, *sevstr, *subtype;
+ static const char *mc_ue_types[] = {
+ "Indeterminate",
+ "Instruction fetch",
+ "Page table walk ifetch",
+ "Load/Store",
+ "Page table walk Load/Store",
+ };
+ static const char *mc_slb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *mc_erat_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *mc_tlb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+
+ /* Print things out */
+ if (evt->version != MCE_V1) {
+ pr_err("Machine Check Exception, Unknown event version %d !\n",
+ evt->version);
+ return;
+ }
+ switch (evt->severity) {
+ case MCE_SEV_NO_ERROR:
+ level = KERN_INFO;
+ sevstr = "Harmless";
+ break;
+ case MCE_SEV_WARNING:
+ level = KERN_WARNING;
+ sevstr = "";
+ break;
+ case MCE_SEV_ERROR_SYNC:
+ level = KERN_ERR;
+ sevstr = "Severe";
+ break;
+ case MCE_SEV_FATAL:
+ default:
+ level = KERN_ERR;
+ sevstr = "Fatal";
+ break;
+ }
+
+ printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
+ evt->disposition == MCE_DISPOSITION_RECOVERED ?
+	       "Recovered" : "Not recovered");
+ printk("%s Initiator: %s\n", level,
+ evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
+ switch (evt->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ subtype = evt->u.ue_error.ue_error_type <
+ ARRAY_SIZE(mc_ue_types) ?
+ mc_ue_types[evt->u.ue_error.ue_error_type]
+ : "Unknown";
+ printk("%s Error type: UE [%s]\n", level, subtype);
+ if (evt->u.ue_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.ue_error.effective_address);
+ if (evt->u.ue_error.physical_address_provided)
+			printk("%s    Physical address: %016llx\n",
+ level, evt->u.ue_error.physical_address);
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ subtype = evt->u.slb_error.slb_error_type <
+ ARRAY_SIZE(mc_slb_types) ?
+ mc_slb_types[evt->u.slb_error.slb_error_type]
+ : "Unknown";
+ printk("%s Error type: SLB [%s]\n", level, subtype);
+ if (evt->u.slb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.slb_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ subtype = evt->u.erat_error.erat_error_type <
+ ARRAY_SIZE(mc_erat_types) ?
+ mc_erat_types[evt->u.erat_error.erat_error_type]
+ : "Unknown";
+ printk("%s Error type: ERAT [%s]\n", level, subtype);
+ if (evt->u.erat_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.erat_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ subtype = evt->u.tlb_error.tlb_error_type <
+ ARRAY_SIZE(mc_tlb_types) ?
+ mc_tlb_types[evt->u.tlb_error.tlb_error_type]
+ : "Unknown";
+ printk("%s Error type: TLB [%s]\n", level, subtype);
+ if (evt->u.tlb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.tlb_error.effective_address);
+ break;
+ default:
+ case MCE_ERROR_TYPE_UNKNOWN:
+ printk("%s Error type: Unknown\n", level);
+ break;
+ }
+}
+
+uint64_t get_mce_fault_addr(struct machine_check_event *evt)
+{
+ switch (evt->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ if (evt->u.ue_error.effective_address_provided)
+ return evt->u.ue_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ if (evt->u.slb_error.effective_address_provided)
+ return evt->u.slb_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ if (evt->u.erat_error.effective_address_provided)
+ return evt->u.erat_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ if (evt->u.tlb_error.effective_address_provided)
+ return evt->u.tlb_error.effective_address;
+ break;
+ default:
+ case MCE_ERROR_TYPE_UNKNOWN:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_mce_fault_addr);
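
For reference, these accessors pair with get_mce_event()/release_mce_event(), which the book3s_hv_ras.c hunk further down uses with MCE_EVENT_DONTRELEASE. A minimal consumer sketch, assuming only the declarations visible in this series; the function name example_log_current_mce() is purely illustrative:

#include <linux/printk.h>
#include <asm/mce.h>

static void example_log_current_mce(void)
{
	struct machine_check_event evt;

	/* Peek at the queued event without releasing it, so the host
	 * machine check handler can still consume it later. */
	if (!get_mce_event(&evt, MCE_EVENT_DONTRELEASE))
		return;

	machine_check_print_event_info(&evt);
	pr_info("faulting address: 0x%016llx\n",
		(unsigned long long)get_mce_fault_addr(&evt));
}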
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
new file mode 100644
index 000000000000..27c93f41166f
--- /dev/null
+++ b/arch/powerpc/kernel/mce_power.c
@@ -0,0 +1,284 @@
+/*
+ * Machine check exception handling CPU-side for power7 and power8
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+#define pr_fmt(fmt) "mce_power: " fmt
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <asm/mmu.h>
+#include <asm/mce.h>
+
+/* flush SLBs and reload */
+static void flush_and_reload_slb(void)
+{
+ struct slb_shadow *slb;
+ unsigned long i, n;
+
+ /* Invalidate all SLBs */
+ asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+ /*
+ * If machine check is hit when in guest or in transition, we will
+ * only flush the SLBs and continue.
+ */
+ if (get_paca()->kvm_hstate.in_guest)
+ return;
+#endif
+
+ /* For host kernel, reload the SLBs from shadow SLB buffer. */
+ slb = get_slb_shadow();
+ if (!slb)
+ return;
+
+ n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
+
+ /* Load up the SLB entries from shadow SLB */
+ for (i = 0; i < n; i++) {
+ unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
+ unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+
+ rb = (rb & ~0xFFFul) | i;
+ asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
+ }
+}
+
+static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
+{
+ long handled = 1;
+
+ /*
+ * Flush and reload SLBs for SLB errors and flush TLBs for TLB errors.
+ * Reset the error bits whenever we handle them so that at the end
+ * we can check whether we handled all of them or not.
+ */
+ if (dsisr & slb_error_bits) {
+ flush_and_reload_slb();
+ /* reset error bits */
+ dsisr &= ~(slb_error_bits);
+ }
+ if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ /* reset error bits */
+ dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
+ }
+ /* Any other errors we don't understand? */
+ if (dsisr & 0xffffffffUL)
+ handled = 0;
+
+ return handled;
+}
+
+static long mce_handle_derror_p7(uint64_t dsisr)
+{
+ return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS);
+}
+
+static long mce_handle_common_ierror(uint64_t srr1)
+{
+ long handled = 0;
+
+ switch (P7_SRR1_MC_IFETCH(srr1)) {
+ case 0:
+ break;
+ case P7_SRR1_MC_IFETCH_SLB_PARITY:
+ case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ /* flush and reload SLBs for SLB errors. */
+ flush_and_reload_slb();
+ handled = 1;
+ break;
+ case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ handled = 1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return handled;
+}
+
+static long mce_handle_ierror_p7(uint64_t srr1)
+{
+ long handled = 0;
+
+ handled = mce_handle_common_ierror(srr1);
+
+ if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
+ flush_and_reload_slb();
+ handled = 1;
+ }
+ return handled;
+}
+
+static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ switch (P7_SRR1_MC_IFETCH(srr1)) {
+ case P7_SRR1_MC_IFETCH_SLB_PARITY:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ break;
+ case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ break;
+ case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ break;
+ case P7_SRR1_MC_IFETCH_UE:
+ case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+ break;
+ case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type =
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ }
+}
+
+static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ mce_get_common_ierror(mce_err, srr1);
+ if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
+ }
+}
+
+static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
+{
+ if (dsisr & P7_DSISR_MC_UE) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+ } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type =
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
+ }
+}
+
+long __machine_check_early_realmode_p7(struct pt_regs *regs)
+{
+ uint64_t srr1, addr;
+ long handled = 1;
+ struct mce_error_info mce_error_info = { 0 };
+
+ srr1 = regs->msr;
+
+ /*
+ * Handle memory errors depending whether this was a load/store or
+ * ifetch exception. Also, populate the mce error_type and
+ * type-specific error_type from either SRR1 or DSISR, depending
+ * whether this was a load/store or ifetch exception
+ */
+ if (P7_SRR1_MC_LOADSTORE(srr1)) {
+ handled = mce_handle_derror_p7(regs->dsisr);
+ mce_get_derror_p7(&mce_error_info, regs->dsisr);
+ addr = regs->dar;
+ } else {
+ handled = mce_handle_ierror_p7(srr1);
+ mce_get_ierror_p7(&mce_error_info, srr1);
+ addr = regs->nip;
+ }
+
+ save_mce_event(regs, handled, &mce_error_info, addr);
+ return handled;
+}
+
+static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ mce_get_common_ierror(mce_err, srr1);
+ if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ }
+}
+
+static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr)
+{
+ mce_get_derror_p7(mce_err, dsisr);
+ if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ }
+}
+
+static long mce_handle_ierror_p8(uint64_t srr1)
+{
+ long handled = 0;
+
+ handled = mce_handle_common_ierror(srr1);
+
+ if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
+ flush_and_reload_slb();
+ handled = 1;
+ }
+ return handled;
+}
+
+static long mce_handle_derror_p8(uint64_t dsisr)
+{
+ return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS);
+}
+
+long __machine_check_early_realmode_p8(struct pt_regs *regs)
+{
+ uint64_t srr1, addr;
+ long handled = 1;
+ struct mce_error_info mce_error_info = { 0 };
+
+ srr1 = regs->msr;
+
+ if (P7_SRR1_MC_LOADSTORE(srr1)) {
+ handled = mce_handle_derror_p8(regs->dsisr);
+ mce_get_derror_p8(&mce_error_info, regs->dsisr);
+ addr = regs->dar;
+ } else {
+ handled = mce_handle_ierror_p8(srr1);
+ mce_get_ierror_p8(&mce_error_info, srr1);
+ addr = regs->nip;
+ }
+
+ save_mce_event(regs, handled, &mce_error_info, addr);
+ return handled;
+}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index e47d268727a4..879f09620f83 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -344,7 +344,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
*/
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
- isync
+ PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
li r5,L1_CACHE_BYTES-1
@@ -448,6 +448,7 @@ _GLOBAL(invalidate_dcache_range)
*/
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
@@ -489,6 +490,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
*/
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mfmsr r10
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 64bf8db12b15..3d0249599d52 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,6 +67,7 @@ PPC64_CACHES:
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
@@ -211,6 +212,11 @@ _GLOBAL(__flush_dcache_icache)
* Different systems have different cache line sizes
*/
+BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
+
/* Flush the dcache */
ld r7,PPC64_CACHES@toc(r2)
clrrdi r3,r3,PAGE_SHIFT /* Page align */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0620eaaaad45..bf0aada02fe4 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -99,12 +99,28 @@ static inline void free_lppacas(void) { }
* 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
*/
-struct slb_shadow slb_shadow[] __cacheline_aligned = {
- [0 ... (NR_CPUS-1)] = {
- .persistent = cpu_to_be32(SLB_NUM_BOLTED),
- .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
- },
-};
+static struct slb_shadow *slb_shadow;
+
+static void __init allocate_slb_shadows(int nr_cpus, int limit)
+{
+ int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
+ slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
+ memset(slb_shadow, 0, size);
+}
+
+static struct slb_shadow * __init init_slb_shadow(int cpu)
+{
+ struct slb_shadow *s = &slb_shadow[cpu];
+
+ s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
+ s->buffer_length = cpu_to_be32(sizeof(*s));
+
+ return s;
+}
+
+#else /* CONFIG_PPC_STD_MMU_64 */
+
+static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
#endif /* CONFIG_PPC_STD_MMU_64 */
@@ -142,8 +158,13 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->__current = &init_task;
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_STD_MMU_64
- new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+ new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
#endif /* CONFIG_PPC_STD_MMU_64 */
+
+#ifdef CONFIG_PPC_BOOK3E
+ /* For now -- if we have threads this will be adjusted later */
+ new_paca->tcd_ptr = &new_paca->tcd;
+#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
@@ -190,6 +211,8 @@ void __init allocate_pacas(void)
allocate_lppacas(nr_cpu_ids, limit);
+ allocate_slb_shadows(nr_cpu_ids, limit);
+
/* Can't use for_each_*_cpu, as they aren't functional yet */
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
initialise_paca(&paca[cpu], cpu);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4a96556fd2d4..64b7a6e61dd1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
-#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
@@ -74,6 +73,48 @@ struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+{
+ /*
+ * If we are saving the current thread's registers, and the
+ * thread is in a transactional state, set the TIF_RESTORE_TM
+ * bit so that we know to restore the registers before
+ * returning to userspace.
+ */
+ if (tsk == current && tsk->thread.regs &&
+ MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+ !test_thread_flag(TIF_RESTORE_TM)) {
+ tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+ set_thread_flag(TIF_RESTORE_TM);
+ }
+
+ giveup_fpu(tsk);
+}
+
+void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+{
+ /*
+ * If we are saving the current thread's registers, and the
+ * thread is in a transactional state, set the TIF_RESTORE_TM
+ * bit so that we know to restore the registers before
+ * returning to userspace.
+ */
+ if (tsk == current && tsk->thread.regs &&
+ MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+ !test_thread_flag(TIF_RESTORE_TM)) {
+ tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+ set_thread_flag(TIF_RESTORE_TM);
+ }
+
+ giveup_altivec(tsk);
+}
+
+#else
+#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk)
+#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
#ifdef CONFIG_PPC_FPU
/*
* Make sure the floating-point register state in the
@@ -102,13 +143,13 @@ void flush_fp_to_thread(struct task_struct *tsk)
*/
BUG_ON(tsk != current);
#endif
- giveup_fpu(tsk);
+ giveup_fpu_maybe_transactional(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif
+#endif /* CONFIG_PPC_FPU */
void enable_kernel_fp(void)
{
@@ -116,11 +157,11 @@ void enable_kernel_fp(void)
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
+ giveup_fpu_maybe_transactional(current);
else
giveup_fpu(NULL); /* just enables FP for kernel */
#else
- giveup_fpu(last_task_used_math);
+ giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
@@ -132,11 +173,11 @@ void enable_kernel_altivec(void)
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
- giveup_altivec(current);
+ giveup_altivec_maybe_transactional(current);
else
giveup_altivec_notask();
#else
- giveup_altivec(last_task_used_altivec);
+ giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
@@ -153,7 +194,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
- giveup_altivec(tsk);
+ giveup_altivec_maybe_transactional(tsk);
}
preempt_enable();
}
@@ -182,8 +223,8 @@ EXPORT_SYMBOL(enable_kernel_vsx);
void giveup_vsx(struct task_struct *tsk)
{
- giveup_fpu(tsk);
- giveup_altivec(tsk);
+ giveup_fpu_maybe_transactional(tsk);
+ giveup_altivec_maybe_transactional(tsk);
__giveup_vsx(tsk);
}
@@ -479,7 +520,48 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
return false;
return true;
}
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void tm_reclaim_thread(struct thread_struct *thr,
+ struct thread_info *ti, uint8_t cause)
+{
+ unsigned long msr_diff = 0;
+
+ /*
+ * If FP/VSX registers have been already saved to the
+ * thread_struct, move them to the transact_fp array.
+ * We clear the TIF_RESTORE_TM bit since after the reclaim
+ * the thread will no longer be transactional.
+ */
+ if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
+ msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+ if (msr_diff & MSR_FP)
+ memcpy(&thr->transact_fp, &thr->fp_state,
+ sizeof(struct thread_fp_state));
+ if (msr_diff & MSR_VEC)
+ memcpy(&thr->transact_vr, &thr->vr_state,
+ sizeof(struct thread_vr_state));
+ clear_ti_thread_flag(ti, TIF_RESTORE_TM);
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+ }
+
+ tm_reclaim(thr, thr->regs->msr, cause);
+
+ /* Having done the reclaim, we now have the checkpointed
+ * FP/VSX values in the registers. These might be valid
+ * even if we have previously called enable_kernel_fp() or
+ * flush_fp_to_thread(), so update thr->regs->msr to
+ * indicate their current validity.
+ */
+ thr->regs->msr |= msr_diff;
+}
+
+void tm_reclaim_current(uint8_t cause)
+{
+ tm_enable();
+ tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+}
+
static inline void tm_reclaim_task(struct task_struct *tsk)
{
/* We have to work out if we're switching from/to a task that's in the
@@ -502,9 +584,11 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
/* Stash the original thread MSR, as giveup_fpu et al will
* modify it. We hold onto it to see whether the task used
- * FP & vector regs.
+ * FP & vector regs. If the TIF_RESTORE_TM flag is set,
+ * tm_orig_msr is already set.
*/
- thr->tm_orig_msr = thr->regs->msr;
+ if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
+ thr->tm_orig_msr = thr->regs->msr;
TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -512,7 +596,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
thr->regs->ccr, thr->regs->msr,
thr->regs->trap);
- tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
+ tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
TM_DEBUG("--- tm_reclaim on pid %d complete\n",
tsk->pid);
@@ -588,6 +672,43 @@ static inline void __switch_to_tm(struct task_struct *prev)
tm_reclaim_task(prev);
}
}
+
+/*
+ * This is called if we are on the way out to userspace and the
+ * TIF_RESTORE_TM flag is set. It checks if we need to reload
+ * FP and/or vector state and does so if necessary.
+ * If userspace is inside a transaction (whether active or
+ * suspended) and FP/VMX/VSX instructions have ever been enabled
+ * inside that transaction, then we have to keep them enabled
+ * and keep the FP/VMX/VSX state loaded while ever the transaction
+ * continues. The reason is that if we didn't, and subsequently
+ * got a FP/VMX/VSX unavailable interrupt inside a transaction,
+ * we don't know whether it's the same transaction, and thus we
+ * don't know which of the checkpointed state and the transactional
+ * state to use.
+ */
+void restore_tm_state(struct pt_regs *regs)
+{
+ unsigned long msr_diff;
+
+ clear_thread_flag(TIF_RESTORE_TM);
+ if (!MSR_TM_ACTIVE(regs->msr))
+ return;
+
+ msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+ if (msr_diff & MSR_FP) {
+ fp_enable();
+ load_fp_state(&current->thread.fp_state);
+ regs->msr |= current->thread.fpexc_mode;
+ }
+ if (msr_diff & MSR_VEC) {
+ vec_enable();
+ load_vr_state(&current->thread.vr_state);
+ }
+ regs->msr |= msr_diff;
+}
+
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
@@ -1175,6 +1296,19 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE)) {
+ /*
+ * When the sticky exception bits are set
+ * directly by userspace, it must call prctl
+ * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+ * in the existing prctl settings) or
+ * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+ * the bits being set). <fenv.h> functions
+ * saving and restoring the whole
+ * floating-point environment need to do so
+ * anyway to restore the prctl settings from
+ * the saved environment.
+ */
+ tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
return 0;
@@ -1206,9 +1340,22 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
- if (cpu_has_feature(CPU_FTR_SPE))
+ if (cpu_has_feature(CPU_FTR_SPE)) {
+ /*
+ * When the sticky exception bits are set
+ * directly by userspace, it must call prctl
+ * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+ * in the existing prctl settings) or
+ * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+ * the bits being set). <fenv.h> functions
+ * saving and restoring the whole
+ * floating-point environment need to do so
+ * anyway to restore the prctl settings from
+ * the saved environment.
+ */
+ tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
val = tsk->thread.fpexc_mode;
- else
+ } else
return -EINVAL;
#else
return -EINVAL;
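
A worked example of the msr_diff bookkeeping used by tm_reclaim_thread() and restore_tm_state() above, with illustrative MSR values:

/*
 * Worked example (illustrative values only):
 *
 *   tm_orig_msr = ... | MSR_FP | MSR_VEC;   facilities live at tbegin
 *   regs->msr   = ... | MSR_VEC;            MSR_FP later cleared by
 *                                           flush_fp_to_thread()
 *
 *   msr_diff  = tm_orig_msr & ~regs->msr;       == MSR_FP
 *   msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;     == MSR_FP
 *
 * so on the way back to userspace restore_tm_state() calls fp_enable(),
 * reloads fp_state and ORs MSR_FP back into regs->msr, and the still-live
 * transaction never sees its FP state vanish.
 */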
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa0ad8aafbcc..f58c0d3aaeb4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -523,6 +523,20 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
return early_init_dt_scan_memory(node, uname, depth, data);
}
+/*
+ * For a relocatable kernel, we need to get memstart_addr first, then use
+ * it to calculate the virtual kernel start address. This has to happen at
+ * a very early stage (before machine_init), when we only want to read
+ * memstart_addr and must not touch the memblock yet. So introduce a
+ * variable that lets us skip memblock_add() during that early scan.
+ */
+#ifdef CONFIG_RELOCATABLE
+static int add_mem_to_memblock = 1;
+#else
+#define add_mem_to_memblock 1
+#endif
+
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
@@ -543,7 +557,8 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
}
/* Add the chunk to the MEMBLOCK list */
- memblock_add(base, size);
+ if (add_mem_to_memblock)
+ memblock_add(base, size);
}
static void __init early_reserve_mem_dt(void)
@@ -740,6 +755,30 @@ void __init early_init_devtree(void *params)
DBG(" <- early_init_devtree()\n");
}
+#ifdef CONFIG_RELOCATABLE
+/*
+ * This function runs before early_init_devtree(), so we have to initialize
+ * initial_boot_params ourselves.
+ */
+void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /*
+ * Scan the memory nodes with add_mem_to_memblock set to 0 so the scan
+ * does not touch the memblock.
+ */
+ add_mem_to_memblock = 0;
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ add_mem_to_memblock = 1;
+
+ if (size)
+ *size = first_memblock_size;
+}
+#endif
+
/*******
*
* New implementation of the OF "find" APIs, return a refcounted
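
A minimal sketch of an early caller of early_get_first_memblock_info(); the surrounding code and the dt_ptr/first_size names are hypothetical, only the helper itself comes from this series:

	/* Hypothetical early-relocation caller, before memblock is usable;
	 * dt_ptr is the flat device-tree pointer handed over by the boot
	 * wrapper. */
	phys_addr_t first_size;

	early_get_first_memblock_info(dt_ptr, &first_size);
	/* ... derive memstart_addr / the initial mapping size from first_size ... */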
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 856dd4e99bfe..f5f11a7d30e5 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -97,6 +97,36 @@ int dcache_bsize;
int icache_bsize;
int ucache_bsize;
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+static void setup_tlb_core_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ int first = cpu_first_thread_sibling(cpu);
+
+ paca[cpu].tcd_ptr = &paca[first].tcd;
+
+ /*
+ * If we have threads, we need either tlbsrx.
+ * or e6500 tablewalk mode, or else TLB handlers
+ * will be racy and could produce duplicate entries.
+ */
+ if (smt_enabled_at_boot >= 2 &&
+ !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
+ book3e_htw_mode != PPC_HTW_E6500) {
+ /* Should we panic instead? */
+ WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n",
+ __func__);
+ }
+ }
+}
+#else
+static void setup_tlb_core_data(void)
+{
+}
+#endif
+
#ifdef CONFIG_SMP
static char *smt_enabled_cmdline;
@@ -445,6 +475,7 @@ void __init setup_system(void)
smp_setup_cpu_maps();
check_smt_enabled();
+ setup_tlb_core_data();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
@@ -520,9 +551,6 @@ static void __init irqstack_early_init(void)
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
- extern unsigned int interrupt_base_book3e;
- extern unsigned int exc_debug_debug_book3e;
-
unsigned int i;
for_each_possible_cpu(i) {
@@ -535,8 +563,7 @@ static void __init exc_lvl_early_init(void)
}
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
- patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
- (unsigned long)&exc_debug_debug_book3e, 0);
+ patch_exception(0x040, exc_debug_debug_book3e);
}
#else
#define exc_lvl_early_init()
@@ -544,7 +571,8 @@ static void __init exc_lvl_early_init(void)
/*
* Stack space used when we detect a bad kernel stack pointer, and
- * early in SMP boots before relocation is enabled.
+ * early in SMP boots before relocation is enabled. Exclusive emergency
+ * stack for machine checks.
*/
static void __init emergency_stack_init(void)
{
@@ -567,6 +595,13 @@ static void __init emergency_stack_init(void)
sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
sp += THREAD_SIZE;
paca[i].emergency_sp = __va(sp);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* emergency stack for machine check exception handling. */
+ sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+ sp += THREAD_SIZE;
+ paca[i].mc_emergency_sp = __va(sp);
+#endif
}
}
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 457e97aa2945..8fc4177ed65a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -203,8 +203,7 @@ unsigned long get_tm_stackpointer(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
- tm_enable();
- tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
if (MSR_TM_TRANSACTIONAL(regs->msr))
return current->thread.ckpt_regs.gpr[1];
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 68027bfa5f8e..6ce69e6f1fcb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -519,6 +519,13 @@ static int save_tm_user_regs(struct pt_regs *regs,
{
unsigned long msr = regs->msr;
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state. This also ensures
+ * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -1056,13 +1063,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
regs->msr |= (MSR_KERNEL & MSR_LE);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
return 1;
badframe:
@@ -1484,13 +1484,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
return 1;
badframe:
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 42991045349f..e35bf773df7a 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -192,6 +192,13 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state. This also ensures
+ * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
@@ -749,13 +756,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Make sure signal handler doesn't get spurious FP exceptions */
current->thread.fp_state.fpscr = 0;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
/* Set up to return from userspace. */
if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index e68fd1ae727a..7a37ecd3afa3 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/unistd.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c1cf4a1522d9..ac2621af3154 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -369,13 +369,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
- if (smp_ops)
- if (smp_ops->probe)
- max_cpus = smp_ops->probe();
- else
- max_cpus = NR_CPUS;
- else
- max_cpus = 1;
+ if (smp_ops && smp_ops->probe)
+ smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S
index 0f204053e5b5..553c1405ee05 100644
--- a/arch/powerpc/kernel/swsusp_booke.S
+++ b/arch/powerpc/kernel/swsusp_booke.S
@@ -74,21 +74,21 @@ _GLOBAL(swsusp_arch_suspend)
bne 1b
/* Save SPRGs */
- mfsprg r4,0
+ mfspr r4,SPRN_SPRG0
stw r4,SL_SPRG0(r11)
- mfsprg r4,1
+ mfspr r4,SPRN_SPRG1
stw r4,SL_SPRG1(r11)
- mfsprg r4,2
+ mfspr r4,SPRN_SPRG2
stw r4,SL_SPRG2(r11)
- mfsprg r4,3
+ mfspr r4,SPRN_SPRG3
stw r4,SL_SPRG3(r11)
- mfsprg r4,4
+ mfspr r4,SPRN_SPRG4
stw r4,SL_SPRG4(r11)
- mfsprg r4,5
+ mfspr r4,SPRN_SPRG5
stw r4,SL_SPRG5(r11)
- mfsprg r4,6
+ mfspr r4,SPRN_SPRG6
stw r4,SL_SPRG6(r11)
- mfsprg r4,7
+ mfspr r4,SPRN_SPRG7
stw r4,SL_SPRG7(r11)
/* Call the low level suspend stuff (we should probably have made
@@ -150,21 +150,21 @@ _GLOBAL(swsusp_arch_resume)
bl _tlbil_all
lwz r4,SL_SPRG0(r11)
- mtsprg 0,r4
+ mtspr SPRN_SPRG0,r4
lwz r4,SL_SPRG1(r11)
- mtsprg 1,r4
+ mtspr SPRN_SPRG1,r4
lwz r4,SL_SPRG2(r11)
- mtsprg 2,r4
+ mtspr SPRN_SPRG2,r4
lwz r4,SL_SPRG3(r11)
- mtsprg 3,r4
+ mtspr SPRN_SPRG3,r4
lwz r4,SL_SPRG4(r11)
- mtsprg 4,r4
+ mtspr SPRN_SPRG4,r4
lwz r4,SL_SPRG5(r11)
- mtsprg 5,r4
+ mtspr SPRN_SPRG5,r4
lwz r4,SL_SPRG6(r11)
- mtsprg 6,r4
+ mtspr SPRN_SPRG6,r4
lwz r4,SL_SPRG7(r11)
- mtsprg 7,r4
+ mtspr SPRN_SPRG7,r4
/* restore the MSR */
lwz r3,SL_MSR(r11)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 4e3cc47f26b9..cd9be9aa016d 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -34,7 +34,6 @@
#include <linux/ipc.h>
#include <linux/utsname.h>
#include <linux/file.h>
-#include <linux/init.h>
#include <linux/personality.h>
#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index b4e667663d9b..d4a43e64a6a9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -86,6 +86,304 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define MAX_BIT 63
+
+static u64 pw20_wt;
+static u64 altivec_idle_wt;
+
+static unsigned int get_idle_ticks_bit(u64 ns)
+{
+ u64 cycle;
+
+ if (ns >= 10000)
+ cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
+ else
+ cycle = div_u64(ns * tb_ticks_per_usec, 1000);
+
+ if (!cycle)
+ return 0;
+
+ return ilog2(cycle);
+}
+
+static void do_show_pwrmgtcr0(void *val)
+{
+ u32 *value = val;
+
+ *value = mfspr(SPRN_PWRMGTCR0);
+}
+
+static ssize_t show_pw20_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+
+ value &= PWRMGTCR0_PW20_WAIT;
+
+ return sprintf(buf, "%u\n", value ? 1 : 0);
+}
+
+static void do_store_pw20_state(void *val)
+{
+ u32 *value = val;
+ u32 pw20_state;
+
+ pw20_state = mfspr(SPRN_PWRMGTCR0);
+
+ if (*value)
+ pw20_state |= PWRMGTCR0_PW20_WAIT;
+ else
+ pw20_state &= ~PWRMGTCR0_PW20_WAIT;
+
+ mtspr(SPRN_PWRMGTCR0, pw20_state);
+}
+
+static ssize_t store_pw20_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > 1)
+ return -EINVAL;
+
+ smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
+
+ return count;
+}
+
+static ssize_t show_pw20_wait_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ u64 tb_cycle = 1;
+ u64 time;
+
+ unsigned int cpu = dev->id;
+
+ if (!pw20_wt) {
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+ value = (value & PWRMGTCR0_PW20_ENT) >>
+ PWRMGTCR0_PW20_ENT_SHIFT;
+
+ tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
+ /* convert ms to ns */
+ if (tb_ticks_per_usec > 1000) {
+ time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
+ } else {
+ u32 rem_us;
+
+ time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
+ &rem_us);
+ time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
+ }
+ } else {
+ time = pw20_wt;
+ }
+
+ return sprintf(buf, "%llu\n", time > 0 ? time : 0);
+}
+
+static void set_pw20_wait_entry_bit(void *val)
+{
+ u32 *value = val;
+ u32 pw20_idle;
+
+ pw20_idle = mfspr(SPRN_PWRMGTCR0);
+
+ /* Set Automatic PW20 Core Idle Count */
+ /* clear count */
+ pw20_idle &= ~PWRMGTCR0_PW20_ENT;
+
+ /* set count */
+ pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
+
+ mtspr(SPRN_PWRMGTCR0, pw20_idle);
+}
+
+static ssize_t store_pw20_wait_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 entry_bit;
+ u64 value;
+
+ unsigned int cpu = dev->id;
+
+ if (kstrtou64(buf, 0, &value))
+ return -EINVAL;
+
+ if (!value)
+ return -EINVAL;
+
+ entry_bit = get_idle_ticks_bit(value);
+ if (entry_bit > MAX_BIT)
+ return -EINVAL;
+
+ pw20_wt = value;
+
+ smp_call_function_single(cpu, set_pw20_wait_entry_bit,
+ &entry_bit, 1);
+
+ return count;
+}
+
+static ssize_t show_altivec_idle(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+
+ value &= PWRMGTCR0_AV_IDLE_PD_EN;
+
+ return sprintf(buf, "%u\n", value ? 1 : 0);
+}
+
+static void do_store_altivec_idle(void *val)
+{
+ u32 *value = val;
+ u32 altivec_idle;
+
+ altivec_idle = mfspr(SPRN_PWRMGTCR0);
+
+ if (*value)
+ altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
+ else
+ altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
+
+ mtspr(SPRN_PWRMGTCR0, altivec_idle);
+}
+
+static ssize_t store_altivec_idle(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > 1)
+ return -EINVAL;
+
+ smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
+
+ return count;
+}
+
+static ssize_t show_altivec_idle_wait_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ u64 tb_cycle = 1;
+ u64 time;
+
+ unsigned int cpu = dev->id;
+
+ if (!altivec_idle_wt) {
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+ value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
+ PWRMGTCR0_AV_IDLE_CNT_SHIFT;
+
+ tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
+ /* convert ms to ns */
+ if (tb_ticks_per_usec > 1000) {
+ time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
+ } else {
+ u32 rem_us;
+
+ time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
+ &rem_us);
+ time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
+ }
+ } else {
+ time = altivec_idle_wt;
+ }
+
+ return sprintf(buf, "%llu\n", time > 0 ? time : 0);
+}
+
+static void set_altivec_idle_wait_entry_bit(void *val)
+{
+ u32 *value = val;
+ u32 altivec_idle;
+
+ altivec_idle = mfspr(SPRN_PWRMGTCR0);
+
+ /* Set Automatic AltiVec Idle Count */
+ /* clear count */
+ altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
+
+ /* set count */
+ altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
+
+ mtspr(SPRN_PWRMGTCR0, altivec_idle);
+}
+
+static ssize_t store_altivec_idle_wait_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 entry_bit;
+ u64 value;
+
+ unsigned int cpu = dev->id;
+
+ if (kstrtou64(buf, 0, &value))
+ return -EINVAL;
+
+ if (!value)
+ return -EINVAL;
+
+ entry_bit = get_idle_ticks_bit(value);
+ if (entry_bit > MAX_BIT)
+ return -EINVAL;
+
+ altivec_idle_wt = value;
+
+ smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
+ &entry_bit, 1);
+
+ return count;
+}
+
+/*
+ * Enable/Disable interface:
+ * 0 = disable, 1 = enable.
+ */
+static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
+static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
+
+/*
+ * Set wait time interface (nanoseconds).
+ * Example, based on a timebase frequency of 41 MHz:
+ * 1~48(ns): TB[63]
+ * 49~97(ns): TB[62]
+ * 98~195(ns): TB[61]
+ * 196~390(ns): TB[60]
+ * 391~780(ns): TB[59]
+ * 781~1560(ns): TB[58]
+ * ...
+ */
+static DEVICE_ATTR(pw20_wait_time, 0600,
+ show_pw20_wait_time,
+ store_pw20_wait_time);
+static DEVICE_ATTR(altivec_idle_wait_time, 0600,
+ show_altivec_idle_wait_time,
+ store_altivec_idle_wait_time);
+#endif
+
/*
* Enabling PMCs will slow partition context switch times so we only do
* it the first time we write to the PMCs.
@@ -108,14 +406,14 @@ void ppc_enable_pmcs(void)
}
EXPORT_SYMBOL(ppc_enable_pmcs);
-#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+#define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
- ppc_enable_pmcs(); \
+ EXTRA; \
mtspr(ADDRESS, *(unsigned long *)val); \
} \
static ssize_t show_##NAME(struct device *dev, \
@@ -140,6 +438,10 @@ static ssize_t __used \
return count; \
}
+#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs())
+#define SYSFS_SPRSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP(NAME, ADDRESS, )
/* Let's define all possible registers, we'll only hook up the ones
* that are implemented on the current processor
@@ -175,10 +477,10 @@ SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
-SYSFS_PMCSETUP(purr, SPRN_PURR);
-SYSFS_PMCSETUP(spurr, SPRN_SPURR);
-SYSFS_PMCSETUP(dscr, SPRN_DSCR);
-SYSFS_PMCSETUP(pir, SPRN_PIR);
+SYSFS_SPRSETUP(purr, SPRN_PURR);
+SYSFS_SPRSETUP(spurr, SPRN_SPURR);
+SYSFS_SPRSETUP(dscr, SPRN_DSCR);
+SYSFS_SPRSETUP(pir, SPRN_PIR);
/*
Lets only enable read for phyp resources and
@@ -249,34 +551,34 @@ SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
-SYSFS_PMCSETUP(hid0, SPRN_HID0);
-SYSFS_PMCSETUP(hid1, SPRN_HID1);
-SYSFS_PMCSETUP(hid4, SPRN_HID4);
-SYSFS_PMCSETUP(hid5, SPRN_HID5);
-SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
-SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
-SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
-SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
-SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
-SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
-SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
-SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
-SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
-SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
-SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
-SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
-SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
-SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
-SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
-SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
-SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
-SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
-SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
-SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
-SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
-SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
-SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
-SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
+SYSFS_SPRSETUP(hid0, SPRN_HID0);
+SYSFS_SPRSETUP(hid1, SPRN_HID1);
+SYSFS_SPRSETUP(hid4, SPRN_HID4);
+SYSFS_SPRSETUP(hid5, SPRN_HID5);
+SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
+SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
+SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
+SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
+SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
+SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
+SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
+SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
+SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
+SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
+SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
+SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
+SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
+SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
+SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
+SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
+SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
+SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
+SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
+SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
+SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
+SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
+SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
+SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */
@@ -421,6 +723,15 @@ static void register_cpu_online(unsigned int cpu)
device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
+ device_create_file(s, &dev_attr_pw20_state);
+ device_create_file(s, &dev_attr_pw20_wait_time);
+
+ device_create_file(s, &dev_attr_altivec_idle);
+ device_create_file(s, &dev_attr_altivec_idle_wait_time);
+ }
+#endif
cacheinfo_cpu_online(cpu);
}
@@ -493,6 +804,15 @@ static void unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
+ device_remove_file(s, &dev_attr_pw20_state);
+ device_remove_file(s, &dev_attr_pw20_wait_time);
+
+ device_remove_file(s, &dev_attr_altivec_idle);
+ device_remove_file(s, &dev_attr_altivec_idle_wait_time);
+ }
+#endif
cacheinfo_cpu_offline(cpu);
}
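
To make the wait-time table above concrete, this is the conversion get_idle_ticks_bit() performs for one requested value, assuming the same 41 MHz timebase (tb_ticks_per_usec == 41); the written value is illustrative:

/*
 * echo 1000 > pw20_wait_time            (1000 ns requested)
 *
 *   cycle     = 1000 * 41 / 1000 = 41 timebase ticks
 *   entry_bit = ilog2(41)        = 5
 *   PWRMGTCR0_PW20_ENT value     = 63 - 5 = 58, i.e. TB[58]
 *
 * which matches the 781~1560 ns row of the table above.
 */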
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b3b144121cc9..b3dab20acf34 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -510,7 +510,6 @@ void timer_interrupt(struct pt_regs * regs)
*/
may_hard_irq_enable();
- __get_cpu_var(irq_stat).timer_irqs++;
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -532,10 +531,15 @@ void timer_interrupt(struct pt_regs * regs)
*next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
+ __get_cpu_var(irq_stat).timer_irqs_event++;
} else {
now = *next_tb - now;
if (now <= DECREMENTER_MAX)
set_dec((int)now);
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
+ __get_cpu_var(irq_stat).timer_irqs_others++;
}
#ifdef CONFIG_PPC64
@@ -801,8 +805,16 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
+ /* Don't adjust the decrementer if some irq work is pending */
+ if (test_irq_work_pending())
+ return 0;
__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
set_dec(evt);
+
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
+
return 0;
}
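
The two test_irq_work_pending() checks above guard against irq work being raised on the same CPU (via arch_irq_work_raise(), which sets the pending flag and pokes the decrementer with set_dec(1)) while the decrementer is being reprogrammed; an illustrative timeline of the race:

/*
 *   decrementer_set_next_event()          irq work raised on this CPU
 *   ----------------------------          ---------------------------
 *   test_irq_work_pending() -> false
 *                                         pending flag set
 *                                         set_dec(1)        <- poke
 *   set_dec(evt)    <- would push the poke far into the future
 *   test_irq_work_pending() -> true
 *   set_dec(1)      <- re-arm so the irq work runs promptly
 */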
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 907a472f9a9e..33cd7a0b8e73 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -285,6 +285,21 @@ void system_reset_exception(struct pt_regs *regs)
/* What should we do here? We could issue a shutdown or hard reset. */
}
+
+/*
+ * This function is called in real mode. Strictly no printk's please.
+ *
+ * regs->nip and regs->msr contain SRR0 and SRR1.
+ */
+long machine_check_early(struct pt_regs *regs)
+{
+ long handled = 0;
+
+ if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+ handled = cur_cpu_spec->machine_check_early(regs);
+ return handled;
+}
+
#endif
/*
@@ -1384,7 +1399,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
/* We can only have got here if the task started using FP after
* beginning the transaction. So, the transactional regs are just a
@@ -1393,8 +1407,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
* transaction, and probably retry but now with FP enabled. So the
* checkpointed FP registers need to be loaded.
*/
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
/* Reclaim didn't save out any FPRs to transact_fprs. */
/* Enable FP for the task: */
@@ -1403,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
/* This loads and recheckpoints the FP registers from
* thread.fpr[]. They will remain in registers after the
* checkpoint so we don't need to reload them after.
+ * If VMX is in use, the VRs now hold checkpointed values,
+ * so we don't want to load the VRs from the thread_struct.
*/
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_FP);
+
+ /* If VMX is in use, get the transactional values back */
+ if (regs->msr & MSR_VEC) {
+ do_load_up_transact_altivec(&current->thread);
+ /* At this point all the VSX state is loaded, so enable it */
+ regs->msr |= MSR_VSX;
+ }
}
-#ifdef CONFIG_ALTIVEC
void altivec_unavailable_tm(struct pt_regs *regs)
{
/* See the comments in fp_unavailable_tm(). This function operates
@@ -1417,18 +1438,21 @@ void altivec_unavailable_tm(struct pt_regs *regs)
TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
"MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC;
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_VEC);
current->thread.used_vr = 1;
+
+ if (regs->msr & MSR_FP) {
+ do_load_up_transact_fpu(&current->thread);
+ regs->msr |= MSR_VSX;
+ }
}
-#endif
-#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
+ unsigned long orig_msr = regs->msr;
+
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
*
@@ -1440,18 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
"MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
+ current->thread.used_vsr = 1;
+
+ /* If FP and VMX are already loaded, we have all the state we need */
+ if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+ regs->msr |= MSR_VSX;
+ return;
+ }
+
/* This reclaims FP and/or VR regs if they're already enabled */
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
MSR_VSX;
- /* This loads & recheckpoints FP and VRs. */
- tm_recheckpoint(&current->thread, regs->msr);
- current->thread.used_vsr = 1;
+
+ /* This loads & recheckpoints FP and VRs; but we have
+ * to be sure not to overwrite previously-valid state.
+ */
+ tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+ if (orig_msr & MSR_FP)
+ do_load_up_transact_fpu(&current->thread);
+ if (orig_msr & MSR_VEC)
+ do_load_up_transact_altivec(&current->thread);
}
-#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 6e8f507ed32b..79683d0393f5 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/page.h>
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index b8553d62b792..8df9e2463007 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/page.h>
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 0458a9aaba9d..74f8050518d6 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -37,6 +37,16 @@ _GLOBAL(do_load_up_transact_altivec)
#endif
/*
+ * Enable use of VMX/Altivec for the caller.
+ */
+_GLOBAL(vec_enable)
+ mfmsr r3
+ oris r3,r3,MSR_VEC@h
+ MTMSRD(r3)
+ isync
+ blr
+
+/*
* Load state from memory into VMX registers including VSCR.
* Assumes the caller has enabled VMX in the MSR.
*/
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a2..826d8bd9e522 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
dma_addr_t ret = DMA_ERROR_CODE;
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+ tbl = get_iommu_table_base(dev);
+ if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
if (unlikely(dma_mapping_error(dev, ret))) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
atomic_inc(&viodev->cmo.allocs_failed);
}
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
+ tbl = get_iommu_table_base(dev);
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
int ret, count = 0;
size_t alloc_size = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+ alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
if (vio_cmo_alloc(viodev, alloc_size)) {
atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
- alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+ alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
size_t alloc_size = 0;
int count = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+ alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
struct device *dev = &viodev->dev;
+ struct iommu_table *tbl;
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
+ tbl = get_iommu_table_base(dev);
+
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
@@ -736,7 +747,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
return -EINVAL;
}
- viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+ viodev->cmo.desired =
+ IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
viodev->cmo.desired = VIO_CMO_MIN_ENT;
size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1188,10 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->it_size = size >> tbl->it_page_shift;
/* offset for VIO should always be 0 */
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
tbl->it_blocksize = 16;
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index a353c485808c..768a9f977c00 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -12,6 +12,7 @@
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>
+#include <asm/mce.h>
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
@@ -58,18 +59,6 @@ static void reload_slb(struct kvm_vcpu *vcpu)
}
}
-/* POWER7 TLB flush */
-static void flush_tlb_power7(struct kvm_vcpu *vcpu)
-{
- unsigned long i, rb;
-
- rb = TLBIEL_INVAL_SET_LPID;
- for (i = 0; i < POWER7_TLB_SETS; ++i) {
- asm volatile("tlbiel %0" : : "r" (rb));
- rb += 1 << TLBIEL_INVAL_SET_SHIFT;
- }
-}
-
/*
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
@@ -79,9 +68,7 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
-#ifdef CONFIG_PPC_POWERNV
- struct opal_machine_check_event *opal_evt;
-#endif
+ struct machine_check_event mce_evt;
long handled = 1;
if (srr1 & SRR1_MC_LDSTERR) {
@@ -96,7 +83,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
}
if (dsisr & DSISR_MC_TLB_MULTI) {
- flush_tlb_power7(vcpu);
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
dsisr &= ~DSISR_MC_TLB_MULTI;
}
/* Any other errors we don't understand? */
@@ -113,28 +101,38 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
reload_slb(vcpu);
break;
case SRR1_MC_IFETCH_TLBMULTI:
- flush_tlb_power7(vcpu);
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
break;
default:
handled = 0;
}
-#ifdef CONFIG_PPC_POWERNV
/*
- * See if OPAL has already handled the condition.
- * We assume that if the condition is recovered then OPAL
+ * See if we have already handled the condition in the linux host.
+ * We assume that if the condition is recovered then the linux host
* will have generated an error log event that we will pick
* up and log later.
+ * Don't release the mce event yet. If the condition is not
+ * recovered, we do a guest exit and go back to the linux host
+ * machine check handler. Hence we need to make sure the current
+ * mce event stays available for the linux host to consume.
*/
- opal_evt = local_paca->opal_mc_evt;
- if (opal_evt->version == OpalMCE_V1 &&
- (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
- opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
+ if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
+ goto out;
+
+ if (mce_evt.version == MCE_V1 &&
+ (mce_evt.severity == MCE_SEV_NO_ERROR ||
+ mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
handled = 1;
+out:
+ /*
+ * If we have handled the error, then release the mce event because
+ * we will be delivering machine check to guest.
+ */
if (handled)
- opal_evt->in_use = 0;
-#endif
+ release_mce_event();
return handled;
}
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e8ed7d659c55..a0d6929d8678 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -319,6 +319,8 @@ kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
SPRN_DSRR0, SPRN_DSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
#else
/*
* For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 17e5b2364312..d5edbeb8eb82 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -159,6 +159,21 @@ unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
return 0;
}
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr)
+{
+ extern unsigned int interrupt_base_book3e;
+ unsigned int *ibase = &interrupt_base_book3e;
+
+ /* Our exception vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one.
+ */
+
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
+#endif
#ifdef CONFIG_CODE_PATCHING_SELFTEST
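
A hedged sketch of the expected caller side of __patch_exception(): the patch_exception() wrapper below mirrors the macro this series removes from tlb_nohash.c (further down in this diff), and the two calls use the vector offsets and e6500 handler symbols added there.

/*
 * Sketch of how __patch_exception() is driven. The wrapper macro matches
 * the one moved out of tlb_nohash.c; the handler symbols are the e6500
 * TLB miss entry points added in this same series.
 */
#define patch_exception(exc, name) do {					\
	extern unsigned int name;					\
	__patch_exception((exc), (unsigned long)&name);			\
} while (0)

static void example_wire_up_e6500_tlb_miss(void)
{
	patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
	patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
}
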
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index b2c68ce139ae..a5b30c71a8d3 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
mr 1,11
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+_GLOBAL(_savevr_20)
+ li r11,-192
+ stvx vr20,r11,r0
+_GLOBAL(_savevr_21)
+ li r11,-176
+ stvx vr21,r11,r0
+_GLOBAL(_savevr_22)
+ li r11,-160
+ stvx vr22,r11,r0
+_GLOBAL(_savevr_23)
+ li r11,-144
+ stvx vr23,r11,r0
+_GLOBAL(_savevr_24)
+ li r11,-128
+ stvx vr24,r11,r0
+_GLOBAL(_savevr_25)
+ li r11,-112
+ stvx vr25,r11,r0
+_GLOBAL(_savevr_26)
+ li r11,-96
+ stvx vr26,r11,r0
+_GLOBAL(_savevr_27)
+ li r11,-80
+ stvx vr27,r11,r0
+_GLOBAL(_savevr_28)
+ li r11,-64
+ stvx vr28,r11,r0
+_GLOBAL(_savevr_29)
+ li r11,-48
+ stvx vr29,r11,r0
+_GLOBAL(_savevr_30)
+ li r11,-32
+ stvx vr30,r11,r0
+_GLOBAL(_savevr_31)
+ li r11,-16
+ stvx vr31,r11,r0
+ blr
+
+_GLOBAL(_restvr_20)
+ li r11,-192
+ lvx vr20,r11,r0
+_GLOBAL(_restvr_21)
+ li r11,-176
+ lvx vr21,r11,r0
+_GLOBAL(_restvr_22)
+ li r11,-160
+ lvx vr22,r11,r0
+_GLOBAL(_restvr_23)
+ li r11,-144
+ lvx vr23,r11,r0
+_GLOBAL(_restvr_24)
+ li r11,-128
+ lvx vr24,r11,r0
+_GLOBAL(_restvr_25)
+ li r11,-112
+ lvx vr25,r11,r0
+_GLOBAL(_restvr_26)
+ li r11,-96
+ lvx vr26,r11,r0
+_GLOBAL(_restvr_27)
+ li r11,-80
+ lvx vr27,r11,r0
+_GLOBAL(_restvr_28)
+ li r11,-64
+ lvx vr28,r11,r0
+_GLOBAL(_restvr_29)
+ li r11,-48
+ lvx vr29,r11,r0
+_GLOBAL(_restvr_30)
+ li r11,-32
+ lvx vr30,r11,r0
+_GLOBAL(_restvr_31)
+ li r11,-16
+ lvx vr31,r11,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#else /* CONFIG_PPC64 */
.section ".text.save.restore","ax",@progbits
@@ -356,6 +437,111 @@ _restgpr0_31:
mtlr r0
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+.globl _savevr_20
+_savevr_20:
+ li r12,-192
+ stvx vr20,r12,r0
+.globl _savevr_21
+_savevr_21:
+ li r12,-176
+ stvx vr21,r12,r0
+.globl _savevr_22
+_savevr_22:
+ li r12,-160
+ stvx vr22,r12,r0
+.globl _savevr_23
+_savevr_23:
+ li r12,-144
+ stvx vr23,r12,r0
+.globl _savevr_24
+_savevr_24:
+ li r12,-128
+ stvx vr24,r12,r0
+.globl _savevr_25
+_savevr_25:
+ li r12,-112
+ stvx vr25,r12,r0
+.globl _savevr_26
+_savevr_26:
+ li r12,-96
+ stvx vr26,r12,r0
+.globl _savevr_27
+_savevr_27:
+ li r12,-80
+ stvx vr27,r12,r0
+.globl _savevr_28
+_savevr_28:
+ li r12,-64
+ stvx vr28,r12,r0
+.globl _savevr_29
+_savevr_29:
+ li r12,-48
+ stvx vr29,r12,r0
+.globl _savevr_30
+_savevr_30:
+ li r12,-32
+ stvx vr30,r12,r0
+.globl _savevr_31
+_savevr_31:
+ li r12,-16
+ stvx vr31,r12,r0
+ blr
+
+.globl _restvr_20
+_restvr_20:
+ li r12,-192
+ lvx vr20,r12,r0
+.globl _restvr_21
+_restvr_21:
+ li r12,-176
+ lvx vr21,r12,r0
+.globl _restvr_22
+_restvr_22:
+ li r12,-160
+ lvx vr22,r12,r0
+.globl _restvr_23
+_restvr_23:
+ li r12,-144
+ lvx vr23,r12,r0
+.globl _restvr_24
+_restvr_24:
+ li r12,-128
+ lvx vr24,r12,r0
+.globl _restvr_25
+_restvr_25:
+ li r12,-112
+ lvx vr25,r12,r0
+.globl _restvr_26
+_restvr_26:
+ li r12,-96
+ lvx vr26,r12,r0
+.globl _restvr_27
+_restvr_27:
+ li r12,-80
+ lvx vr27,r12,r0
+.globl _restvr_28
+_restvr_28:
+ li r12,-64
+ lvx vr28,r12,r0
+.globl _restvr_29
+_restvr_29:
+ li r12,-48
+ lvx vr29,r12,r0
+.globl _restvr_30
+_restvr_30:
+ li r12,-32
+ lvx vr30,r12,r0
+.globl _restvr_31
+_restvr_31:
+ li r12,-16
+ lvx vr31,r12,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#endif /* CONFIG_PPC64 */
#endif
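
A small worked illustration (not part of the patch) of the offset scheme in the out-of-line AltiVec save/restore entry points above: each _savevr_N/_restvr_N handles register vN at 16 bytes per register below the end of the save area passed in r0, then falls through to the next register's entry.

/*
 * Worked illustration of the fixed offsets used by _savevr_N / _restvr_N:
 * register vN lives at -16 * (32 - N) bytes from r0 (the end of the
 * vector save area), so _savevr_20 uses -192 and _savevr_31 uses -16.
 */
#include <stdio.h>

int main(void)
{
	for (int n = 20; n <= 31; n++)
		printf("_savevr_%d: li rX,%d ; stvx vr%d,rX,r0\n",
		       n, -16 * (32 - n), n);
	return 0;
}
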
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index a73f0884d358..28337c9709ae 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -20,6 +20,7 @@
*/
#include <linux/types.h>
+#include <linux/prctl.h>
#include <asm/uaccess.h>
#include <asm/reg.h>
@@ -275,21 +276,13 @@ int do_spe_mathemu(struct pt_regs *regs)
case EFSCTSF:
case EFSCTUF:
- if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) {
- /* NaN */
- if (((vb.wp[1] >> 23) & 0xff) == 0) {
- /* denorm */
- vc.wp[1] = 0x0;
- } else if ((vb.wp[1] >> 31) == 0) {
- /* positive normal */
- vc.wp[1] = (func == EFSCTSF) ?
- 0x7fffffff : 0xffffffff;
- } else { /* negative normal */
- vc.wp[1] = (func == EFSCTSF) ?
- 0x80000000 : 0x0;
- }
- } else { /* rB is NaN */
- vc.wp[1] = 0x0;
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB_e += (func == EFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
+ (func == EFSCTSF));
}
goto update_regs;
@@ -306,16 +299,25 @@ int do_spe_mathemu(struct pt_regs *regs)
}
case EFSCTSI:
- case EFSCTSIZ:
case EFSCTUI:
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
+ case EFSCTSIZ:
case EFSCTUIZ:
- if (func & 0x4) {
- _FP_ROUND(1, SB);
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(1, SB);
+ FP_TO_INT_S(vc.wp[1], SB, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_S(vc.wp[1], SB, 32,
- (((func & 0x3) != 0) || SB_s));
goto update_regs;
default:
@@ -404,22 +406,13 @@ cmp_s:
case EFDCTSF:
case EFDCTUF:
- if (!((vb.wp[0] >> 20) == 0x7ff &&
- ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) {
- /* not a NaN */
- if (((vb.wp[0] >> 20) & 0x7ff) == 0) {
- /* denorm */
- vc.wp[1] = 0x0;
- } else if ((vb.wp[0] >> 31) == 0) {
- /* positive normal */
- vc.wp[1] = (func == EFDCTSF) ?
- 0x7fffffff : 0xffffffff;
- } else { /* negative normal */
- vc.wp[1] = (func == EFDCTSF) ?
- 0x80000000 : 0x0;
- }
- } else { /* NaN */
- vc.wp[1] = 0x0;
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ DB_e += (func == EFDCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
+ (func == EFDCTSF));
}
goto update_regs;
@@ -437,21 +430,35 @@ cmp_s:
case EFDCTUIDZ:
case EFDCTSIDZ:
- _FP_ROUND_ZERO(2, DB);
- FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0));
+ if (DB_c == FP_CLS_NAN) {
+ vc.dp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_D(vc.dp[0], DB, 64,
+ ((func & 0x1) == 0));
+ }
goto update_regs;
case EFDCTUI:
case EFDCTSI:
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
case EFDCTUIZ:
case EFDCTSIZ:
- if (func & 0x4) {
- _FP_ROUND(2, DB);
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(2, DB);
+ FP_TO_INT_D(vc.wp[1], DB, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_D(vc.wp[1], DB, 32,
- (((func & 0x3) != 0) || DB_s));
goto update_regs;
default:
@@ -556,37 +563,60 @@ cmp_d:
cmp = -1;
goto cmp_vs;
- case EVFSCTSF:
- __asm__ __volatile__ ("mtspr 512, %4\n"
- "efsctsf %0, %2\n"
- "efsctsf %1, %3\n"
- : "=r" (vc.wp[0]), "=r" (vc.wp[1])
- : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
- goto update_regs;
-
case EVFSCTUF:
- __asm__ __volatile__ ("mtspr 512, %4\n"
- "efsctuf %0, %2\n"
- "efsctuf %1, %3\n"
- : "=r" (vc.wp[0]), "=r" (vc.wp[1])
- : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ case EVFSCTSF:
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB0_e += (func == EVFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
+ (func == EVFSCTSF));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB1_e += (func == EVFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
+ (func == EVFSCTSF));
+ }
goto update_regs;
case EVFSCTUI:
case EVFSCTSI:
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
+ ((func & 0x3) != 0));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
case EVFSCTUIZ:
case EVFSCTSIZ:
- if (func & 0x4) {
- _FP_ROUND(1, SB0);
- _FP_ROUND(1, SB1);
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(1, SB0);
- _FP_ROUND_ZERO(1, SB1);
+ FP_TO_INT_S(vc.wp[0], SB0, 32,
+ ((func & 0x3) != 0));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_S(vc.wp[1], SB1, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_S(vc.wp[0], SB0, 32,
- (((func & 0x3) != 0) || SB0_s));
- FP_TO_INT_S(vc.wp[1], SB1, 32,
- (((func & 0x3) != 0) || SB1_s));
goto update_regs;
default:
@@ -630,9 +660,27 @@ update_ccr:
regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
update_regs:
- __FPU_FPSCR &= ~FP_EX_MASK;
+ /*
+ * If the "invalid" exception sticky bit was set by the
+ * processor for non-finite input, but was not set before the
+ * instruction being emulated, clear it. Likewise for the
+ * "underflow" bit, which may have been set by the processor
+ * for exact underflow, not just inexact underflow when the
+ * flag should be set for IEEE 754 semantics. Other sticky
+ * exceptions will only be set by the processor when they are
+ * correct according to IEEE 754 semantics, and we must not
+ * clear sticky bits that were already set before the emulated
+ * instruction as they represent the user-visible sticky
+ * exception status. "inexact" traps to kernel are not
+ * required for IEEE semantics and are not enabled by default,
+ * so the "inexact" sticky bit may have been set by a previous
+ * instruction without the kernel being aware of it.
+ */
+ __FPU_FPSCR
+ &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last;
__FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
+ current->thread.spefscr_last = __FPU_FPSCR;
current->thread.evr[fc] = vc.wp[0];
regs->gpr[fc] = vc.wp[1];
@@ -644,6 +692,23 @@ update_regs:
pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+ if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
+ if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO)
+ && (current->thread.fpexc_mode & PR_FP_EXC_DIV))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW)
+ && (current->thread.fpexc_mode & PR_FP_EXC_OVF))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW)
+ && (current->thread.fpexc_mode & PR_FP_EXC_UND))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT)
+ && (current->thread.fpexc_mode & PR_FP_EXC_RES))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID)
+ && (current->thread.fpexc_mode & PR_FP_EXC_INV))
+ return 1;
+ }
return 0;
illegal:
@@ -662,21 +727,28 @@ int speround_handler(struct pt_regs *regs)
{
union dw_union fgpr;
int s_lo, s_hi;
- unsigned long speinsn, type, fc;
+ int lo_inexact, hi_inexact;
+ int fp_result;
+ unsigned long speinsn, type, fb, fc, fptype, func;
if (get_user(speinsn, (unsigned int __user *) regs->nip))
return -EFAULT;
if ((speinsn >> 26) != 4)
return -EINVAL; /* not an spe instruction */
- type = insn_type(speinsn & 0x7ff);
+ func = speinsn & 0x7ff;
+ type = insn_type(func);
if (type == XCR) return -ENOSYS;
__FPU_FPSCR = mfspr(SPRN_SPEFSCR);
pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
+ fptype = (speinsn >> 5) & 0x7;
+
/* No need to round if the result is exact */
- if (!(__FPU_FPSCR & FP_EX_INEXACT))
+ lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX);
+ hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH);
+ if (!(lo_inexact || (hi_inexact && fptype == VCT)))
return 0;
fc = (speinsn >> 21) & 0x1f;
@@ -685,9 +757,68 @@ int speround_handler(struct pt_regs *regs)
fgpr.wp[0] = current->thread.evr[fc];
fgpr.wp[1] = regs->gpr[fc];
+ fb = (speinsn >> 11) & 0x1f;
+ switch (func) {
+ case EFSCTUIZ:
+ case EFSCTSIZ:
+ case EVFSCTUIZ:
+ case EVFSCTSIZ:
+ case EFDCTUIDZ:
+ case EFDCTSIDZ:
+ case EFDCTUIZ:
+ case EFDCTSIZ:
+ /*
+ * These instructions always round to zero,
+ * independent of the rounding mode.
+ */
+ return 0;
+
+ case EFSCTUI:
+ case EFSCTUF:
+ case EVFSCTUI:
+ case EVFSCTUF:
+ case EFDCTUI:
+ case EFDCTUF:
+ fp_result = 0;
+ s_lo = 0;
+ s_hi = 0;
+ break;
+
+ case EFSCTSI:
+ case EFSCTSF:
+ fp_result = 0;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_lo = regs->gpr[fb] & SIGN_BIT_S;
+ break;
+
+ case EVFSCTSI:
+ case EVFSCTSF:
+ fp_result = 0;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_lo = regs->gpr[fb] & SIGN_BIT_S;
+ if (fgpr.wp[0] == 0)
+ s_hi = current->thread.evr[fb] & SIGN_BIT_S;
+ break;
+
+ case EFDCTSI:
+ case EFDCTSF:
+ fp_result = 0;
+ s_hi = s_lo;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_hi = current->thread.evr[fb] & SIGN_BIT_S;
+ break;
+
+ default:
+ fp_result = 1;
+ break;
+ }
+
pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
- switch ((speinsn >> 5) & 0x7) {
+ switch (fptype) {
/* Since SPE instructions on E500 core can handle round to nearest
* and round toward zero with IEEE-754 complied, we just need
* to handle round toward +Inf and round toward -Inf by software.
@@ -696,25 +827,52 @@ int speround_handler(struct pt_regs *regs)
if ((FP_ROUNDMODE) == FP_RND_PINF) {
if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
} else { /* round to -Inf */
- if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ if (s_lo) {
+ if (fp_result)
+ fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z < 0, choose Z2 */
+ }
}
break;
case DPFP:
if (FP_ROUNDMODE == FP_RND_PINF) {
- if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ if (!s_hi) {
+ if (fp_result)
+ fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ else
+ fgpr.wp[1]++; /* Z > 0, choose Z1 */
+ }
} else { /* round to -Inf */
- if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ if (s_hi) {
+ if (fp_result)
+ fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z < 0, choose Z2 */
+ }
}
break;
case VCT:
if (FP_ROUNDMODE == FP_RND_PINF) {
- if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
- if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
+ if (lo_inexact && !s_lo)
+ fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
+ if (hi_inexact && !s_hi)
+ fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
} else { /* round to -Inf */
- if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
- if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ if (lo_inexact && s_lo) {
+ if (fp_result)
+ fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z_low < 0, choose Z2 */
+ }
+ if (hi_inexact && s_hi) {
+ if (fp_result)
+ fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ else
+ fgpr.wp[0]--; /* Z_high < 0, choose Z2 */
+ }
}
break;
@@ -727,6 +885,8 @@ int speround_handler(struct pt_regs *regs)
pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
+ if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+ return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;
return 0;
}
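
For context on the PR_FP_EXC_SW_ENABLE checks added above, a hedged userspace usage sketch follows: the emulator only reports an exception (by returning 1 so a signal is raised) for classes the process has opted into via prctl(). The specific flag combination below is only an example.

/*
 * Userspace usage sketch for the fpexc_mode checks added above: a process
 * opts into software-emulated SPE exception reporting with PR_SET_FPEXC.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	if (prctl(PR_SET_FPEXC,
		  PR_FP_EXC_SW_ENABLE | PR_FP_EXC_INV | PR_FP_EXC_DIV))
		perror("PR_SET_FPEXC");
	return 0;
}
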
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 07ba45b0f07c..94cd728166d3 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -52,6 +52,7 @@
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/setup.h>
+#include <asm/paca.h>
#include "mmu_decl.h"
@@ -171,11 +172,10 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
return 1UL << camsize;
}
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
+static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
+ unsigned long ram, int max_cam_idx)
{
int i;
- unsigned long virt = PAGE_OFFSET;
- phys_addr_t phys = memstart_addr;
unsigned long amount_mapped = 0;
/* Calculate CAM values */
@@ -192,9 +192,23 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
}
tlbcam_index = i;
+#ifdef CONFIG_PPC64
+ get_paca()->tcd.esel_next = i;
+ get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+ get_paca()->tcd.esel_first = i;
+#endif
+
return amount_mapped;
}
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
+{
+ unsigned long virt = PAGE_OFFSET;
+ phys_addr_t phys = memstart_addr;
+
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx);
+}
+
#ifdef CONFIG_PPC32
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
@@ -222,7 +236,9 @@ void __init adjust_total_lowmem(void)
/* adjust lowmem size to __max_low_memory */
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
+ i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
+ restore_to_as0(i, 0, 0, 1);
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -241,4 +257,62 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
/* 64M mapped initially according to head_fsl_booke.S */
memblock_set_current_limit(min_t(u64, limit, 0x04000000));
}
+
+#ifdef CONFIG_RELOCATABLE
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
+{
+ unsigned long base = KERNELBASE;
+
+ kernstart_addr = start;
+ if (is_second_reloc) {
+ virt_phys_offset = PAGE_OFFSET - memstart_addr;
+ return;
+ }
+
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries. Before we get the real memstart_addr,
+ * we will compute the virt_phys_offset like this:
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0x3ffffff) +
+ * (kernstart_addr & 0x3ffffff)
+ * When we relocate, we have:
+ *
+ * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
+ * (kernstart_addr & ~0x3ffffff)
+ *
+ */
+ start &= ~0x3ffffff;
+ base &= ~0x3ffffff;
+ virt_phys_offset = base - start;
+ early_get_first_memblock_info(__va(dt_ptr), NULL);
+ /*
+ * We now have the real memstart_addr, so check whether this
+ * address is the same as what PAGE_OFFSET maps to now. If
+ * not, we have to change the mapping of PAGE_OFFSET to
+ * memstart_addr and do a second relocation.
+ */
+ if (start != memstart_addr) {
+ int n;
+ long offset = start - memstart_addr;
+
+ is_second_reloc = 1;
+ n = switch_to_as1();
+ /* map a 64M area for the second relocation */
+ if (memstart_addr > start)
+ map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
+ else
+ map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+ 0x4000000, CONFIG_LOWMEM_CAM_NUM);
+ restore_to_as0(n, offset, __va(dt_ptr), 1);
+ /* We should never reach here */
+ panic("Relocation error");
+ }
+}
+#endif
#endif
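
The virt_phys_offset arithmetic in relocate_init() above can be checked with a small standalone example; the addresses below are made up and only illustrate the 64M-alignment masking described in the comment.

/*
 * Worked example of the relocate_init() arithmetic, with made-up
 * addresses. 0x3ffffff masks the low 26 bits (64M - 1).
 */
#include <stdio.h>

int main(void)
{
	unsigned long kernelbase = 0xc0000000UL;	/* KERNELBASE (example)  */
	unsigned long kernstart  = 0x24000000UL;	/* physical load address */

	/* stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff) */
	unsigned long stext_run = (kernelbase & ~0x3ffffffUL) +
				  (kernstart & 0x3ffffffUL);

	/* virt_phys_offset = (KERNELBASE & ~0x3ffffff) - (kernstart_addr & ~0x3ffffff) */
	unsigned long virt_phys_offset = (kernelbase & ~0x3ffffffUL) -
					 (kernstart & ~0x3ffffffUL);

	printf("stext.run        = 0x%lx\n", stext_run);
	printf("virt_phys_offset = 0x%lx\n", virt_phys_offset);
	return 0;
}
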
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index d3cbda62857b..1136d26a95ae 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -148,7 +148,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -457,7 +460,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r3,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -795,7 +801,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6176b3cdf579..de6881259aef 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)
if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
(pteflags & _PAGE_DIRTY)))
rflags |= 1;
-
- /* Always add C */
- return rflags | HPTE_R_C;
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ return rflags | HPTE_R_C | HPTE_R_M;
}
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 34de9e0cdc34..826893fcb3a7 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -127,7 +127,11 @@ repeat:
/* Add in WIMG bits */
rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
- _PAGE_COHERENT | _PAGE_GUARDED));
+ _PAGE_GUARDED));
+ /*
+ * always enable memory coherence
+ */
+ rflags |= HPTE_R_M;
/* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 74551b5e41e5..5e4ee2573903 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -8,6 +8,44 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC64
+static inline int tlb1_next(void)
+{
+ struct paca_struct *paca = get_paca();
+ struct tlb_core_data *tcd;
+ int this, next;
+
+ tcd = paca->tcd_ptr;
+ this = tcd->esel_next;
+
+ next = this + 1;
+ if (next >= tcd->esel_max)
+ next = tcd->esel_first;
+
+ tcd->esel_next = next;
+ return this;
+}
+#else
+static inline int tlb1_next(void)
+{
+ int index, ncams;
+
+ ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ index = __get_cpu_var(next_tlbcam_idx);
+
+ /* Just round-robin the entries and wrap when we hit the end */
+ if (unlikely(index == ncams - 1))
+ __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+ else
+ __get_cpu_var(next_tlbcam_idx)++;
+
+ return index;
+}
+#endif /* !PPC64 */
+#endif /* FSL */
+
static inline int mmu_get_tsize(int psize)
{
return mmu_psize_defs[psize].enc;
@@ -47,7 +85,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
struct mm_struct *mm;
#ifdef CONFIG_PPC_FSL_BOOK3E
- int index, ncams;
+ int index;
#endif
if (unlikely(is_kernel_addr(ea)))
@@ -77,18 +115,11 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
}
#ifdef CONFIG_PPC_FSL_BOOK3E
- ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
/* We have to use the CAM(TLB1) on FSL parts for hugepages */
- index = __get_cpu_var(next_tlbcam_idx);
+ index = tlb1_next();
mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
-
- /* Just round-robin the entries and wrap when we hit the end */
- if (unlikely(index == ncams - 1))
- __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
- else
- __get_cpu_var(next_tlbcam_idx)++;
#endif
+
mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
mas2 = ea & ~((1UL << shift) - 1);
mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
@@ -103,7 +134,8 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
mtspr(SPRN_MAS7_MAS3, mas7_3);
} else {
- mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 0b7fb6761015..a5bcf9301196 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -99,6 +99,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* Add in WIMG bits */
rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
_PAGE_COHERENT | _PAGE_GUARDED));
+ /*
+ * always enable memory coherence
+ */
+ rflags |= HPTE_R_M;
slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
mmu_psize, ssize);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8c1dd23652a1..4b5cd5c2594d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -307,6 +307,12 @@ static void __init register_page_bootmem_info(void)
void __init mem_init(void)
{
+ /*
+ * book3s is limited to 16 page sizes due to encoding this in
+ * a 4-bit field for slices.
+ */
+ BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
+
#ifdef CONFIG_SWIOTLB
swiotlb_init(0);
#endif
@@ -507,7 +513,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* System memory should not be in /proc/iomem but various tools expect it
* (eg kdump).
*/
-static int add_system_ram_resources(void)
+static int __init add_system_ram_resources(void)
{
struct memblock_region *reg;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 83eb5d5f53d5..9615d82919b8 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -148,6 +148,8 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
+extern int switch_to_as1(void);
+extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
#endif
extern void loadcam_entry(unsigned int index);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5a944f25e94f..86a63de072c6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -31,6 +31,8 @@
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
@@ -152,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
}
}
-static void map_cpu_to_node(int cpu, int node)
+static void reset_numa_cpu_lookup_table(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ numa_cpu_lookup_table[cpu] = -1;
+}
+
+static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+}
+
+static void map_cpu_to_node(int cpu, int node)
+{
+ update_numa_cpu_lookup_table(cpu, node);
dbg("adding cpu %d to node %d\n", cpu, node);
@@ -522,11 +537,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = 0;
- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
+ int nid;
+ struct device_node *cpu;
+
+ /*
+ * If a valid cpu-to-node mapping is already available, use it
+ * directly instead of querying the firmware, since it represents
+ * the most recent mapping notified to us by the platform (eg: VPHN).
+ */
+ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ map_cpu_to_node(lcpu, nid);
+ return nid;
+ }
+
+ cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
+ nid = 0;
goto out;
}
@@ -542,16 +570,38 @@ out:
return nid;
}
+static void verify_cpu_node_mapping(int cpu, int node)
+{
+ int base, sibling, i;
+
+ /* Verify that all the threads in the core belong to the same node */
+ base = cpu_first_thread_sibling(cpu);
+
+ for (i = 0; i < threads_per_core; i++) {
+ sibling = base + i;
+
+ if (sibling == cpu || cpu_is_offline(sibling))
+ continue;
+
+ if (cpu_to_node(sibling) != node) {
+ WARN(1, "CPU thread siblings %d and %d don't belong"
+ " to the same node!\n", cpu, sibling);
+ break;
+ }
+ }
+}
+
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned long lcpu = (unsigned long)hcpu;
- int ret = NOTIFY_DONE;
+ int ret = NOTIFY_DONE, nid;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- numa_setup_cpu(lcpu);
+ nid = numa_setup_cpu(lcpu);
+ verify_cpu_node_mapping((int)lcpu, nid);
ret = NOTIFY_OK;
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -1069,6 +1119,7 @@ void __init do_init_bootmem(void)
*/
setup_node_to_cpumask_map();
+ reset_numa_cpu_lookup_table();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
@@ -1447,6 +1498,33 @@ static int update_cpu_topology(void *data)
return 0;
}
+static int update_lookup_table(void *data)
+{
+ struct topology_update_data *update;
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Upon topology update, the numa-cpu lookup table needs to be updated
+ * for all threads in the core, including offline CPUs, to ensure that
+ * future hotplug operations respect the cpu-to-node associativity
+ * properly.
+ */
+ for (update = data; update; update = update->next) {
+ int nid, base, j;
+
+ nid = update->new_nid;
+ base = cpu_first_thread_sibling(update->cpu);
+
+ for (j = 0; j < threads_per_core; j++) {
+ update_numa_cpu_lookup_table(base + j, nid);
+ }
+ }
+
+ return 0;
+}
+
/*
* Update the node maps and sysfs entries for each cpu whose home node
* has changed. Returns 1 when the topology has changed, and 0 otherwise.
@@ -1515,6 +1593,14 @@ int arch_update_cpu_topology(void)
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+ /*
+ * Update the numa-cpu lookup table with the new mappings, even for
+ * offline CPUs. It is best to perform this update from the stop-
+ * machine context.
+ */
+ stop_machine(update_lookup_table, &updates[0],
+ cpumask_of(raw_smp_processor_id()));
+
for (ud = &updates[0]; ud; ud = ud->next) {
unregister_cpu_under_node(ud->cpu, ud->old_nid);
register_cpu_under_node(ud->cpu, ud->new_nid);
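
A short sketch of the lookup-table contract the numa.c hunks above establish: after reset_numa_cpu_lookup_table(), -1 means "no cached mapping, consult the device tree/firmware", and any non-negative value (for example one left by a VPHN topology update) is reused directly. The fallback helper named below is hypothetical.

/*
 * Sketch of the numa_cpu_lookup_table contract set up above: -1 means no
 * cached node, anything else is reused directly by numa_setup_cpu().
 * example_nid_from_firmware() is a hypothetical stand-in for the device
 * tree lookup done in the real code.
 */
static int example_numa_setup_cpu(unsigned long cpu)
{
	int nid = numa_cpu_lookup_table[cpu];

	if (nid >= 0)		/* cached, e.g. from a prior VPHN update */
		return nid;

	return example_nid_from_firmware(cpu);
}
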
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 841e0d00863c..c695943a513c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
@@ -174,7 +173,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(pte_present(*ptep));
+ WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
#endif
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5b9601715289..343a87fa78b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -299,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
+ smp_wmb();
return err;
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9d95786aa80f..65b7b65e8708 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,7 +33,6 @@
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
-#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
@@ -153,6 +152,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
}
#endif /* !CONFIG_PPC_MMU_NOHASH */
}
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ /*
+ * With hardware tablewalk, a sync is needed to ensure that
+ * subsequent accesses see the PTE we just wrote. Unlike userspace
+ * mappings, we can't tolerate spurious faults, so make sure
+ * the new PTE will be seen the first time.
+ */
+ mb();
+#else
+ smp_wmb();
+#endif
return 0;
}
@@ -687,7 +698,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pmd_none(*pmdp));
+ WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 36e44b4260eb..c99f6510a0b2 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index b4113bf86353..16250b162375 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -136,7 +136,7 @@ BEGIN_MMU_FTR_SECTION
*/
PPC_TLBSRX_DOT(0,R16)
ldx r14,r14,r15 /* grab pgd entry */
- beq normal_tlb_miss_done /* tlb exists already, bail */
+ beq tlb_miss_done_bolted /* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
ldx r14,r14,r15 /* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
@@ -192,6 +192,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
mtspr SPRN_MAS7_MAS3,r15
tlbwe
+tlb_miss_done_bolted:
TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
tlb_epilog_bolted
rfi
@@ -239,6 +240,177 @@ itlb_miss_fault_bolted:
beq tlb_miss_common_bolted
b itlb_miss_kernel_bolted
+/*
+ * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
+ *
+ * Linear mapping is bolted: no virtual page table or nested TLB misses
+ * Indirect entries in TLB1, hardware loads resulting direct entries
+ * into TLB0
+ * No HES or NV hint on TLB1, so we need to do software round-robin
+ * No tlbsrx. so we need a spinlock, and we have to deal
+ * with MAS-damage caused by tlbsx
+ * 4K pages only
+ */
+
+ START_EXCEPTION(instruction_tlb_miss_e6500)
+ tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
+
+ ld r11,PACA_TCD_PTR(r13)
+ srdi. r15,r16,60 /* get region */
+ ori r16,r16,1
+
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne tlb_miss_kernel_e6500 /* user/kernel test */
+
+ b tlb_miss_common_e6500
+
+ START_EXCEPTION(data_tlb_miss_e6500)
+ tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
+
+ ld r11,PACA_TCD_PTR(r13)
+ srdi. r15,r16,60 /* get region */
+ rldicr r16,r16,0,62
+
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne tlb_miss_kernel_e6500 /* user vs kernel check */
+
+/*
+ * This is the guts of the TLB miss handler for e6500 and derivatives.
+ * We are entered with:
+ *
+ * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = tlb_per_core ptr
+ * r10 = crap (free to use)
+ */
+tlb_miss_common_e6500:
+ /*
+ * Search if we already have an indirect entry for that virtual
+ * address, and if we do, bail out.
+ *
+ * MAS6:IND should be already set based on MAS4
+ */
+ addi r10,r11,TCD_LOCK
+1: lbarx r15,0,r10
+ cmpdi r15,0
+ bne 2f
+ li r15,1
+ stbcx. r15,0,r10
+ bne 1b
+ .subsection 1
+2: lbz r15,0(r10)
+ cmpdi r15,0
+ bne 2b
+ b 1b
+ .previous
+
+ mfspr r15,SPRN_MAS2
+
+ tlbsx 0,r16
+ mfspr r10,SPRN_MAS1
+ andis. r10,r10,MAS1_VALID@h
+ bne tlb_miss_done_e6500
+
+ /* Undo MAS-damage from the tlbsx */
+ mfspr r10,SPRN_MAS1
+ oris r10,r10,MAS1_VALID@h
+ mtspr SPRN_MAS1,r10
+ mtspr SPRN_MAS2,r15
+
+ /* Now, we need to walk the page tables. First check if we are in
+ * range.
+ */
+ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+ bne- tlb_miss_fault_e6500
+
+ rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+ cmpldi cr0,r14,0
+ clrrdi r15,r15,3
+ beq- tlb_miss_fault_e6500 /* No PGDIR, bail */
+ ldx r14,r14,r15 /* grab pgd entry */
+
+ rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */
+ ldx r14,r14,r15 /* grab pud entry */
+
+ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500
+ ldx r14,r14,r15 /* Grab pmd entry */
+
+ mfspr r10,SPRN_MAS0
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500
+
+ /* Now we build the MAS for a 2M indirect page:
+ *
+ * MAS 0 : ESEL needs to be filled by software round-robin
+ * MAS 1 : Fully set up
+ * - PID already updated by caller if necessary
+ * - TSIZE for now is base ind page size always
+ * - TID already cleared if necessary
+ * MAS 2 : Default not 2M-aligned, needs to be redone
+ * MAS 3+7 : Needs to be done
+ */
+
+ ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+ mtspr SPRN_MAS7_MAS3,r14
+
+ clrrdi r15,r16,21 /* make EA 2M-aligned */
+ mtspr SPRN_MAS2,r15
+
+ lbz r15,TCD_ESEL_NEXT(r11)
+ lbz r16,TCD_ESEL_MAX(r11)
+ lbz r14,TCD_ESEL_FIRST(r11)
+ rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */
+ addi r15,r15,1 /* increment esel_next */
+ mtspr SPRN_MAS0,r10
+ cmpw r15,r16
+ iseleq r15,r14,r15 /* if next == last use first */
+ stb r15,TCD_ESEL_NEXT(r11)
+
+ tlbwe
+
+tlb_miss_done_e6500:
+ .macro tlb_unlock_e6500
+ li r15,0
+ isync
+ stb r15,TCD_LOCK(r11)
+ .endm
+
+ tlb_unlock_e6500
+ TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+ tlb_epilog_bolted
+ rfi
+
+tlb_miss_kernel_e6500:
+ mfspr r10,SPRN_MAS1
+ ld r14,PACA_KERNELPGD(r13)
+ cmpldi cr0,r15,8 /* Check for vmalloc region */
+ rlwinm r10,r10,0,16,1 /* Clear TID */
+ mtspr SPRN_MAS1,r10
+ beq+ tlb_miss_common_e6500
+
+tlb_miss_fault_e6500:
+ tlb_unlock_e6500
+ /* We need to check if it was an instruction miss */
+ andi. r16,r16,1
+ bne itlb_miss_fault_e6500
+dtlb_miss_fault_e6500:
+ TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_data_storage_book3e
+itlb_miss_fault_e6500:
+ TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_instruction_storage_book3e
+
+
/**********************************************************************
* *
* TLB miss handling for Book3E with TLB reservation and HES support *
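
The esel_next round-robin that the e6500 miss handler performs in assembly above has a direct C analogue; it matches the tlb1_next() helper added to hugetlbpage-book3e.c earlier in this diff and is shown here only for readability.

/*
 * C analogue of the TCD_ESEL_NEXT round-robin done in assembly above,
 * matching the tlb1_next() helper added to hugetlbpage-book3e.c in this
 * same series.
 */
static int example_esel_next(struct tlb_core_data *tcd)
{
	int this = tcd->esel_next;
	int next = this + 1;

	if (next >= tcd->esel_max)
		next = tcd->esel_first;		/* wrap to the first entry */

	tcd->esel_next = next;
	return this;
}
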
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 358d74303138..735839b74dc5 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -43,6 +43,7 @@
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>
+#include <asm/paca.h>
#include "mmu_decl.h"
@@ -58,6 +59,10 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.shift = 12,
.enc = BOOK3E_PAGESZ_4K,
},
+ [MMU_PAGE_2M] = {
+ .shift = 21,
+ .enc = BOOK3E_PAGESZ_2M,
+ },
[MMU_PAGE_4M] = {
.shift = 22,
.enc = BOOK3E_PAGESZ_4M,
@@ -136,7 +141,7 @@ static inline int mmu_get_tsize(int psize)
int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
-int book3e_htw_enabled; /* Is HW tablewalk enabled ? */
+int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
#endif /* CONFIG_PPC64 */
@@ -377,7 +382,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].enc;
- if (book3e_htw_enabled) {
+ if (book3e_htw_mode != PPC_HTW_NONE) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
@@ -430,7 +435,7 @@ static void setup_page_sizes(void)
def = &mmu_psize_defs[psize];
shift = def->shift;
- if (shift == 0)
+ if (shift == 0 || shift & 1)
continue;
/* adjust to be in terms of 4^shift Kb */
@@ -440,21 +445,40 @@ static void setup_page_sizes(void)
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
- goto no_indirect;
+ goto out;
}
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
- u32 tlb1ps = mfspr(SPRN_TLB1PS);
+ u32 tlb1cfg, tlb1ps;
+
+ tlb0cfg = mfspr(SPRN_TLB0CFG);
+ tlb1cfg = mfspr(SPRN_TLB1CFG);
+ tlb1ps = mfspr(SPRN_TLB1PS);
+ eptcfg = mfspr(SPRN_EPTCFG);
+
+ if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+ book3e_htw_mode = PPC_HTW_E6500;
+
+ /*
+ * We expect 4K subpage size and unrestricted indirect size.
+ * The lack of a restriction on indirect size is a Freescale
+ * extension, indicated by PSn = 0 but SPSn != 0.
+ */
+ if (eptcfg != 2)
+ book3e_htw_mode = PPC_HTW_NONE;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+ if (book3e_htw_mode && psize == MMU_PAGE_2M)
+ def->flags |= MMU_PAGE_SIZE_INDIRECT;
}
}
- goto no_indirect;
+ goto out;
}
#endif
@@ -471,8 +495,11 @@ static void setup_page_sizes(void)
}
/* Indirect page sizes supported ? */
- if ((tlb0cfg & TLBnCFG_IND) == 0)
- goto no_indirect;
+ if ((tlb0cfg & TLBnCFG_IND) == 0 ||
+ (tlb0cfg & TLBnCFG_PT) == 0)
+ goto out;
+
+ book3e_htw_mode = PPC_HTW_IBM;
/* Now, we only deal with one IND page size for each
* direct size. Hopefully all implementations today are
@@ -497,8 +524,8 @@ static void setup_page_sizes(void)
def->ind = ps + 10;
}
}
- no_indirect:
+out:
/* Cleanup array and print summary */
pr_info("MMU: Supported page sizes\n");
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -518,44 +545,25 @@ static void setup_page_sizes(void)
}
}
-static void __patch_exception(int exc, unsigned long addr)
-{
- extern unsigned int interrupt_base_book3e;
- unsigned int *ibase = &interrupt_base_book3e;
-
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
-
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
-}
-
-#define patch_exception(exc, name) do { \
- extern unsigned int name; \
- __patch_exception((exc), (unsigned long)&name); \
-} while (0)
-
static void setup_mmu_htw(void)
{
- /* Check if HW tablewalk is present, and if yes, enable it by:
- *
- * - patching the TLB miss handlers to branch to the
- * one dedicates to it
- *
- * - setting the global book3e_htw_enabled
- */
- unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+ /*
+ * If we want to use HW tablewalk, enable it by patching the TLB miss
+ * handlers to branch to the ones dedicated to it.
+ */
- if ((tlb0cfg & TLBnCFG_IND) &&
- (tlb0cfg & TLBnCFG_PT)) {
+ switch (book3e_htw_mode) {
+ case PPC_HTW_IBM:
patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
- book3e_htw_enabled = 1;
+ break;
+ case PPC_HTW_E6500:
+ patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+ patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+ break;
}
pr_info("MMU: Book3E HW tablewalk %s\n",
- book3e_htw_enabled ? "enabled" : "not supported");
+ book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
@@ -595,8 +603,16 @@ static void __early_init_mmu(int boot_cpu)
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
- if (book3e_htw_enabled) {
- mas4 |= mas4 | MAS4_INDD;
+ switch (book3e_htw_mode) {
+ case PPC_HTW_E6500:
+ mas4 |= MAS4_INDD;
+ mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+ mas4 |= MAS4_TLBSELD(1);
+ mmu_pte_psize = MMU_PAGE_2M;
+ break;
+
+ case PPC_HTW_IBM:
+ mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_256M;
@@ -604,13 +620,16 @@ static void __early_init_mmu(int boot_cpu)
mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_1M;
#endif
- } else {
+ break;
+
+ case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
mmu_pte_psize = mmu_virtual_psize;
+ break;
}
mtspr(SPRN_MAS4, mas4);
@@ -630,8 +649,11 @@ static void __early_init_mmu(int boot_cpu)
/* limit memory so we dont have linear faults */
memblock_enforce_memory_limit(linear_map_top);
- patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
- patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
+ if (book3e_htw_mode == PPC_HTW_NONE) {
+ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+ patch_exception(0x1e0,
+ exc_instruction_tlb_miss_bolted_book3e);
+ }
}
#endif
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 626ad081639f..43ff3c797fbf 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -402,7 +402,9 @@ _GLOBAL(set_context)
* Load TLBCAM[index] entry in to the L2 CAM MMU
*/
_GLOBAL(loadcam_entry)
- LOAD_REG_ADDR(r4, TLBCAM)
+ mflr r5
+ LOAD_REG_ADDR_PIC(r4, TLBCAM)
+ mtlr r5
mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,TLBCAM_MAS0(r3)
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index ff617246d128..d29b6e4e5e72 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -16,7 +16,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index b9589c19ccda..1f0ebdeea5f7 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -16,7 +16,6 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index 2a82d3ed464d..14cf86fdddab 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -14,7 +14,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index 42f778dff919..a114a7c22d40 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -22,7 +22,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index f444b94935f5..962fe7b3e3fb 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -10,7 +10,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 9b801b8c8c5a..7e5b8ed3a1b7 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -8,7 +8,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 670a033264c0..2bdc8c862c46 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -99,7 +99,6 @@ config SBC834x
config ASP834x
bool "Analogue & Micro ASP 834x"
select PPC_MPC834x
- select REDBOOT
help
This enables support for the Analogue & Micro ASP 83xx
board.
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index fd71cfdf2380..e238b6a55b15 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 3d9716ccd327..4b4c081df94d 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -10,7 +10,6 @@
* by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/ioport.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 4d4634958cfb..c17aae80e7ff 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -123,6 +123,12 @@ config P1023_RDS
help
This option enables support for the P1023 RDS and RDB boards
+config TWR_P102x
+ bool "Freescale TWR-P102x"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the TWR-P1025 board.
+
config SOCRATES
bool "Socrates"
select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index dd4c0b59577b..25cebe74ac46 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
+obj-$(CONFIG_TWR_P102x) += twr_p102x.o
obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index eba78c85303f..3b085c7ee539 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <asm/qe.h>
#include <sysdev/cpm2_pic.h>
#include "mpc85xx.h"
@@ -82,3 +83,40 @@ void __init mpc85xx_cpm2_pic_init(void)
irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
+
+#ifdef CONFIG_QUICC_ENGINE
+void __init mpc85xx_qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np) {
+ np = of_find_node_by_name(NULL, "qe");
+ if (!np) {
+ pr_err("%s: Could not find Quicc Engine node\n",
+ __func__);
+ return;
+ }
+ }
+
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return;
+ }
+
+ qe_reset();
+ of_node_put(np);
+
+ np = of_find_node_by_name(NULL, "par_io");
+ if (np) {
+ struct device_node *ucc;
+
+ par_io_init(np);
+ of_node_put(np);
+
+ for_each_node_by_name(ucc, "ucc")
+ par_io_of_config(ucc);
+
+ }
+}
+#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index 2aa7c5dc2c7f..fc51dd4092e5 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -8,4 +8,10 @@ extern void mpc85xx_cpm2_pic_init(void);
static inline void __init mpc85xx_cpm2_pic_init(void) {}
#endif /* CONFIG_CPM2 */
+#ifdef CONFIG_QUICC_ENGINE
+extern void mpc85xx_qe_init(void);
+#else
+static inline void __init mpc85xx_qe_init(void) {}
+#endif
+
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index a7b3621a8df5..34f3c5eb3bee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006-2010, 2012-2013 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Author: Andy Fleming <afleming@freescale.com>
@@ -238,32 +238,7 @@ static void __init mpc85xx_mds_qe_init(void)
{
struct device_node *np;
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!np) {
- np = of_find_node_by_name(NULL, "qe");
- if (!np)
- return;
- }
-
- if (!of_device_is_available(np)) {
- of_node_put(np);
- return;
- }
-
- qe_reset();
- of_node_put(np);
-
- np = of_find_node_by_name(NULL, "par_io");
- if (np) {
- struct device_node *ucc;
-
- par_io_init(np);
- of_node_put(np);
-
- for_each_node_by_name(ucc, "ucc")
- par_io_of_config(ucc);
- }
-
+ mpc85xx_qe_init();
mpc85xx_mds_reset_ucc_phys();
if (machine_is(p1021_mds)) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 53b6fb0a3d56..e15bdd18fdb2 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -1,7 +1,7 @@
/*
* MPC85xx RDB Board Setup
*
- * Copyright 2009,2012 Freescale Semiconductor Inc.
+ * Copyright 2009,2012-2013 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -98,26 +98,7 @@ static void __init mpc85xx_rdb_setup_arch(void)
fsl_pci_assign_primary();
#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!np) {
- pr_err("%s: Could not find Quicc Engine node\n", __func__);
- goto qe_fail;
- }
-
- qe_reset();
- of_node_put(np);
-
- np = of_find_node_by_name(NULL, "par_io");
- if (np) {
- struct device_node *ucc;
-
- par_io_init(np);
- of_node_put(np);
-
- for_each_node_by_name(ucc, "ucc")
- par_io_of_config(ucc);
-
- }
+ mpc85xx_qe_init();
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(p1025_rdb)) {
@@ -148,8 +129,6 @@ static void __init mpc85xx_rdb_setup_arch(void)
}
#endif
-
-qe_fail:
#endif /* CONFIG_QUICC_ENGINE */
printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index b9197cea1854..bb75add67084 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 393f975ab397..6382098d6f8d 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -389,15 +389,18 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC */
-static void smp_85xx_setup_cpu(int cpu_nr)
+static void smp_85xx_basic_setup(int cpu_nr)
{
- if (smp_85xx_ops.probe == smp_mpic_probe)
- mpic_setup_this_cpu();
-
if (cpu_has_feature(CPU_FTR_DBELL))
doorbell_setup_this_cpu();
}
+static void smp_85xx_setup_cpu(int cpu_nr)
+{
+ mpic_setup_this_cpu();
+ smp_85xx_basic_setup(cpu_nr);
+}
+
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
{ .compatible = "fsl,mpc8572-guts", },
{ .compatible = "fsl,p1020-guts", },
@@ -412,13 +415,14 @@ void __init mpc85xx_smp_init(void)
{
struct device_node *np;
- smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
np = of_find_node_by_type(NULL, "open-pic");
if (np) {
smp_85xx_ops.probe = smp_mpic_probe;
+ smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
smp_85xx_ops.message_pass = smp_mpic_message_pass;
- }
+ } else
+ smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
if (cpu_has_feature(CPU_FTR_DBELL)) {
/*
@@ -427,6 +431,7 @@ void __init mpc85xx_smp_init(void)
*/
smp_85xx_ops.message_pass = NULL;
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+ smp_85xx_ops.probe = NULL;
}
np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
new file mode 100644
index 000000000000..c25ff10f05ee
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Michael Johnston <michael.johnston@freescale.com>
+ *
+ * Description:
+ * TWR-P102x Board Setup
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/of_platform.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/fsl_guts.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
+
+static void __init twr_p1025_pic_init(void)
+{
+ struct mpic *mpic;
+
+#ifdef CONFIG_QUICC_ENGINE
+ struct device_node *np;
+#endif
+
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (np) {
+ qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
+ qe_ic_cascade_high_mpic);
+ of_node_put(np);
+ } else
+ pr_err("Could not find qe-ic node\n");
+#endif
+}
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init twr_p1025_setup_arch(void)
+{
+#ifdef CONFIG_QUICC_ENGINE
+ struct device_node *np;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("twr_p1025_setup_arch()", 0);
+
+ mpc85xx_smp_init();
+
+ fsl_pci_assign_primary();
+
+#ifdef CONFIG_QUICC_ENGINE
+ mpc85xx_qe_init();
+
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
+ if (machine_is(twr_p1025)) {
+ struct ccsr_guts __iomem *guts;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
+ if (np) {
+ guts = of_iomap(np, 0);
+ if (!guts)
+ pr_err("twr_p1025: could not map global utilities register\n");
+ else {
+ /* P1025 has pins muxed for QE and other functions. To
+ * enable QE UEC mode, we need to set bit QE0 for UCC1
+ * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
+ * and QE12 for QE MII management signals in PMUXCR
+ * register.
+ * Set QE mux bits in PMUXCR */
+ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
+ MPC85xx_PMUXCR_QE(3) |
+ MPC85xx_PMUXCR_QE(9) |
+ MPC85xx_PMUXCR_QE(12));
+ iounmap(guts);
+
+#if defined(CONFIG_SERIAL_QE)
+			/* On the P1025TWR board, UCC7 acts as a UART port.
+			 * However, UCC7's CTS pin is low by default, which
+			 * disturbs transmission in full-duplex communication.
+			 * So disable the flow control pin PA18; the UCC7 UART
+			 * then uses only the RXD and TXD pins.
+ */
+ par_io_config_pin(0, 18, 0, 0, 0, 0);
+#endif
+ /* Drive PB29 to CPLD low - CPLD will then change
+ * muxing from LBC to QE */
+ par_io_config_pin(1, 29, 1, 0, 0, 0);
+ par_io_data_set(1, 29, 0);
+ }
+ of_node_put(np);
+ }
+ }
+#endif
+#endif /* CONFIG_QUICC_ENGINE */
+
+ pr_info("TWR-P1025 board from Freescale Semiconductor\n");
+}
+
+machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
+
+static int __init twr_p1025_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,TWR-P1025");
+}
+
+define_machine(twr_p1025) {
+ .name = "TWR-P1025",
+ .probe = twr_p1025_probe,
+ .setup_arch = twr_p1025_setup_arch,
+ .init_IRQ = twr_p1025_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 8dec3c0911ad..bd6f1a1cf922 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -45,7 +45,6 @@ config PPC_EP88XC
config PPC_ADDER875
bool "Analogue & Micro Adder 875"
select CPM1
- select REDBOOT
help
This enables support for the Analogue & Micro Adder 875
board.
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index bca2465a9c34..434fda39bf8b 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -72,6 +72,7 @@ config PPC_BOOK3S_64
select PPC_HAVE_PMU_SUPPORT
select SYS_SUPPORTS_HUGETLBFS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
+ select ARCH_SUPPORTS_NUMA_BALANCING
config PPC_BOOK3E_64
bool "Embedded processors"
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index c34ee4e60873..d4d245c0d787 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -111,7 +111,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
raw_spin_lock(&beat_htab_lock);
lpar_rc = beat_read_mask(hpte_group);
@@ -337,7 +337,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
/* insert into not-volted entry */
lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b53560660b72..2b90ff8a93be 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
- for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+	for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
{
cell_iommu_setup_stab(iommu, base, size, 0, 0);
iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
- IOMMU_PAGE_SHIFT);
+ IOMMU_PAGE_SHIFT_4K);
cell_iommu_enable_hardware(iommu);
}
@@ -487,8 +487,10 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
window->table.it_blocksize = 16;
window->table.it_base = (unsigned long)iommu->ptab;
window->table.it_index = iommu->nid;
- window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
- window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+ window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ window->table.it_offset =
+ (offset >> window->table.it_page_shift) + pte_offset;
+ window->table.it_size = size >> window->table.it_page_shift;
iommu_init_table(&window->table, iommu->nid);
@@ -773,7 +775,7 @@ static void __init cell_iommu_init_one(struct device_node *np,
/* Setup the iommu_table */
cell_iommu_setup_window(iommu, np, base, size,
- offset >> IOMMU_PAGE_SHIFT);
+ offset >> IOMMU_PAGE_SHIFT_4K);
}
static void __init cell_disable_iommus(void)
@@ -1122,7 +1124,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
- IOMMU_PAGE_SHIFT);
+ IOMMU_PAGE_SHIFT_4K);
cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
fbase, fsize);
cell_iommu_enable_hardware(iommu);
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index dead91b177b9..b6c9a0dcc924 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 302ba43d73a1..6d3c7a9fd047 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -67,6 +67,18 @@ config PPC_C2K
This option enables support for the GE Fanuc C2K board (formerly
an SBS board).
+config MVME5100
+ bool "Motorola/Emerson MVME5100"
+ depends on EMBEDDED6xx
+ select MPIC
+ select PCI
+ select PPC_INDIRECT_PCI
+ select PPC_I8259
+ select PPC_NATIVE
+ help
+ This option enables support for the Motorola (now Emerson) MVME5100
+ board.
+
config TSI108_BRIDGE
bool
select PCI
@@ -113,4 +125,3 @@ config WII
help
Select WII if configuring for the Nintendo Wii.
More information at: <http://gc-linux.sourceforge.net/>
-
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index 66c23e423f40..cdd48d402b93 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o
obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o
obj-$(CONFIG_GAMECUBE) += gamecube.o
obj-$(CONFIG_WII) += wii.o hlwd-pic.o
+obj-$(CONFIG_MVME5100) += mvme5100.o
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 6c03034dbbd3..c269caee58f9 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
new file mode 100644
index 000000000000..25e3bfb64efb
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -0,0 +1,221 @@
+/*
+ * Board setup routines for the Motorola/Emerson MVME5100.
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * Based on earlier code by:
+ *
+ * Matt Porter, MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Author: Stephen Chivers <schivers@csc.com>
+ *
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/i8259.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpic.h>
+#include <asm/prom.h>
+#include <mm/mmu_decl.h>
+#include <asm/udbg.h>
+
+#define HAWK_MPIC_SIZE 0x00040000U
+#define MVME5100_PCI_MEM_OFFSET 0x00000000
+
+/* Board register addresses. */
+#define BOARD_STATUS_REG 0xfef88080
+#define BOARD_MODFAIL_REG 0xfef88090
+#define BOARD_MODRST_REG 0xfef880a0
+#define BOARD_TBEN_REG 0xfef880c0
+#define BOARD_SW_READ_REG 0xfef880e0
+#define BOARD_GEO_ADDR_REG 0xfef880e8
+#define BOARD_EXT_FEATURE1_REG 0xfef880f0
+#define BOARD_EXT_FEATURE2_REG 0xfef88100
+
+static phys_addr_t pci_membase;
+static u_char *restart;
+
+static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int cascade_irq = i8259_irq();
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void __init mvme5100_pic_init(void)
+{
+ struct mpic *mpic;
+ struct device_node *np;
+ struct device_node *cp = NULL;
+ unsigned int cirq;
+ unsigned long intack = 0;
+ const u32 *prop = NULL;
+
+ np = of_find_node_by_type(NULL, "open-pic");
+ if (!np) {
+ pr_err("Could not find open-pic node\n");
+ return;
+ }
+
+ mpic = mpic_alloc(np, pci_membase, 0, 16, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ of_node_put(np);
+
+ mpic_assign_isu(mpic, 0, pci_membase + 0x10000);
+
+ mpic_init(mpic);
+
+ cp = of_find_compatible_node(NULL, NULL, "chrp,iic");
+ if (cp == NULL) {
+ pr_warn("mvme5100_pic_init: couldn't find i8259\n");
+ return;
+ }
+
+ cirq = irq_of_parse_and_map(cp, 0);
+ if (cirq == NO_IRQ) {
+ pr_warn("mvme5100_pic_init: no cascade interrupt?\n");
+ return;
+ }
+
+ np = of_find_compatible_node(NULL, "pci", "mpc10x-pci");
+ if (np) {
+ prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
+
+ if (prop)
+ intack = prop[0];
+
+ of_node_put(np);
+ }
+
+ if (intack)
+ pr_debug("mvme5100_pic_init: PCI 8259 intack at 0x%016lx\n",
+ intack);
+
+ i8259_init(cp, intack);
+ of_node_put(cp);
+ irq_set_chained_handler(cirq, mvme5100_8259_cascade);
+}
+
+static int __init mvme5100_add_bridge(struct device_node *dev)
+{
+ const int *bus_range;
+ int len;
+ struct pci_controller *hose;
+ unsigned short devid;
+
+ pr_info("Adding PCI host bridge %s\n", dev->full_name);
+
+ bus_range = of_get_property(dev, "bus-range", &len);
+
+ hose = pcibios_alloc_controller(dev);
+ if (hose == NULL)
+ return -ENOMEM;
+
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+ setup_indirect_pci(hose, 0xfe000cf8, 0xfe000cfc, 0);
+
+ pci_process_bridge_OF_ranges(hose, dev, 1);
+
+ early_read_config_word(hose, 0, 0, PCI_DEVICE_ID, &devid);
+
+ if (devid != PCI_DEVICE_ID_MOTOROLA_HAWK) {
+ pr_err("HAWK PHB not present?\n");
+ return 0;
+ }
+
+ early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+
+ if (pci_membase == 0) {
+ pr_err("HAWK PHB mibar not correctly set?\n");
+ return 0;
+ }
+
+ pr_info("mvme5100_pic_init: pci_membase: %x\n", pci_membase);
+
+ return 0;
+}
+
+static struct of_device_id mvme5100_of_bus_ids[] __initdata = {
+ { .compatible = "hawk-bridge", },
+ {},
+};
+
+/*
+ * Setup the architecture
+ */
+static void __init mvme5100_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mvme5100_setup_arch()", 0);
+
+ for_each_compatible_node(np, "pci", "hawk-pci")
+ mvme5100_add_bridge(np);
+
+ restart = ioremap(BOARD_MODRST_REG, 4);
+}
+
+
+static void mvme5100_show_cpuinfo(struct seq_file *m)
+{
+ seq_puts(m, "Vendor\t\t: Motorola/Emerson\n");
+ seq_puts(m, "Machine\t\t: MVME5100\n");
+}
+
+static void mvme5100_restart(char *cmd)
+{
+
+ local_irq_disable();
+ mtmsr(mfmsr() | MSR_IP);
+
+ out_8((u_char *) restart, 0x01);
+
+ while (1)
+ ;
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mvme5100_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "MVME5100");
+}
+
+static int __init probe_of_platform_devices(void)
+{
+
+ of_platform_bus_probe(NULL, mvme5100_of_bus_ids, NULL);
+ return 0;
+}
+
+machine_device_initcall(mvme5100, probe_of_platform_devices);
+
+define_machine(mvme5100) {
+ .name = "MVME5100",
+ .probe = mvme5100_probe,
+ .setup_arch = mvme5100_setup_arch,
+ .init_IRQ = mvme5100_pic_init,
+ .show_cpuinfo = mvme5100_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = mvme5100_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index f3defd8a2806..aafa01ba062f 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -18,7 +18,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7d2d036754b5..2e576f2ae442 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -138,8 +138,11 @@ static void iommu_table_iobmap_setup(void)
pr_debug(" -> %s\n", __func__);
iommu_table_iobmap.it_busno = 0;
iommu_table_iobmap.it_offset = 0;
+ iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;
+
/* it_size is in number of entries */
- iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT;
+ iommu_table_iobmap.it_size =
+ 0x80000000 >> iommu_table_iobmap.it_page_shift;
/* Initialize the common IOMMU code */
iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index d588e48dff74..43075081721f 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -5,7 +5,6 @@
* FIXME: LOCKING !!!
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 9fced3f6d2dc..895e8a20a3fc 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -13,11 +13,6 @@ config PPC_POWERNV
select ARCH_RANDOM
default y
-config POWERNV_MSI
- bool "Support PCI MSI on PowerNV platform"
- depends on PCI_MSI
- default y
-
config PPC_POWERNV_RTAS
depends on PPC_POWERNV
bool "Support for RTAS based PowerNV platforms such as BML"
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 873fa1370dc4..8d767fde5a6a 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
+obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index d7ddcee7feb8..e1e71618b70c 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -14,7 +14,6 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -578,11 +577,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
return -EIO;
}
- /*
- * FIXME: We probably need log the error in somewhere.
- * Lets make it up in future.
- */
- /* pr_info("%s", phb->diag.blob); */
+ /* The PHB diag-data is always indicative */
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
spin_unlock_irqrestore(&phb->lock, flags);
@@ -670,143 +666,9 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
}
}
-static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
- struct OpalIoPhbErrorCommon *common)
-{
- struct OpalIoP7IOCPhbErrorData *data;
- int i;
-
- data = (struct OpalIoP7IOCPhbErrorData *)common;
-
- pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n",
- hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
-
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
-
- for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
- continue;
-
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
- }
-}
-
-static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
- struct OpalIoPhbErrorCommon *common)
-{
- struct OpalIoPhb3ErrorData *data;
- int i;
-
- data = (struct OpalIoPhb3ErrorData*)common;
- pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
- hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" nFir: %016llx\n", data->nFir);
- pr_info(" nFirMask: %016llx\n", data->nFirMask);
- pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
- pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
- pr_info(" PhbCsr: %016llx\n", data->phbCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
-
- for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
- continue;
-
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
- }
-}
-
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoPhbErrorCommon *common;
long rc;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
@@ -817,18 +679,7 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
return;
}
- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
- switch (common->ioType) {
- case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
- ioda_eeh_p7ioc_phb_diag(hose, common);
- break;
- case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
- ioda_eeh_phb3_phb_diag(hose, common);
- break;
- default:
- pr_warning("%s: Unrecognized I/O chip %d\n",
- __func__, common->ioType);
- }
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
@@ -862,11 +713,7 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
dev.phb = hose;
dev.pe_config_addr = pe_no;
dev_pe = eeh_pe_get(&dev);
- if (!dev_pe) {
- pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n",
- __func__, hose->global_number, pe_no);
- return -EEXIST;
- }
+	if (!dev_pe)
+		return -EEXIST;
*pe = dev_pe;
return 0;
@@ -884,12 +731,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
*/
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
struct pnv_phb *phb;
u64 frozen_pe_no;
u16 err_type, severity;
long rc;
- int ret = 1;
+ int ret = EEH_NEXT_ERR_NONE;
/*
* While running here, it's safe to purge the event queue.
@@ -899,7 +746,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
eeh_remove_event(NULL);
opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list, list_node) {
/*
* If the subordinate PCI buses of the PHB has been
* removed, we needn't take care of it any more.
@@ -938,19 +785,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
switch (err_type) {
case OPAL_EEH_IOC_ERROR:
if (severity == OPAL_EEH_SEV_IOC_DEAD) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list,
+ list_node) {
phb = hose->private_data;
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
}
pr_err("EEH: dead IOC detected\n");
- ret = 4;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_IOC;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: IOC informative error "
"detected\n");
ioda_eeh_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
@@ -962,37 +809,61 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
pr_err("EEH: dead PHB#%x detected\n",
hose->global_number);
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
- ret = 3;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_PHB;
} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
if (ioda_eeh_get_phb_pe(hose, pe))
break;
pr_err("EEH: fenced PHB#%x detected\n",
hose->global_number);
- ret = 2;
- goto out;
+ ret = EEH_NEXT_ERR_FENCED_PHB;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: PHB#%x informative error "
"detected\n",
hose->global_number);
ioda_eeh_phb_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
case OPAL_EEH_PE_ERROR:
- if (ioda_eeh_get_pe(hose, frozen_pe_no, pe))
- break;
+ /*
+ * If we can't find the corresponding PE, the
+			 * PEEV / PEST would be messy. So we force a
+ * fenced PHB so that it can be recovered.
+ */
+ if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
+ if (!ioda_eeh_get_phb_pe(hose, pe)) {
+ pr_err("EEH: Escalated fenced PHB#%x "
+ "detected for PE#%llx\n",
+ hose->global_number,
+ frozen_pe_no);
+ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else {
+ ret = EEH_NEXT_ERR_NONE;
+ }
+ } else {
+ pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
+ (*pe)->addr, (*pe)->phb->global_number);
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ }
- pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
- (*pe)->addr, (*pe)->phb->global_number);
- ret = 1;
- goto out;
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, err_type);
}
+
+ /*
+ * If we have no errors on the specific PHB or only
+ * informative error there, we continue poking it.
+ * Otherwise, we need actions to be taken by upper
+ * layer.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
}
- ret = 0;
-out:
return ret;
}
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 73b981438cc5..a79fddc5e74e 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -344,6 +344,27 @@ static int powernv_eeh_next_error(struct eeh_pe **pe)
return -EEXIST;
}
+static int powernv_eeh_restore_config(struct device_node *dn)
+{
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct pnv_phb *phb;
+ s64 ret;
+
+ if (!edev)
+ return -EEXIST;
+
+ phb = edev->phb->private_data;
+ ret = opal_pci_reinit(phb->opal_id,
+ OPAL_REINIT_PCI_DEV, edev->config_addr);
+ if (ret) {
+ pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
+ __func__, edev->config_addr, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static struct eeh_ops powernv_eeh_ops = {
.name = "powernv",
.init = powernv_eeh_init,
@@ -359,7 +380,8 @@ static struct eeh_ops powernv_eeh_ops = {
.configure_bridge = powernv_eeh_configure_bridge,
.read_config = pnv_pci_cfg_read,
.write_config = pnv_pci_cfg_write,
- .next_error = powernv_eeh_next_error
+ .next_error = powernv_eeh_next_error,
+ .restore_config = powernv_eeh_restore_config
};
/**
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index d8773079ce19..714ef972406b 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -76,8 +76,8 @@
/* Validate buffer size */
#define VALIDATE_BUF_SIZE 4096
-/* XXX: Assume candidate image size is <= 256MB */
-#define MAX_IMAGE_SIZE 0x10000000
+/* XXX: Assume candidate image size is <= 1GB */
+#define MAX_IMAGE_SIZE 0x40000000
/* Flash sg list version */
#define SG_LIST_VERSION (1UL)
@@ -103,27 +103,6 @@ struct image_header_t {
uint32_t size;
};
-/* Scatter/gather entry */
-struct opal_sg_entry {
- void *data;
- long length;
-};
-
-/* We calculate number of entries based on PAGE_SIZE */
-#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
-
-/*
- * This struct is very similar but not identical to that
- * needed by the opal flash update. All we need to do for
- * opal is rewrite num_entries into a version/length and
- * translate the pointers to absolute.
- */
-struct opal_sg_list {
- unsigned long num_entries;
- struct opal_sg_list *next;
- struct opal_sg_entry entry[SG_ENTRIES_PER_NODE];
-};
-
struct validate_flash_t {
int status; /* Return status */
void *buf; /* Candidate image buffer */
@@ -333,7 +312,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
addr = image_data.data;
size = image_data.size;
- sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL);
+ sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1)
return NULL;
@@ -351,8 +330,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
sg1->num_entries++;
if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc((sizeof(struct opal_sg_list)),
- GFP_KERNEL);
+ sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1->next) {
pr_err("%s : Failed to allocate memory\n",
__func__);
@@ -402,7 +380,10 @@ static int opal_flash_update(int op)
else
sg->next = NULL;
- /* Make num_entries into the version/length field */
+ /*
+ * Convert num_entries to version/length format
+ * to satisfy OPAL.
+ */
sg->num_entries = (SG_LIST_VERSION << 56) |
(sg->num_entries * sizeof(struct opal_sg_entry) + 16);
}
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
new file mode 100644
index 000000000000..ec4132239cdf
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -0,0 +1,146 @@
+/*
+ * OPAL asynchronous memory error handling support in PowerNV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/opal.h>
+#include <asm/cputable.h>
+
+static int opal_mem_err_nb_init;
+static LIST_HEAD(opal_memory_err_list);
+static DEFINE_SPINLOCK(opal_mem_err_lock);
+
+struct OpalMsgNode {
+ struct list_head list;
+ struct opal_msg msg;
+};
+
+static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
+{
+ uint64_t paddr_start, paddr_end;
+
+ pr_debug("%s: Retrived memory error event, type: 0x%x\n",
+ __func__, merr_evt->type);
+ switch (merr_evt->type) {
+ case OPAL_MEM_ERR_TYPE_RESILIENCE:
+ paddr_start = merr_evt->u.resilience.physical_address_start;
+ paddr_end = merr_evt->u.resilience.physical_address_end;
+ break;
+ case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
+ paddr_start = merr_evt->u.dyn_dealloc.physical_address_start;
+ paddr_end = merr_evt->u.dyn_dealloc.physical_address_end;
+ break;
+ default:
+ return;
+ }
+
+ for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
+ memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
+ }
+}
+
+static void handle_memory_error(void)
+{
+ unsigned long flags;
+ struct OpalMemoryErrorData *merr_evt;
+ struct OpalMsgNode *msg_node;
+
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ while (!list_empty(&opal_memory_err_list)) {
+ msg_node = list_entry(opal_memory_err_list.next,
+ struct OpalMsgNode, list);
+ list_del(&msg_node->list);
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+
+ merr_evt = (struct OpalMemoryErrorData *)
+ &msg_node->msg.params[0];
+ handle_memory_error_event(merr_evt);
+ kfree(msg_node);
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ }
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+}
+
+static void mem_error_handler(struct work_struct *work)
+{
+ handle_memory_error();
+}
+
+static DECLARE_WORK(mem_error_work, mem_error_handler);
+
+/*
+ * opal_memory_err_event - notifier handler that queues up the opal message
+ * to be processed later.
+ */
+static int opal_memory_err_event(struct notifier_block *nb,
+ unsigned long msg_type, void *msg)
+{
+ unsigned long flags;
+ struct OpalMsgNode *msg_node;
+
+ if (msg_type != OPAL_MSG_MEM_ERR)
+ return 0;
+
+ msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
+ if (!msg_node) {
+ pr_err("MEMORY_ERROR: out of memory, Opal message event not"
+ "handled\n");
+ return -ENOMEM;
+ }
+ memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
+
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ list_add(&msg_node->list, &opal_memory_err_list);
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+
+ schedule_work(&mem_error_work);
+ return 0;
+}
+
+static struct notifier_block opal_mem_err_nb = {
+ .notifier_call = opal_memory_err_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_mem_err_init(void)
+{
+ int ret;
+
+ if (!opal_mem_err_nb_init) {
+ ret = opal_message_notifier_register(
+ OPAL_MSG_MEM_ERR, &opal_mem_err_nb);
+ if (ret) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ opal_mem_err_nb_init = 1;
+ }
+ return 0;
+}
+subsys_initcall(opal_mem_err_init);
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 7d07c7e80ec0..b1885db8fdf3 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -18,6 +18,7 @@
#include <asm/opal.h>
#include <asm/firmware.h>
+#include <asm/machdep.h>
static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
@@ -48,8 +49,11 @@ unsigned long __init opal_get_boot_time(void)
else
mdelay(10);
}
- if (rc != OPAL_SUCCESS)
+ if (rc != OPAL_SUCCESS) {
+ ppc_md.get_rtc_time = NULL;
+ ppc_md.set_rtc_time = NULL;
return 0;
+ }
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, &tm);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e7806504e976..3e8829c40fbb 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -126,3 +126,6 @@ OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
+OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
+OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
+OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 1c798cd55372..65499adaecff 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -18,9 +18,12 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/kobject.h>
+#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/firmware.h>
+#include <asm/mce.h>
#include "powernv.h"
@@ -38,6 +41,7 @@ extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
+static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
@@ -88,14 +92,10 @@ static int __init opal_register_exception_handlers(void)
if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
return -ENODEV;
- /* Hookup some exception handlers. We use the fwnmi area at 0x7000
- * to provide the glue space to OPAL
+ /* Hookup some exception handlers except machine check. We use the
+ * fwnmi area at 0x7000 to provide the glue space to OPAL
*/
glue = 0x7000;
- opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
- __pa(opal_mc_secondary_handler[0]),
- glue);
- glue += 128;
opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
0, glue);
glue += 128;
@@ -169,6 +169,95 @@ void opal_notifier_disable(void)
atomic_set(&opal_notifier_hold, 1);
}
+/*
+ * Opal message notifier based on message type. Allow subscribers to get
+ * notified for a specific message type.
+ */
+int opal_message_notifier_register(enum OpalMessageType msg_type,
+ struct notifier_block *nb)
+{
+ if (!nb) {
+ pr_warning("%s: Invalid argument (%p)\n",
+ __func__, nb);
+ return -EINVAL;
+ }
+ if (msg_type > OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Invalid message type argument (%d)\n",
+ __func__, msg_type);
+ return -EINVAL;
+ }
+ return atomic_notifier_chain_register(
+ &opal_msg_notifier_head[msg_type], nb);
+}
+
+static void opal_message_do_notify(uint32_t msg_type, void *msg)
+{
+ /* notify subscribers */
+ atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
+ msg_type, msg);
+}
+
+static void opal_handle_message(void)
+{
+ s64 ret;
+ /*
+ * TODO: pre-allocate a message buffer depending on opal-msg-size
+ * value in /proc/device-tree.
+ */
+ static struct opal_msg msg;
+
+ ret = opal_get_msg(__pa(&msg), sizeof(msg));
+ /* No opal message pending. */
+ if (ret == OPAL_RESOURCE)
+ return;
+
+ /* check for errors. */
+ if (ret) {
+ pr_warning("%s: Failed to retrive opal message, err=%lld\n",
+ __func__, ret);
+ return;
+ }
+
+ /* Sanity check */
+ if (msg.msg_type > OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Unknown message type: %u\n",
+ __func__, msg.msg_type);
+ return;
+ }
+ opal_message_do_notify(msg.msg_type, (void *)&msg);
+}
+
+static int opal_message_notify(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ if (events & OPAL_EVENT_MSG_PENDING)
+ opal_handle_message();
+ return 0;
+}
+
+static struct notifier_block opal_message_nb = {
+ .notifier_call = opal_message_notify,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_message_init(void)
+{
+ int ret, i;
+
+ for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
+ ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
+
+ ret = opal_notifier_register(&opal_message_nb);
+ if (ret) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+}
+early_initcall(opal_message_init);
+
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
s64 rc;
@@ -254,119 +343,62 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
return written;
}
+static int opal_recover_mce(struct pt_regs *regs,
+ struct machine_check_event *evt)
+{
+ int recovered = 0;
+ uint64_t ea = get_mce_fault_addr(evt);
+
+ if (!(regs->msr & MSR_RI)) {
+ /* If MSR_RI isn't set, we cannot recover */
+ recovered = 0;
+ } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
+ /* Platform corrected itself */
+ recovered = 1;
+ } else if (ea && !is_kernel_addr(ea)) {
+ /*
+ * Faulting address is not in kernel text. We should be fine.
+ * We need to find which process uses this address.
+	 * For now, kill the task if we have received the exception while
+ * in userspace.
+ *
+	 * TODO: Queue up this address for hwpoisoning later.
+ */
+ if (user_mode(regs) && !is_global_init(current)) {
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
+ } else
+ recovered = 0;
+ } else if (user_mode(regs) && !is_global_init(current) &&
+ evt->severity == MCE_SEV_ERROR_SYNC) {
+ /*
+ * If we have received a synchronous error when in userspace
+ * kill the task.
+ */
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
+ }
+ return recovered;
+}
+
int opal_machine_check(struct pt_regs *regs)
{
- struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt;
- struct opal_machine_check_event evt;
- const char *level, *sevstr, *subtype;
- static const char *opal_mc_ue_types[] = {
- "Indeterminate",
- "Instruction fetch",
- "Page table walk ifetch",
- "Load/Store",
- "Page table walk Load/Store",
- };
- static const char *opal_mc_slb_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
- static const char *opal_mc_erat_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
- static const char *opal_mc_tlb_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
-
- /* Copy the event structure and release the original */
- evt = *opal_evt;
- opal_evt->in_use = 0;
+ struct machine_check_event evt;
+
+ if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ return 0;
/* Print things out */
- if (evt.version != OpalMCE_V1) {
+ if (evt.version != MCE_V1) {
pr_err("Machine Check Exception, Unknown event version %d !\n",
evt.version);
return 0;
}
- switch(evt.severity) {
- case OpalMCE_SEV_NO_ERROR:
- level = KERN_INFO;
- sevstr = "Harmless";
- break;
- case OpalMCE_SEV_WARNING:
- level = KERN_WARNING;
- sevstr = "";
- break;
- case OpalMCE_SEV_ERROR_SYNC:
- level = KERN_ERR;
- sevstr = "Severe";
- break;
- case OpalMCE_SEV_FATAL:
- default:
- level = KERN_ERR;
- sevstr = "Fatal";
- break;
- }
+ machine_check_print_event_info(&evt);
- printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
- evt.disposition == OpalMCE_DISPOSITION_RECOVERED ?
- "Recovered" : "[Not recovered");
- printk("%s Initiator: %s\n", level,
- evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
- switch(evt.error_type) {
- case OpalMCE_ERROR_TYPE_UE:
- subtype = evt.u.ue_error.ue_error_type <
- ARRAY_SIZE(opal_mc_ue_types) ?
- opal_mc_ue_types[evt.u.ue_error.ue_error_type]
- : "Unknown";
- printk("%s Error type: UE [%s]\n", level, subtype);
- if (evt.u.ue_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.ue_error.effective_address);
- if (evt.u.ue_error.physical_address_provided)
- printk("%s Physial address: %016llx\n",
- level, evt.u.ue_error.physical_address);
- break;
- case OpalMCE_ERROR_TYPE_SLB:
- subtype = evt.u.slb_error.slb_error_type <
- ARRAY_SIZE(opal_mc_slb_types) ?
- opal_mc_slb_types[evt.u.slb_error.slb_error_type]
- : "Unknown";
- printk("%s Error type: SLB [%s]\n", level, subtype);
- if (evt.u.slb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.slb_error.effective_address);
- break;
- case OpalMCE_ERROR_TYPE_ERAT:
- subtype = evt.u.erat_error.erat_error_type <
- ARRAY_SIZE(opal_mc_erat_types) ?
- opal_mc_erat_types[evt.u.erat_error.erat_error_type]
- : "Unknown";
- printk("%s Error type: ERAT [%s]\n", level, subtype);
- if (evt.u.erat_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.erat_error.effective_address);
- break;
- case OpalMCE_ERROR_TYPE_TLB:
- subtype = evt.u.tlb_error.tlb_error_type <
- ARRAY_SIZE(opal_mc_tlb_types) ?
- opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
- : "Unknown";
- printk("%s Error type: TLB [%s]\n", level, subtype);
- if (evt.u.tlb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.tlb_error.effective_address);
- break;
- default:
- case OpalMCE_ERROR_TYPE_UNKNOWN:
- printk("%s Error type: Unknown\n", level);
- break;
- }
- return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
+ if (opal_recover_mce(regs, &evt))
+ return 1;
+ return 0;
}
static irqreturn_t opal_interrupt(int irq, void *data)
@@ -451,10 +483,25 @@ subsys_initcall(opal_init);
void opal_shutdown(void)
{
unsigned int i;
+ long rc = OPAL_BUSY;
+ /* First free interrupts, which will also mask them */
for (i = 0; i < opal_irq_count; i++) {
if (opal_irqs[i])
free_irq(opal_irqs[i], NULL);
opal_irqs[i] = 0;
}
+
+ /*
+	 * Then sync with OPAL, which ensures that anything that can
+	 * potentially write to our memory has completed, such as
+	 * an ongoing dump retrieval.
+ */
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_sync_host_reboot();
+ if (rc == OPAL_BUSY)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 2c6d173842b2..7d6dcc6d5fa9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -460,7 +460,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
return;
pe = &phb->ioda.pe_array[pdn->pe_number];
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
@@ -468,7 +468,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table);
if (dev->subordinate)
pnv_ioda_setup_bus_dma(pe, dev->subordinate);
}
@@ -644,7 +644,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
+ set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
@@ -723,7 +723,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
+ set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
@@ -1144,7 +1144,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
{
struct pci_controller *hose;
struct pnv_phb *phb;
- unsigned long size, m32map_off, iomap_off, pemap_off;
+ unsigned long size, m32map_off, pemap_off, iomap_off = 0;
const __be64 *prop64;
const __be32 *prop32;
int len;
@@ -1231,7 +1231,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
- iomap_off = size;
if (phb->type == PNV_PHB_IODA1) {
iomap_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index f8b4bd8afb2e..e3807d69393e 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -92,7 +92,7 @@ static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
pci_domain_nr(phb->hose->bus), phb->opal_id);
}
- set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
+ set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table);
}
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 4eb33a9ed532..b555ebc57ef5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -124,77 +124,157 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
}
#endif /* CONFIG_PCI_MSI */
-static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
+static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
{
- struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
+ struct OpalIoP7IOCPhbErrorData *data;
int i;
- pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);
-
- pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus);
- pr_info(" slotStatus = 0x%08x\n", data->slotStatus);
- pr_info(" linkStatus = 0x%08x\n", data->linkStatus);
- pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus);
- pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4);
- pr_info(" sourceId = 0x%08x\n", data->sourceId);
-
- pr_info(" errorClass = 0x%016llx\n", data->errorClass);
- pr_info(" correlator = 0x%016llx\n", data->correlator);
-
- pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr);
- pr_info(" lemFir = 0x%016llx\n", data->lemFir);
- pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask);
- pr_info(" lemWOF = 0x%016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);
+ data = (struct OpalIoP7IOCPhbErrorData *)common;
+ pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
+ hose->global_number, common->version);
+
+ pr_info(" brdgCtl: %08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg: %08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus: %08x\n", data->deviceStatus);
+ pr_info(" slotStatus: %08x\n", data->slotStatus);
+ pr_info(" linkStatus: %08x\n", data->linkStatus);
+ pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus: %08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
+ pr_info(" sourceId: %08x\n", data->sourceId);
+ pr_info(" errorClass: %016llx\n", data->errorClass);
+ pr_info(" correlator: %016llx\n", data->correlator);
+ pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
+ pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
+ pr_info(" lemFir: %016llx\n", data->lemFir);
+ pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF: %016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
- pr_info(" PESTB = 0x%016llx\n", data->pestB[i]);
+
+ pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ }
+}
+
+static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
+{
+ struct OpalIoPhb3ErrorData *data;
+ int i;
+
+ data = (struct OpalIoPhb3ErrorData*)common;
+ pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
+ hose->global_number, common->version);
+
+ pr_info(" brdgCtl: %08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg: %08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus: %08x\n", data->deviceStatus);
+ pr_info(" slotStatus: %08x\n", data->slotStatus);
+ pr_info(" linkStatus: %08x\n", data->linkStatus);
+ pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus: %08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
+ pr_info(" sourceId: %08x\n", data->sourceId);
+ pr_info(" errorClass: %016llx\n", data->errorClass);
+ pr_info(" correlator: %016llx\n", data->correlator);
+
+ pr_info(" nFir: %016llx\n", data->nFir);
+ pr_info(" nFirMask: %016llx\n", data->nFirMask);
+ pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
+ pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
+ pr_info(" PhbCsr: %016llx\n", data->phbCsr);
+ pr_info(" lemFir: %016llx\n", data->lemFir);
+ pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF: %016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+
+ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+ if ((data->pestA[i] >> 63) == 0 &&
+ (data->pestB[i] >> 63) == 0)
+ continue;
+
+ pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB: %016llx\n", data->pestB[i]);
}
}
-static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
+void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
+ unsigned char *log_buff)
{
- switch(phb->model) {
- case PNV_PHB_MODEL_P7IOC:
- pnv_pci_dump_p7ioc_diag_data(phb);
+ struct OpalIoPhbErrorCommon *common;
+
+ if (!hose || !log_buff)
+ return;
+
+ common = (struct OpalIoPhbErrorCommon *)log_buff;
+ switch (common->ioType) {
+ case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
+ pnv_pci_dump_p7ioc_diag_data(hose, common);
+ break;
+ case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
+ pnv_pci_dump_phb3_diag_data(hose, common);
break;
default:
- pr_warning("PCI %d: Can't decode this PHB diag data\n",
- phb->hose->global_number);
+ pr_warn("%s: Unrecognized ioType %d\n",
+ __func__, common->ioType);
}
}
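+
The reworked dump no longer switches on the PHB model; it reads the common header at the start of the diag blob and dispatches on ioType. A rough, self-contained illustration of that dispatch pattern follows; the struct layouts and names here are invented for the example and are not OPAL's real definitions.

#include <stdio.h>
#include <stdint.h>

/* Invented stand-ins for the OPAL error-data layouts. */
struct err_common { uint32_t version; uint32_t io_type; };
struct err_p7ioc  { struct err_common common; uint64_t brdg_ctl; };
struct err_phb3   { struct err_common common; uint64_t n_fir; };

enum { ERR_TYPE_P7IOC = 1, ERR_TYPE_PHB3 = 2 };

static void dump_diag(const unsigned char *blob)
{
	/* The common header is the first member of every variant. */
	const struct err_common *common = (const struct err_common *)blob;

	switch (common->io_type) {
	case ERR_TYPE_P7IOC:
		printf("P7IOC diag, version %u\n", common->version);
		break;
	case ERR_TYPE_PHB3:
		printf("PHB3 diag, version %u\n", common->version);
		break;
	default:
		printf("Unrecognized ioType %u\n", common->io_type);
	}
}

int main(void)
{
	struct err_phb3 sample = { { 1, ERR_TYPE_PHB3 }, 0 };

	dump_diag((const unsigned char *)&sample);
	return 0;
}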
@@ -222,7 +302,7 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
* with the normal errors generated when probing empty slots
*/
if (has_diag)
- pnv_pci_dump_phb_diag_data(phb);
+ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
else
pr_warning("PCI %d: No diag data available\n",
phb->hose->global_number);
@@ -484,7 +564,8 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
{
tbl->it_blocksize = 16;
tbl->it_base = (unsigned long)tce_mem;
- tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->it_offset = dma_offset >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_size = tce_size >> 3;
tbl->it_busno = 0;
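+
The new it_page_shift field just makes the page size explicit in the conversion: a DMA window offset in bytes becomes an IOMMU page index once shifted by the table's page shift, and the TCE area in bytes becomes an entry count. A minimal user-space sketch of that arithmetic, with hypothetical input values (only the constant 4K shift is taken from the patch):

#include <stdio.h>
#include <stdint.h>

#define IOMMU_PAGE_SHIFT_4K 12	/* 4 KiB IOMMU pages, as in the patch */

int main(void)
{
	uint64_t dma_offset = 0x80000000ULL;	/* hypothetical window base, in bytes */
	uint64_t tce_size   = 8 * 1024 * 1024;	/* hypothetical TCE table size, in bytes */
	unsigned page_shift = IOMMU_PAGE_SHIFT_4K;

	/* it_offset: first IOMMU page number covered by the window */
	uint64_t it_offset = dma_offset >> page_shift;
	/* it_size: number of TCE entries, 8 bytes each */
	uint64_t it_size = tce_size >> 3;

	printf("it_offset=%llu pages, it_size=%llu entries\n",
	       (unsigned long long)it_offset, (unsigned long long)it_size);
	return 0;
}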
@@ -536,7 +617,7 @@ static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
if (!pdn->iommu_table)
return;
- set_iommu_table_base(&pdev->dev, pdn->iommu_table);
+ set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
}
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
@@ -657,3 +738,32 @@ void __init pnv_pci_init(void)
ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}
+
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return iommu_add_device(dev);
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->iommu_group)
+ iommu_del_device(dev);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+ .notifier_call = tce_iommu_bus_notifier,
+};
+
+static int __init tce_iommu_bus_notifier_init(void)
+{
+ bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+ return 0;
+}
+
+subsys_initcall_sync(tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 1ed8d5f40f5a..13f1942a9a5f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -176,6 +176,7 @@ struct pnv_phb {
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
+ struct OpalIoPhb3ErrorData phb3;
struct OpalIoP7IOCErrorData hub_diag;
} diag;
@@ -186,6 +187,8 @@ extern struct pci_ops pnv_pci_ops;
extern struct pnv_eeh_ops ioda_eeh_ops;
#endif
+void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
+ unsigned char *log_buff);
int pnv_pci_cfg_read(struct device_node *dn,
int where, int size, u32 *val);
int pnv_pci_cfg_write(struct device_node *dn,
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 19884b2a51b4..a932feb2901c 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -145,8 +145,10 @@ static void pnv_shutdown(void)
/* Let the PCI code clear up IODA tables */
pnv_pci_shutdown();
- /* And unregister all OPAL interrupts so they don't fire
- * up while we kexec
+ /*
+ * Stop OPAL activity: Unregister all OPAL interrupts so they
+ * don't fire up while we kexec and make sure all potentially
+ * DMA'ing ops are complete (such as dump retrieval).
*/
opal_shutdown();
}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index e17fa1432d80..a0bca05e26b0 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -143,7 +143,7 @@ static void _dump_areas(unsigned int spe_id, unsigned long priv2,
pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
}
-inline u64 ps3_get_spe_id(void *arg)
+u64 ps3_get_spe_id(void *arg)
{
return spu_pdata(arg)->spe_id;
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 62b4f8025de0..e66643250fee 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -34,7 +34,7 @@ config PPC_SPLPAR
config PSERIES_MSI
bool
- depends on PCI_MSI && EEH
+ depends on PCI_MSI && PPC_PSERIES && EEH
default y
config PSERIES_ENERGY
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 1e561bef459b..2d8bf15879fd 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -25,7 +25,6 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 5db66f1fbc26..7d61498e45c0 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -20,7 +20,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ccb633e077b1..9ef3cc8ebc11 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -689,7 +689,9 @@ static struct eeh_ops pseries_eeh_ops = {
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.read_config = pseries_eeh_read_config,
- .write_config = pseries_eeh_write_config
+ .write_config = pseries_eeh_write_config,
+ .next_error = NULL,
+ .restore_config = NULL
};
/**
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index f253361552ae..33b552ffbe57 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -486,9 +486,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Units of tce entries */
- tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +500,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
- tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+ tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_blocksize = 16;
@@ -537,11 +538,12 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
+ tbl->it_size = size >> tbl->it_page_shift;
}
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
@@ -687,7 +689,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
return;
}
@@ -699,7 +702,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
dn = dn->parent;
if (dn && PCI_DN(dn))
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
@@ -717,21 +721,6 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
-static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
-{
- int ret;
-
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
- if (ret)
- pr_warning("%s: failed to remove DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
- else
- pr_debug("%s: successfully removed DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
-}
-
static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
@@ -761,7 +750,15 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- __remove_ddw(np, ddw_avail, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
delprop:
ret = of_remove_property(np, win64);
@@ -790,68 +787,33 @@ static u64 find_existing_ddw(struct device_node *pdn)
return dma_addr;
}
-static void __restore_default_window(struct eeh_dev *edev,
- u32 ddw_restore_token)
-{
- u32 cfg_addr;
- u64 buid;
- int ret;
-
- /*
- * Get the config address and phb buid of the PE window.
- * Rely on eeh to retrieve this for us.
- * Retrieve them from the pci device, not the node with the
- * dma-window property
- */
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
-
- do {
- ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
- BUID_HI(buid), BUID_LO(buid));
- } while (rtas_busy_delay(ret));
- pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
-}
-
static int find_existing_ddw_windows(void)
{
+ int len;
struct device_node *pdn;
+ struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
- const u32 *ddw_extensions;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
if (!direct64)
continue;
- /*
- * We need to ensure the IOMMU table is active when we
- * return from the IOMMU setup so that the common code
- * can clear the table or find the holes. To that end,
- * first, remove any existing DDW configuration.
- */
- remove_ddw(pdn);
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
+ remove_ddw(pdn);
+ continue;
+ }
- /*
- * Second, if we are running on a new enough level of
- * firmware where the restore API is present, use it to
- * restore the 32-bit window, which was removed in
- * create_ddw.
- * If the API is not present, then create_ddw couldn't
- * have removed the 32-bit window in the first place, so
- * removing the DDW configuration should be sufficient.
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
- NULL);
- if (ddw_extensions && ddw_extensions[0] > 0)
- __restore_default_window(of_node_to_eeh_dev(pdn),
- ddw_extensions[1]);
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
}
return 0;
@@ -921,12 +883,6 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
return ret;
}
-static void restore_default_window(struct pci_dev *dev,
- u32 ddw_restore_token)
-{
- __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
-}
-
struct failed_ddw_pdn {
struct device_node *pdn;
struct list_head list;
@@ -954,13 +910,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u64 dma_addr, max_addr;
struct device_node *dn;
const u32 *uninitialized_var(ddw_avail);
- const u32 *uninitialized_var(ddw_extensions);
- u32 ddw_restore_token = 0;
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
- const void *dma_window = NULL;
- unsigned long liobn, offset, size;
struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -991,42 +943,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
if (!ddw_avail || len < 3 * sizeof(u32))
- goto out_unlock;
-
- /*
- * the extensions property is only required to exist in certain
- * levels of firmware and later
- * the ibm,ddw-extensions property is a list with the first
- * element containing the number of extensions and each
- * subsequent entry is a value corresponding to that extension
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
- if (ddw_extensions) {
- /*
- * each new defined extension length should be added to
- * the top of the switch so the "earlier" entries also
- * get picked up
- */
- switch (ddw_extensions[0]) {
- /* ibm,reset-pe-dma-windows */
- case 1:
- ddw_restore_token = ddw_extensions[1];
- break;
- }
- }
-
- /*
- * Only remove the existing DMA window if we can restore back to
- * the default state. Removing the existing window maximizes the
- * resources available to firmware for dynamic window creation.
- */
- if (ddw_restore_token) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
- __remove_ddw(pdn, ddw_avail, liobn);
- }
+ goto out_failed;
- /*
+ /*
* Query if there is a second window of size to map the
* whole partition. Query returns number of windows, largest
* block assigned to PE (partition endpoint), and two bitmasks
@@ -1035,7 +954,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
- goto out_restore_window;
+ goto out_failed;
if (query.windows_available == 0) {
/*
@@ -1044,7 +963,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* trading in for a larger page size.
*/
dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_restore_window;
+ goto out_failed;
}
if (be32_to_cpu(query.page_size) & 4) {
page_shift = 24; /* 16MB */
@@ -1055,7 +974,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
- goto out_restore_window;
+ goto out_failed;
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
@@ -1064,14 +983,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
- goto out_restore_window;
+ goto out_failed;
}
len = order_base_2(max_addr);
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property for 64bit dma window\n");
- goto out_restore_window;
+ goto out_failed;
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1133,9 +1052,7 @@ out_free_prop:
kfree(win64->value);
kfree(win64);
-out_restore_window:
- if (ddw_restore_token)
- restore_default_window(dev, ddw_restore_token);
+out_failed:
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
if (!fpdn)
@@ -1193,7 +1110,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
- set_iommu_table_base(&dev->dev, pci->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4fca3def9db9..b02af9ef3ff6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -92,7 +92,7 @@ void vpa_init(int cpu)
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
*/
- addr = __pa(&slb_shadow[cpu]);
+ addr = __pa(paca[cpu].slb_shadow_ptr);
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
@@ -153,7 +153,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Make pHyp happy */
if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
+
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 94134a5aecaa..002d5b4112f2 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -17,7 +17,6 @@
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
-#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -62,7 +61,6 @@ static int snooze_loop(struct cpuidle_device *dev,
set_thread_flag(TIF_POLLING_NRFLAG);
while ((!need_resched()) && cpu_online(cpu)) {
- ppc64_runlatch_off();
HMT_low();
HMT_very_low();
}
@@ -102,7 +100,6 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
idle_loop_prolog(&in_purr);
get_lppaca()->donate_dedicated_cpu = 1;
- ppc64_runlatch_off();
HMT_medium();
check_and_cede_processor();
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6f76ae417f47..8e639d7cbda7 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -72,7 +72,7 @@
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -569,7 +569,7 @@ void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
- int page_order = IOMMU_PAGE_SHIFT;
+ int page_order = IOMMU_PAGE_SHIFT_4K;
pr_debug(" -> fw_cmo_feature_init()\n");
spin_lock(&rtas_data_buf_lock);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 62cb527493e7..9a15e5b39bb8 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -260,7 +260,7 @@ static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
- tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
+ tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
uaddr += TCE_PAGE_SIZE;
index++;
@@ -381,8 +381,9 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
/* Init bits and pieces */
tbl->table.it_blocksize = 16;
- tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
- tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->table.it_offset = addr >> tbl->table.it_page_shift;
+ tbl->table.it_size = size >> tbl->table.it_page_shift;
/*
* It's already blank but we clear it anyway.
@@ -449,8 +450,8 @@ static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
if (table) {
pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
pci_name(pdev),
- table->table.it_offset << IOMMU_PAGE_SHIFT,
- (table->table.it_offset << IOMMU_PAGE_SHIFT)
+ table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
+ (table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
+ phb->dma32_region_size - 1);
archdata->dma_data.iommu_table_base = &table->table;
return;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 13ec968be4c7..7baa70d6dc01 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -19,7 +19,7 @@ config PPC_MSI_BITMAP
default y if MPIC
default y if FSL_PCI
default y if PPC4xx_MSI
- default y if POWERNV_MSI
+ default y if PPC_POWERNV
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 10386b676d87..a11bd1d433ad 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -27,7 +27,6 @@
*/
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
index d7fc72239144..fbc885b31946 100644
--- a/arch/powerpc/sysdev/fsl_ifc.c
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -19,7 +19,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 6bc5a546d49f..d631022ffb4b 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -214,10 +214,14 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
struct fsl_lbc_ctrl *ctrl = data;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
u32 status;
+ unsigned long flags;
+ spin_lock_irqsave(&fsl_lbc_lock, flags);
status = in_be32(&lbc->ltesr);
- if (!status)
+ if (!status) {
+ spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_NONE;
+ }
out_be32(&lbc->ltesr, LTESR_CLEAR);
out_be32(&lbc->lteatr, 0);
@@ -260,6 +264,7 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
if (status & ~LTESR_MASK)
dev_err(ctrl->dev, "Unknown error: "
"LTESR 0x%08X\n", status);
+ spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_HANDLED;
}
@@ -298,8 +303,8 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
goto err;
}
- fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
+ fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (!fsl_lbc_ctrl_dev->irq[0]) {
dev_err(&dev->dev, "failed to get irq resource\n");
ret = -ENODEV;
goto err;
@@ -311,20 +316,34 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
if (ret < 0)
goto err;
- ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
+ ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0,
"fsl-lbc", fsl_lbc_ctrl_dev);
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
- fsl_lbc_ctrl_dev->irq);
- ret = fsl_lbc_ctrl_dev->irq;
+ fsl_lbc_ctrl_dev->irq[0]);
+ ret = fsl_lbc_ctrl_dev->irq[0];
goto err;
}
+ fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1);
+ if (fsl_lbc_ctrl_dev->irq[1]) {
+ ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq,
+ IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev);
+ if (ret) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_lbc_ctrl_dev->irq[1]);
+ ret = fsl_lbc_ctrl_dev->irq[1];
+ goto err1;
+ }
+ }
+
/* Enable interrupts for any detected events */
out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
return 0;
+err1:
+ free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev);
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4dfd61df8aba..a625dcf26b2b 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -122,7 +122,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* address width of the SoC such that we can address any internal
* SoC address from across PCI if needed
*/
- if ((dev->bus == &pci_bus_type) &&
+ if ((dev_is_pci(dev)) &&
dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
set_dma_ops(dev, &dma_direct_ops);
set_dma_offset(dev, pci64_dma_offset);
@@ -454,7 +454,7 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus)
}
}
-int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
+int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
int len;
struct pci_controller *hose;
@@ -1035,6 +1035,7 @@ static const struct of_device_id pci_ids[] = {
{ .compatible = "fsl,mpc8548-pcie", },
{ .compatible = "fsl,mpc8610-pci", },
{ .compatible = "fsl,mpc8641-pcie", },
+ { .compatible = "fsl,qoriq-pcie", },
{ .compatible = "fsl,qoriq-pcie-v2.1", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 6149916da3f4..908dbd9826b6 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
@@ -1,7 +1,6 @@
#ifndef __GEF_PIC_H__
#define __GEF_PIC_H__
-#include <linux/init.h>
void gef_pic_cascade(unsigned int, struct irq_desc *);
unsigned int gef_pic_get_irq(void);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 997df6a7ab5d..45598da0b321 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -8,7 +8,6 @@
*/
#undef DEBUG
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index c6c8b526a4f6..1f6c570d66d4 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -152,10 +152,8 @@ static struct pci_ops indirect_pci_ops =
.write = indirect_write_config,
};
-void __init
-setup_indirect_pci(struct pci_controller* hose,
- resource_size_t cfg_addr,
- resource_size_t cfg_data, u32 flags)
+void setup_indirect_pci(struct pci_controller *hose, resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags)
{
resource_size_t base = cfg_addr & PAGE_MASK;
void __iomem *mbase;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index b724622c3a0b..c4828c0be5bd 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -1,6 +1,5 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index 22d7d57eead9..9d9b06217f8b 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -41,6 +41,7 @@
#define MPIC_TIMER_TCR_ROVR_OFFSET 24
#define TIMER_STOP 0x80000000
+#define GTCCR_TOG 0x80000000
#define TIMERS_PER_GROUP 4
#define MAX_TICKS (~0U >> 1)
#define MAX_TICKS_CASCADE (~0U)
@@ -96,8 +97,11 @@ static void convert_ticks_to_time(struct timer_group_priv *priv,
time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
- time->tv_usec = (__kernel_suseconds_t)
- div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
+ time->tv_usec = 0;
+
+ if (tmp_sec <= ticks)
+ time->tv_usec = (__kernel_suseconds_t)
+ div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
return;
}
@@ -327,11 +331,13 @@ void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
casc_priv = priv->timer[handle->num].cascade_handle;
if (casc_priv) {
tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
+ tmp_ticks &= ~GTCCR_TOG;
ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
ticks += tmp_ticks;
} else {
ticks = in_be32(&priv->regs[handle->num].gtccr);
+ ticks &= ~GTCCR_TOG;
}
convert_ticks_to_time(priv, ticks, time);
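+
The fix masks off the GTCCR toggle bit (bit 31) before the count is used, in both the cascaded and the single-timer case, so the toggle is not misread as 2^31 ticks. A small stand-alone sketch of the same arithmetic, using made-up register values:

#include <stdio.h>
#include <stdint.h>

#define GTCCR_TOG         0x80000000u	/* toggle bit, not part of the tick count */
#define MAX_TICKS_CASCADE (~0u)		/* low-order timer wraps every 2^32 - 1 ticks */

int main(void)
{
	uint32_t gtccr_hi = 0x80000005u;	/* hypothetical high timer read, toggle set */
	uint32_t gtccr_lo = 0x00001234u;	/* hypothetical low timer read */

	/* Drop the toggle bit, then combine the cascaded counters. */
	uint64_t ticks = (uint64_t)(gtccr_hi & ~GTCCR_TOG) * (uint64_t)MAX_TICKS_CASCADE;

	ticks += gtccr_lo;
	printf("remaining ticks: %llu\n", (unsigned long long)ticks);
	return 0;
}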
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index a88807b3dd57..d09994164daf 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -16,7 +16,6 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ioport.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 134b07d29435..621575b7e84a 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -14,7 +14,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index cceb2e366738..65aaf15032ae 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -13,7 +13,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 1c062f48f1ac..befaf1123f7f 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -13,7 +13,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index ce5a7b489e4b..9998c0de12d0 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -18,7 +18,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/barrier.h>
#include <asm/page.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index df0fc5821469..c1917cf67c3d 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -12,7 +12,6 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index af9d3469fb99..a90731b3d44a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2051,6 +2051,10 @@ static void dump_one_paca(int cpu)
DUMP(p, stab_addr, "lx");
#endif
DUMP(p, emergency_sp, "p");
+#ifdef CONFIG_PPC_BOOK3S_64
+ DUMP(p, mc_emergency_sp, "p");
+ DUMP(p, in_mce, "x");
+#endif
DUMP(p, data_offset, "lx");
DUMP(p, hw_cpu_id, "x");
DUMP(p, cpu_start, "x");
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 9ef32b3df91f..590214ba736c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -133,7 +133,7 @@ static int wf_lm75_probe(struct i2c_client *client,
lm->inited = 0;
lm->ds1775 = ds1775;
lm->i2c = client;
- lm->sens.name = (char *)name; /* XXX fix constness in structure */
+ lm->sens.name = name;
lm->sens.ops = &wf_lm75_ops;
i2c_set_clientdata(client, lm);
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 945a25b2f31e..87e439b10318 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -95,7 +95,7 @@ static int wf_max6690_probe(struct i2c_client *client,
}
max->i2c = client;
- max->sens.name = (char *)name; /* XXX fix constness in structure */
+ max->sens.name = name;
max->sens.ops = &wf_max6690_ops;
i2c_set_clientdata(client, max);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cde0fd941f0c..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1275,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
struct net_device *netdev = dev_get_drvdata(&vdev->dev);
struct ibmveth_adapter *adapter;
+ struct iommu_table *tbl;
unsigned long ret;
int i;
int rxqentries = 1;
+ tbl = get_iommu_table_base(&vdev->dev);
+
/* netdev inits at probe time along with the structures we need below*/
if (netdev == NULL)
- return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+ return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
adapter = netdev_priv(netdev);
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
- ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+ ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1294,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret +=
adapter->rx_buff_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
- buff_size);
+ buff_size, tbl);
rxqentries += adapter->rx_buff_pool[i].size;
}
/* add the size of the receive queue entries */
- ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+ ret += IOMMU_PAGE_ALIGN(
+ rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
return ret;
}
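+
With IOMMU_PAGE_ALIGN now taking the device's iommu_table, the entitlement estimate rounds each buffer up to that table's page size instead of a fixed 4K. The underlying align-up is ordinary mask arithmetic; a hedged sketch with a hypothetical page shift and MTU:

#include <stdio.h>
#include <stdint.h>

/* Round len up to the IOMMU page size implied by page_shift. */
static uint64_t iommu_page_align(uint64_t len, unsigned page_shift)
{
	uint64_t page_size = 1ULL << page_shift;

	return (len + page_size - 1) & ~(page_size - 1);
}

int main(void)
{
	unsigned page_shift = 16;	/* hypothetical 64K IOMMU pages */
	uint64_t mtu = 9000;		/* hypothetical jumbo-frame MTU */

	printf("aligned MTU buffer: %llu bytes\n",
	       (unsigned long long)iommu_page_align(mtu, page_shift));
	return 0;
}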
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db344bda0..b24aa010f68c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -366,7 +366,7 @@ config TRACE_SINK
"Trace data router for MIPI P1149.7 cJTAG standard".
config PPC_EPAPR_HV_BYTECHAN
- tristate "ePAPR hypervisor byte channel driver"
+ bool "ePAPR hypervisor byte channel driver"
depends on PPC
select EPAPR_PARAVIRT
help
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
* enforcing the limit based on the max that the guest can map.
*/
down_write(&current->mm->mmap_sem);
- npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
locked = current->mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
down_write(&current->mm->mmap_sem);
current->mm->locked_vm -= (container->tbl->it_size <<
- IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+ IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
up_write(&current->mm->mmap_sem);
}
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
if (info.argsz < minsz)
return -EINVAL;
- info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
- info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+ info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+ info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
info.flags = 0;
if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
- if ((param.size & ~IOMMU_PAGE_MASK) ||
- (param.vaddr & ~IOMMU_PAGE_MASK))
+ if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+ (param.vaddr & ~IOMMU_PAGE_MASK_4K))
return -EINVAL;
/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (ret)
return ret;
- for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+ for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
ret = iommu_put_tce_user_mode(tbl,
- (param.iova >> IOMMU_PAGE_SHIFT) + i,
+ (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
tce);
if (ret)
break;
- tce += IOMMU_PAGE_SIZE;
+ tce += IOMMU_PAGE_SIZE_4K;
}
if (ret)
iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT, i);
+ param.iova >> IOMMU_PAGE_SHIFT_4K, i);
iommu_flush_tce(tbl);
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
- if (param.size & ~IOMMU_PAGE_MASK)
+ if (param.size & ~IOMMU_PAGE_MASK_4K)
return -EINVAL;
ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.size >> IOMMU_PAGE_SHIFT_4K);
if (ret)
return ret;
ret = iommu_clear_tces_and_put_pages(tbl,
- param.iova >> IOMMU_PAGE_SHIFT,
- param.size >> IOMMU_PAGE_SHIFT);
+ param.iova >> IOMMU_PAGE_SHIFT_4K,
+ param.size >> IOMMU_PAGE_SHIFT_4K);
iommu_flush_tce(tbl);
return ret;
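+
The locked-memory accounting above converts a TCE window expressed in 4K IOMMU pages into system pages before it is compared against RLIMIT_MEMLOCK. The conversion is just two shifts; a minimal sketch with assumed sizes (the 64K system page shift is an assumption, only the 4K IOMMU shift comes from the patch):

#include <stdio.h>
#include <stdint.h>

#define IOMMU_PAGE_SHIFT_4K 12
#define SYS_PAGE_SHIFT      16	/* hypothetical 64K system pages */

int main(void)
{
	uint64_t it_size = 1ULL << 20;	/* hypothetical window: 2^20 TCE entries */

	/* Bytes covered by the window, then system pages needed to back it. */
	uint64_t bytes  = it_size << IOMMU_PAGE_SHIFT_4K;
	uint64_t npages = bytes >> SYS_PAGE_SHIFT;

	printf("window spans %llu bytes, %llu system pages\n",
	       (unsigned long long)bytes, (unsigned long long)npages);
	return 0;
}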
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d9992fc128ca..f28f46eade6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1895,7 +1895,7 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
}
#endif
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 0beaee9dac1f..2b77058a7335 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -116,6 +116,7 @@ extern const void *of_flat_dt_match_machine(const void *default_match,
extern void unflatten_device_tree(void);
extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
+extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_FLATTREE */
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 9696a5e2c437..6bdf8c61d221 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -685,7 +685,7 @@ do { \
else \
{ \
r = 0; \
- if (X##_s) \
+ if (!X##_s) \
r = ~r; \
} \
FP_SET_EXCEPTION(FP_EX_INVALID); \
@@ -743,12 +743,17 @@ do { \
} \
else \
{ \
+ int _lz0, _lz1; \
if (X##_e <= -_FP_WORKBITS - 1) \
_FP_FRAC_SET_##wc(X, _FP_MINFRAC_##wc); \
else \
_FP_FRAC_SRS_##wc(X, _FP_FRACBITS_##fs - 1 - X##_e, \
_FP_WFRACBITS_##fs); \
+ _FP_FRAC_CLZ_##wc(_lz0, X); \
_FP_ROUND(wc, X); \
+ _FP_FRAC_CLZ_##wc(_lz1, X); \
+ if (_lz1 < _lz0) \
+ X##_e++; /* For overflow detection. */ \
_FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
_FP_FRAC_ASSEMBLE_##wc(r, X, rsize); \
} \
@@ -762,7 +767,7 @@ do { \
if (!rsigned) \
{ \
r = 0; \
- if (X##_s) \
+ if (!X##_s) \
r = ~r; \
} \
else if (rsigned != 2) \
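+
The op-common.h hunk detects a carry out of rounding by comparing the fraction's leading-zero count before and after _FP_ROUND: if rounding shortened the run of leading zeros, the value gained a bit and the exponent is bumped for the later overflow check. The same check in plain C on a 64-bit working fraction, with values invented for the demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A fraction of all ones: rounding up must carry into the next bit. */
	uint64_t frac = 0x00ffffffffffffffULL;
	int exponent = 10;			/* hypothetical unbiased exponent */

	int lz0 = __builtin_clzll(frac);	/* leading zeros before rounding */
	frac += 1;				/* model a round-to-nearest carry */
	int lz1 = __builtin_clzll(frac);	/* leading zeros after rounding */

	if (lz1 < lz0)
		exponent++;			/* carry out: adjust exponent for overflow check */

	printf("lz before=%d after=%d exponent=%d\n", lz0, lz1, exponent);
	return 0;
}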
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 463b7fbf0d1d..36cb46cddf61 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -613,7 +613,7 @@ static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
return 0;
}
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+#ifdef CONFIG_NUMA_BALANCING
/*
* This is used to mark a range of virtual addresses to be inaccessible.
* These are later cleared by a NUMA hinting fault. Depending on these
@@ -627,7 +627,6 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
int nr_updated;
- BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
if (nr_updated)
@@ -641,7 +640,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
{
return 0;
}
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#endif /* CONFIG_NUMA_BALANCING */
/*
* Walk through page tables and collect pages to be migrated.
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 17855761e6b7..40610984a1b5 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -584,12 +584,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
+ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
if (info->hdr->e_machine == EM_PPC64)
/* Special register function linked on all modules during final link of .ko */
if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
+ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
/* Do not ignore this symbol */
return 0;
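+
modpost now also skips the Altivec save/restore helpers (_restvr_N/_savevr_N) that the PowerPC toolchain links into every module. The check itself is a plain prefix comparison; a small stand-alone version of the idea, with an invented prefix table:

#include <stdio.h>
#include <string.h>

/* Hypothetical list of linker-provided helper prefixes to ignore. */
static const char *const ignored_prefixes[] = {
	"_restgpr_", "_savegpr_", "_restvr_", "_savevr_",
};

static int is_ignored(const char *symname)
{
	size_t i;

	for (i = 0; i < sizeof(ignored_prefixes) / sizeof(ignored_prefixes[0]); i++)
		if (strncmp(symname, ignored_prefixes[i],
			    strlen(ignored_prefixes[i])) == 0)
			return 1;
	return 0;
}

int main(void)
{
	printf("_savevr_20: %d\n", is_ignored("_savevr_20"));	/* prints 1 */
	printf("my_symbol:  %d\n", is_ignored("my_symbol"));	/* prints 0 */
	return 0;
}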