Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                            |    4
-rw-r--r--  arch/powerpc/include/asm/kvm.h                  |   18
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h              |    1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h           |    3
-rw-r--r--  arch/powerpc/include/asm/machdep.h              |    5
-rw-r--r--  arch/powerpc/include/asm/macio.h                |    6
-rw-r--r--  arch/powerpc/include/asm/mediabay.h             |   27
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h           |   35
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h              |   47
-rw-r--r--  arch/powerpc/include/asm/pSeries_reconfig.h     |    1
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h           |    5
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h       |   37
-rw-r--r--  arch/powerpc/kernel/io.c                        |    4
-rw-r--r--  arch/powerpc/kernel/smp.c                       |    3
-rw-r--r--  arch/powerpc/kernel/sysfs.c                     |   19
-rw-r--r--  arch/powerpc/kvm/book3s.c                       |   49
-rw-r--r--  arch/powerpc/kvm/book3s_64_emulate.c            |   38
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c                |    2
-rw-r--r--  arch/powerpc/kvm/powerpc.c                      |    3
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                 |    6
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c            |    2
-rw-r--r--  arch/powerpc/mm/subpage-prot.c                  |   15
-rw-r--r--  arch/powerpc/platforms/52xx/Kconfig             |    5
-rw-r--r--  arch/powerpc/platforms/52xx/Makefile            |    1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c       |  428
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c   |  560
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile         |    2
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c          |  558
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c       |    2
29 files changed, 1793 insertions, 93 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5dbd375a3f2..0df57466e78 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -320,6 +320,10 @@ config HOTPLUG_CPU
Say N if you are unsure.
+config ARCH_CPU_PROBE_RELEASE
+ def_bool y
+ depends on HOTPLUG_CPU
+
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index c9ca97f43bc..81f3b0b5601 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -47,7 +47,23 @@ struct kvm_regs {
struct kvm_sregs {
__u32 pvr;
- char pad[1020];
+ union {
+ struct {
+ __u64 sdr1;
+ struct {
+ struct {
+ __u64 slbe;
+ __u64 slbv;
+ } slb[64];
+ } ppc64;
+ struct {
+ __u32 sr[16];
+ __u64 ibat[8];
+ __u64 dbat[8];
+ } ppc32;
+ } s;
+ __u8 pad[1020];
+ } u;
};
struct kvm_fpu {
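
The new layout is what userspace sees through the existing KVM_GET_SREGS/KVM_SET_SREGS ioctls. A rough, hedged sketch of a consumer (not part of the patch: the vcpu descriptor handling and the helper name are assumptions; only the ioctl name and the field names above are given by this change), dumping a 64-bit guest's SLB:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch: read the segment state of a vcpu and print its SLB.
 * 'vcpu_fd' is assumed to be an already-created KVM vcpu descriptor and
 * the guest is assumed to be running in 64-bit (SLB) mode. */
static int dump_guest_slb(int vcpu_fd)
{
	struct kvm_sregs sregs;
	int i;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;

	printf("sdr1 = 0x%llx\n", (unsigned long long)sregs.u.s.sdr1);
	for (i = 0; i < 64; i++)
		printf("slb[%02d]: slbe=0x%llx slbv=0x%llx\n", i,
		       (unsigned long long)sregs.u.s.ppc64.slb[i].slbe,
		       (unsigned long long)sregs.u.s.ppc64.slb[i].slbv);
	return 0;
}
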
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19ddb352fd0..af2abe74f54 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -87,6 +87,7 @@
#define BOOK3S_IRQPRIO_MAX 16
#define BOOK3S_HFLAG_DCBZ32 0x1
+#define BOOK3S_HFLAG_SLB 0x2
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c6011336371..74b7369770d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -46,6 +46,7 @@ struct kvmppc_sr {
};
struct kvmppc_bat {
+ u64 raw;
u32 bepi;
u32 bepi_mask;
bool vs;
@@ -113,6 +114,8 @@ extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, boo
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
+ bool upper, u32 val);
extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 9efa2be7833..9f0fc9e6ce0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -266,6 +266,11 @@ struct machdep_calls {
void (*suspend_disable_irqs)(void);
void (*suspend_enable_irqs)(void);
#endif
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ ssize_t (*cpu_probe)(const char *, size_t);
+ ssize_t (*cpu_release)(const char *, size_t);
+#endif
};
extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 079c06eae44..a062c57696d 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -39,6 +39,7 @@ struct macio_dev
struct macio_bus *bus; /* macio bus this device is on */
struct macio_dev *media_bay; /* Device is part of a media bay */
struct of_device ofdev;
+ struct device_dma_parameters dma_parms; /* ide needs that */
int n_resources;
struct resource resource[MACIO_DEV_COUNT_RESOURCES];
int n_interrupts;
@@ -78,6 +79,8 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
return res->end - res->start + 1;
}
+extern int macio_enable_devres(struct macio_dev *dev);
+
extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
extern void macio_release_resource(struct macio_dev *dev, int resource_no);
extern int macio_request_resources(struct macio_dev *dev, const char *name);
@@ -131,6 +134,9 @@ struct macio_driver
int (*resume)(struct macio_dev* dev);
int (*shutdown)(struct macio_dev* dev);
+#ifdef CONFIG_PMAC_MEDIABAY
+ void (*mediabay_event)(struct macio_dev* dev, int mb_state);
+#endif
struct device_driver driver;
};
#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
index b2efb332580..11037a4133e 100644
--- a/arch/powerpc/include/asm/mediabay.h
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -17,26 +17,31 @@
#define MB_POWER 6 /* media bay contains a Power device (???) */
#define MB_NO 7 /* media bay contains nothing */
-/* Number of bays in the machine or 0 */
-extern int media_bay_count;
+struct macio_dev;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
-#include <linux/ide.h>
+#ifdef CONFIG_PMAC_MEDIABAY
-int check_media_bay_by_base(unsigned long base, int what);
-/* called by IDE PMAC host driver to register IDE controller for media bay */
-int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
- int irq, ide_hwif_t *hwif);
+/* Check the content type of the bay, returns MB_NO if the bay is still
+ * transitioning
+ */
+extern int check_media_bay(struct macio_dev *bay);
-int check_media_bay(struct device_node *which_bay, int what);
+/* The ATA driver uses the calls below to temporarily hold on the
+ * media bay callbacks while initializing the interface
+ */
+extern void lock_media_bay(struct macio_dev *bay);
+extern void unlock_media_bay(struct macio_dev *bay);
#else
-static inline int check_media_bay(struct device_node *which_bay, int what)
+static inline int check_media_bay(struct macio_dev *bay)
{
- return -ENODEV;
+ return MB_NO;
}
+static inline void lock_media_bay(struct macio_dev *bay) { }
+static inline void unlock_media_bay(struct macio_dev *bay) { }
+
#endif
#endif /* __KERNEL__ */
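
As a usage illustration of the reworked interface (a hedged sketch: the driver fragment and its name are invented; only check_media_bay(), lock_media_bay(), unlock_media_bay() and MB_NO come from this header), a macio driver that sits in a media bay might do:

/* Hypothetical macio driver fragment: hold off media-bay event callbacks
 * while poking the interface, then check what the bay contains. */
static int example_macio_attach(struct macio_dev *mdev)
{
	struct macio_dev *bay = mdev->media_bay;
	int content;

	if (!bay)
		return 0;		/* device is not inside a media bay */

	lock_media_bay(bay);
	content = check_media_bay(bay);
	/* ... set up the interface; MB_NO means the bay is still settling ... */
	unlock_media_bay(bay);

	return (content == MB_NO) ? -ENODEV : 0;
}
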
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 7514ec2f854..2102b214a87 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -373,6 +373,38 @@ extern void slb_set_size(u16 size);
#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+ unsigned long maxaddr; /* only addresses < this are protected */
+ unsigned int **protptrs[2];
+ unsigned int *low_prot[4];
+};
+
+#define SBP_L1_BITS (PAGE_SHIFT - 2)
+#define SBP_L2_BITS (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(struct mm_struct *mm);
+extern void subpage_prot_init_new_context(struct mm_struct *mm);
+#else
+static inline void subpage_prot_free(struct mm_struct *mm) {}
+static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+
typedef unsigned long mm_context_id_t;
typedef struct {
@@ -386,6 +418,9 @@ typedef struct {
u16 sllp; /* SLB page size encoding */
#endif
unsigned long vdso_base;
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+ struct subpage_prot_table spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
} mm_context_t;
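
To make the tree layout above concrete, here is an illustrative lookup for an effective address (a sketch only: the helper name is made up, the walk mirrors the subpage_protection() change in hash_utils_64.c further down, and a 64k base page size is assumed, so one page of u32 protection words covers (65536/4) * 64k = 1GB and one page of pointers covers (65536/8) * 1GB = 8TB, as the comment states):

/* Hypothetical helper: return the 32-bit protection word covering the
 * 64k page that contains 'ea', or 0 (full permissions) if none is set.
 * The 4-bit field for the individual 4k subpage is then extracted from
 * this word by the hash fault path. */
static u32 spp_word_for(struct subpage_prot_table *spt, unsigned long ea)
{
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;			/* nothing protected this high */

	if (ea < 0x100000000UL) {
		sbpm = spt->low_prot;		/* fast path for addresses < 4GB */
	} else {
		unsigned long idx = ea >> SBP_L3_SHIFT;

		if (idx >= ARRAY_SIZE(spt->protptrs))
			return 0;
		sbpm = spt->protptrs[idx];
		if (!sbpm)
			return 0;
	}

	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
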
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1b4f697abbd..b664ce79a17 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void mpc52xx_restart(char *cmd);
+/* mpc52xx_gpt.c */
+struct mpc52xx_gpt_priv;
+extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
+extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous);
+extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
+extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
+
+/* mpc52xx_lpbfifo.c */
+#define MPC52XX_LPBFIFO_FLAG_READ (0)
+#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
+#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
+#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
+#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
+
+struct mpc52xx_lpbfifo_request {
+ struct list_head list;
+
+ /* localplus bus address */
+ unsigned int cs;
+ size_t offset;
+
+ /* Memory address */
+ void *data;
+ phys_addr_t data_phys;
+
+ /* Details of transfer */
+ size_t size;
+ size_t pos; /* current position of transfer */
+ int flags;
+
+ /* What to do when finished */
+ void (*callback)(struct mpc52xx_lpbfifo_request *);
+
+ void *priv; /* Driver private data */
+
+ /* statistics */
+ int irq_count;
+ int irq_ticks;
+ u8 last_byte;
+ int buffer_not_done_cnt;
+};
+
+extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_poll(void);
+
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
extern unsigned int mpc52xx_get_irq(void);
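
A usage sketch of the FIFO interface declared above (hedged: the chip select, offset and the completion-based wrapper are invented for the example; only the request fields, the flags and the three exported functions come from this header):

#include <linux/completion.h>
#include <asm/mpc52xx.h>

static DECLARE_COMPLETION(lpb_example_done);

static void lpb_example_callback(struct mpc52xx_lpbfifo_request *req)
{
	complete(&lpb_example_done);
}

/* Read 'len' bytes from LocalPlus chip select 0, offset 0, in PIO mode. */
static int lpb_example_read(void *buf, size_t len)
{
	struct mpc52xx_lpbfifo_request req = {
		.cs		= 0,
		.offset		= 0,
		.data		= buf,
		.size		= len,
		.flags		= MPC52XX_LPBFIFO_FLAG_READ |
				  MPC52XX_LPBFIFO_FLAG_NO_DMA,
		.callback	= lpb_example_callback,
	};
	int rc = mpc52xx_lpbfifo_submit(&req);

	if (rc)
		return rc;
	wait_for_completion(&lpb_example_done);
	return 0;
}
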
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index e482e5352e6..d4b4bfa26fb 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
+extern struct blocking_notifier_head pSeries_reconfig_chain;
#else /* !CONFIG_PPC_PSERIES */
static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 5c1cd73dafa..605f5c5398d 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -28,10 +28,6 @@
*/
#define MAX_PGTABLE_INDEX_SIZE 0xf
-#ifndef CONFIG_PPC_SUBPAGE_PROT
-static inline void subpage_prot_free(pgd_t *pgd) {}
-#endif
-
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
@@ -42,7 +38,6 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- subpage_prot_free(pgd);
kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 82b72207c51..c4490f9c67c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,41 +76,4 @@
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
-
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these. Basically we have a 3-level tree, with the top level being
- * the protptrs array. To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k). For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
- unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
- unsigned int *low_prot[4];
-};
-
-#undef PGD_TABLE_SIZE
-#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
- sizeof(struct subpage_prot_table))
-
-#define SBP_L1_BITS (PAGE_SHIFT - 2)
-#define SBP_L2_BITS (PAGE_SHIFT - 3)
-#define SBP_L1_COUNT (1 << SBP_L1_BITS)
-#define SBP_L2_COUNT (1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(pgd_t *pgd);
-
-static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
-{
- return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
-}
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 1882bf419fa..8dc7547c237 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
dest++;
n--;
}
- while(n > 4) {
+ while(n >= 4) {
*((u32 *)dest) = *((volatile u32 *)vsrc);
eieio();
vsrc += 4;
@@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
vdest++;
n--;
}
- while(n > 4) {
+ while(n >= 4) {
*((volatile u32 *)vdest) = *((volatile u32 *)src);
src += 4;
vdest += 4;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9b86a74d281..97196eefef3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
static void stop_this_cpu(void *dummy)
{
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
local_irq_disable();
while (1)
;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 956ab33fd73..e235e52dc4f 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu)
cacheinfo_cpu_offline(cpu);
}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ssize_t arch_cpu_probe(const char *buf, size_t count)
+{
+ if (ppc_md.cpu_probe)
+ return ppc_md.cpu_probe(buf, count);
+
+ return -EINVAL;
+}
+
+ssize_t arch_cpu_release(const char *buf, size_t count)
+{
+ if (ppc_md.cpu_release)
+ return ppc_md.cpu_release(buf, count);
+
+ return -EINVAL;
+}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
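
The generic sysfs layer routes writes to /sys/devices/system/cpu/probe and /sys/devices/system/cpu/release into arch_cpu_probe()/arch_cpu_release(), which now simply forward to the platform. A hedged sketch of how a platform could hook in (every name below is hypothetical; only the ppc_md.cpu_probe/cpu_release members come from this series):

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t example_cpu_probe(const char *buf, size_t count)
{
	/* parse the CPU identifier in 'buf', bring the new CPU into the
	 * system, then return 'count' on success or a negative errno */
	return count;
}

static ssize_t example_cpu_release(const char *buf, size_t count)
{
	/* offline and give back the CPU named in 'buf' */
	return count;
}
#endif

static int __init example_platform_setup(void)
{
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = example_cpu_probe;
	ppc_md.cpu_release = example_cpu_release;
#endif
	return 0;
}
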
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 42037d46a41..3e294bd9b8c 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -281,6 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
vcpu->arch.pvr = pvr;
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
kvmppc_mmu_book3s_64_init(vcpu);
@@ -762,14 +763,62 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
sregs->pvr = vcpu->arch.pvr;
+
+ sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
+ sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
+ }
+ for (i = 0; i < 8; i++) {
+ sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+ sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+ }
+ }
return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ vcpu3s->sdr1 = sregs->u.s.sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+ sregs->u.s.ppc64.slb[i].slbe);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+ }
+ for (i = 0; i < 8; i++) {
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+ (u32)sregs->u.s.ppc32.ibat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+ (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+ (u32)sregs->u.s.ppc32.dbat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+ (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+ }
+ }
+
+ /* Flush the MMU after messing with the segments */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index c343e67306e..1027eac6d47 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -185,7 +185,27 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
+void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
+ u32 val)
+{
+ if (upper) {
+ /* Upper BAT */
+ u32 bl = (val >> 2) & 0x7ff;
+ bat->bepi_mask = (~bl << 17);
+ bat->bepi = val & 0xfffe0000;
+ bat->vs = (val & 2) ? 1 : 0;
+ bat->vp = (val & 1) ? 1 : 0;
+ bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
+ } else {
+ /* Lower BAT */
+ bat->brpn = val & 0xfffe0000;
+ bat->wimg = (val >> 3) & 0xf;
+ bat->pp = val & 3;
+ bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
+ }
+}
+
+static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_bat *bat;
@@ -207,19 +227,7 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u64 val)
BUG();
}
- if (!(sprn % 2)) {
- /* Upper BAT */
- u32 bl = (val >> 2) & 0x7ff;
- bat->bepi_mask = (~bl << 17);
- bat->bepi = val & 0xfffe0000;
- bat->vs = (val & 2) ? 1 : 0;
- bat->vp = (val & 1) ? 1 : 0;
- } else {
- /* Lower BAT */
- bat->brpn = val & 0xfffe0000;
- bat->wimg = (val >> 3) & 0xf;
- bat->pp = val & 3;
- }
+ kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
@@ -243,7 +251,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_IBAT4U ... SPRN_IBAT7L:
case SPRN_DBAT0U ... SPRN_DBAT3L:
case SPRN_DBAT4U ... SPRN_DBAT7L:
- kvmppc_write_bat(vcpu, sprn, vcpu->arch.gpr[rs]);
+ kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a31f9c677d2..5598f88f142 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -473,4 +473,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
+
+ vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 692c3709011..d82551efbfb 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -144,6 +144,9 @@ int kvm_dev_ioctl_check_extension(long ext)
int r;
switch (ext) {
+ case KVM_CAP_PPC_SEGSTATE:
+ r = 1;
+ break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
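
Userspace can gate its use of the new kvm_sregs layout on this capability; a minimal sketch (the helper name and the already-open /dev/kvm descriptor are assumptions):

/* Returns non-zero if the kernel advertises the segment-state sregs layout. */
static int have_segstate(int kvm_fd)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SEGSTATE) > 0;
}
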
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6810128aba3..50f867d657d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -835,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
* Result is 0: full permissions, _PAGE_RW: read-only,
* _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
*/
-static int subpage_protection(pgd_t *pgdir, unsigned long ea)
+static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
- struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 spp = 0;
u32 **sbpm, *sbpp;
@@ -865,7 +865,7 @@ static int subpage_protection(pgd_t *pgdir, unsigned long ea)
}
#else /* CONFIG_PPC_SUBPAGE_PROT */
-static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
+static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
return 0;
}
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b9e4cc2c205..b910d37aea1 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -76,6 +76,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
if (slice_mm_new_context(mm))
slice_set_user_psize(mm, mmu_virtual_psize);
+ subpage_prot_init_new_context(mm);
mm->context.id = index;
return 0;
@@ -92,5 +93,6 @@ EXPORT_SYMBOL_GPL(__destroy_context);
void destroy_context(struct mm_struct *mm)
{
__destroy_context(mm->context.id);
+ subpage_prot_free(mm);
mm->context.id = NO_CONTEXT;
}
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 4cafc0c33d0..a040b81e93b 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -24,9 +24,9 @@
* Also makes sure that the subpage_prot_table structure is
* reinitialized for the next user.
*/
-void subpage_prot_free(pgd_t *pgd)
+void subpage_prot_free(struct mm_struct *mm)
{
- struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
unsigned long i, j, addr;
u32 **p;
@@ -51,6 +51,13 @@ void subpage_prot_free(pgd_t *pgd)
spt->maxaddr = 0;
}
+void subpage_prot_init_new_context(struct mm_struct *mm)
+{
+ struct subpage_prot_table *spt = &mm->context.spt;
+
+ memset(spt, 0, sizeof(*spt));
+}
+
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
int npages)
{
@@ -87,7 +94,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
int i, nw;
unsigned long next, limit;
@@ -136,7 +143,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
int i, nw;
unsigned long next, limit;
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 8b8e9560a31..47ea1be1481 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO
select GENERIC_GPIO
help
Enable gpiolib support for mpc5200 based boards
+
+config PPC_MPC5200_LPBFIFO
+ tristate "MPC5200 LocalPlus bus FIFO driver"
+ depends on PPC_MPC52xx
+ select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index bfd4f52cf3d..2bc8cd0c5cf 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
endif
obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
+obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 4d76b7f2336..17ecdf4c87a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -16,8 +16,14 @@
* output signals or measure input signals.
*
* This driver supports the GPIO and IRQ controller functions of the GPT
- * device. Timer functions are not yet supported, nor is the watchdog
- * timer.
+ * device. Timer functions are not yet supported.
+ *
+ * The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used,
+ * this prevents the use of any gpt0 gpt function (i.e. they will fail with
+ * -EBUSY). Thus, the safety wdt function always has precedence over the gpt
+ * function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT,
+ * this means that gpt0 is locked in wdt mode until the next reboot - this
+ * may be a requirement in safety applications.
*
* To use the GPIO function, the following two properties must be added
* to the device tree node for the gpt device (typically in the .dts file
@@ -46,17 +52,24 @@
* the output mode. This driver does not change the output mode setting.
*/
+#include <linux/device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/div64.h>
#include <asm/mpc52xx.h>
MODULE_DESCRIPTION("Freescale MPC52xx gpt driver");
-MODULE_AUTHOR("Sascha Hauer, Grant Likely");
+MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß");
MODULE_LICENSE("GPL");
/**
@@ -66,18 +79,27 @@ MODULE_LICENSE("GPL");
* @lock: spinlock to coordinate between different functions.
* @of_gc: of_gpio_chip instance structure; used when GPIO is enabled
* @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
+ * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
+ * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
+ * if the timer is actively used as wdt which blocks gpt functions
*/
struct mpc52xx_gpt_priv {
+ struct list_head list; /* List of all GPT devices */
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
spinlock_t lock;
struct irq_host *irqhost;
+ u32 ipb_freq;
+ u8 wdt_mode;
#if defined(CONFIG_GPIOLIB)
struct of_gpio_chip of_gc;
#endif
};
+LIST_HEAD(mpc52xx_gpt_list);
+DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
+
#define MPC52xx_GPT_MODE_MS_MASK (0x07)
#define MPC52xx_GPT_MODE_MS_IC (0x01)
#define MPC52xx_GPT_MODE_MS_OC (0x02)
@@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv {
#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20)
#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30)
+#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000)
+#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400)
+#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200)
#define MPC52xx_GPT_MODE_IRQ_EN (0x0100)
+#define MPC52xx_GPT_MODE_WDT_EN (0x8000)
#define MPC52xx_GPT_MODE_ICT_MASK (0x030000)
#define MPC52xx_GPT_MODE_ICT_RISING (0x010000)
#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000)
#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000)
+#define MPC52xx_GPT_MODE_WDT_PING (0xa5)
+
#define MPC52xx_GPT_STATUS_IRQMASK (0x000f)
+#define MPC52xx_GPT_CAN_WDT (1 << 0)
+#define MPC52xx_GPT_IS_WDT (1 << 1)
+
+
/* ---------------------------------------------------------------------
* Cascaded interrupt controller hooks
*/
@@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
- if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) {
+ if ((intsize < 1) || (intspec[0] > 3)) {
dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name);
return -EINVAL;
}
@@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
{
int cascade_virq;
unsigned long flags;
-
- /* Only setup cascaded IRQ if device tree claims the GPT is
- * an interrupt controller */
- if (!of_find_property(node, "interrupt-controller", NULL))
- return;
+ u32 mode;
cascade_virq = irq_of_parse_and_map(node, 0);
+ if (!cascade_virq)
+ return;
gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
&mpc52xx_gpt_irq_ops, -1);
@@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
}
gpt->irqhost->host_data = gpt;
-
set_irq_data(cascade_virq, gpt);
set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
- /* Set to Input Capture mode */
+ /* If the GPT is currently disabled, then change it to be in Input
+ * Capture mode. If the mode is non-zero, then the pin could be
+ * already in use for something. */
spin_lock_irqsave(&gpt->lock, flags);
- clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
- MPC52xx_GPT_MODE_MS_IC);
+ mode = in_be32(&gpt->regs->mode);
+ if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0)
+ out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC);
spin_unlock_irqrestore(&gpt->lock, flags);
dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq);
@@ -335,6 +367,354 @@ static void
mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
#endif /* defined(CONFIG_GPIOLIB) */
+/***********************************************************************
+ * Timer API
+ */
+
+/**
+ * mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number
+ * @irq: irq of timer.
+ */
+struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
+{
+ struct mpc52xx_gpt_priv *gpt;
+ struct list_head *pos;
+
+ /* Iterate over the list of timers looking for a matching device */
+ mutex_lock(&mpc52xx_gpt_list_mutex);
+ list_for_each(pos, &mpc52xx_gpt_list) {
+ gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
+ if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+ return gpt;
+ }
+ }
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_from_irq);
+
+static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous, int as_wdt)
+{
+ u32 clear, set;
+ u64 clocks;
+ u32 prescale;
+ unsigned long flags;
+
+ clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS;
+ set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE;
+ if (as_wdt) {
+ clear |= MPC52xx_GPT_MODE_IRQ_EN;
+ set |= MPC52xx_GPT_MODE_WDT_EN;
+ } else if (continuous)
+ set |= MPC52xx_GPT_MODE_CONTINUOUS;
+
+ /* Determine the number of clocks in the requested period. 64 bit
+ * arithmatic is done here to preserve the precision until the value
+ * is scaled back down into the u32 range. Period is in 'ns', bus
+ * frequency is in Hz. */
+ clocks = period * (u64)gpt->ipb_freq;
+ do_div(clocks, 1000000000); /* Scale it down to ns range */
+
+ /* This device cannot handle a clock count greater than 32 bits */
+ if (clocks > 0xffffffff)
+ return -EINVAL;
+
+ /* Calculate the prescaler and count values from the clocks value.
+ * 'clocks' is the number of clock ticks in the period. The timer
+ * has 16 bit precision and a 16 bit prescaler. Prescaler is
+ * calculated by integer dividing the clocks by 0x10000 (shifting
+ * down 16 bits) to obtain the smallest possible divisor for clocks
+ * to get a 16 bit count value.
+ *
+ * Note: the prescale register is '1' based, not '0' based. ie. a
+ * value of '1' means divide the clock by one. 0xffff divides the
+ * clock by 0xffff. '0x0000' does not divide by zero, but wraps
+ * around and divides by 0x10000. That is why prescale must be
+ * a u32 variable, not a u16, for this calculation. */
+ prescale = (clocks >> 16) + 1;
+ do_div(clocks, prescale);
+ if (clocks > 0xffff) {
+ pr_err("calculation error; prescale:%x clocks:%llx\n",
+ prescale, clocks);
+ return -EINVAL;
+ }
+
+ /* Set and enable the timer, reject an attempt to use a wdt as gpt */
+ spin_lock_irqsave(&gpt->lock, flags);
+ if (as_wdt)
+ gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
+ else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return -EBUSY;
+ }
+ out_be32(&gpt->regs->count, prescale << 16 | clocks);
+ clrsetbits_be32(&gpt->regs->mode, clear, set);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+
+ return 0;
+}
+
+/**
+ * mpc52xx_gpt_start_timer - Set and enable the GPT timer
+ * @gpt: Pointer to gpt private data structure
+ * @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
+ * @continuous: set to 1 to make timer continuous free running
+ *
+ * An interrupt will be generated every time the timer fires
+ */
+int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous)
+{
+ return mpc52xx_gpt_do_start(gpt, period, continuous, 0);
+}
+EXPORT_SYMBOL(mpc52xx_gpt_start_timer);
+
+/**
+ * mpc52xx_gpt_stop_timer - Stop a gpt
+ * @gpt: Pointer to gpt private data structure
+ *
+ * Returns an error if attempting to stop a wdt
+ */
+int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt)
+{
+ unsigned long flags;
+
+ /* reject the operation if the timer is used as watchdog (gpt 0 only) */
+ spin_lock_irqsave(&gpt->lock, flags);
+ if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return -EBUSY;
+ }
+
+ clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_stop_timer);
+
+/**
+ * mpc52xx_gpt_timer_period - Read the timer period
+ * @gpt: Pointer to gpt private data structure
+ *
+ * Returns the timer period in ns
+ */
+u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
+{
+ u64 period;
+ u64 prescale;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt->lock, flags);
+ period = in_be32(&gpt->regs->count);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+
+ prescale = period >> 16;
+ period &= 0xffff;
+ if (prescale == 0)
+ prescale = 0x10000;
+ period = period * prescale * 1000000000ULL;
+ do_div(period, (u64)gpt->ipb_freq);
+ return period;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
+
+#if defined(CONFIG_MPC5200_WDT)
+/***********************************************************************
+ * Watchdog API for gpt0
+ */
+
+#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
+
+/* wdt_is_active stores whether or not the /dev/watchdog device is open */
+static unsigned long wdt_is_active;
+
+/* wdt-capable gpt */
+static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt;
+
+/* low-level wdt functions */
+static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt_wdt->lock, flags);
+ out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING);
+ spin_unlock_irqrestore(&gpt_wdt->lock, flags);
+}
+
+/* wdt misc device api */
+static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ mpc52xx_gpt_wdt_ping(gpt_wdt);
+ return 0;
+}
+
+static struct watchdog_info mpc5200_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = WDT_IDENTITY,
+};
+
+static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ int __user *data = (int __user *)arg;
+ int timeout;
+ u64 real_timeout;
+ int ret = 0;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(data, &mpc5200_wdt_info,
+ sizeof(mpc5200_wdt_info));
+ if (ret)
+ ret = -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, data);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ mpc52xx_gpt_wdt_ping(gpt_wdt);
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(timeout, data);
+ if (ret)
+ break;
+ real_timeout = (u64) timeout * 1000000000ULL;
+ ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1);
+ if (ret)
+ break;
+ /* fall through and return the timeout */
+
+ case WDIOC_GETTIMEOUT:
+ /* we need to round here so as to avoid e.g. the following
+ * situation:
+ * - timeout requested is 1 second;
+ * - real timeout @33MHz is 999997090ns
+ * - the int divide by 10^9 will return 0.
+ */
+ real_timeout =
+ mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL;
+ do_div(real_timeout, 1000000000ULL);
+ timeout = (int) real_timeout;
+ ret = put_user(timeout, data);
+ break;
+
+ default:
+ ret = -ENOTTY;
+ }
+ return ret;
+}
+
+static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ /* sanity check */
+ if (!mpc52xx_gpt_wdt)
+ return -ENODEV;
+
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &wdt_is_active))
+ return -EBUSY;
+
+ /* Set and activate the watchdog with 30 seconds timeout */
+ ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL,
+ 0, 1);
+ if (ret) {
+ clear_bit(0, &wdt_is_active);
+ return ret;
+ }
+
+ file->private_data = mpc52xx_gpt_wdt;
+ return nonseekable_open(inode, file);
+}
+
+static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
+{
+ /* note: releasing the wdt in NOWAYOUT-mode does not stop it */
+#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt_wdt->lock, flags);
+ clrbits32(&gpt_wdt->regs->mode,
+ MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN);
+ gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT;
+ spin_unlock_irqrestore(&gpt_wdt->lock, flags);
+#endif
+ clear_bit(0, &wdt_is_active);
+ return 0;
+}
+
+
+static const struct file_operations mpc52xx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = mpc52xx_wdt_write,
+ .unlocked_ioctl = mpc52xx_wdt_ioctl,
+ .open = mpc52xx_wdt_open,
+ .release = mpc52xx_wdt_release,
+};
+
+static struct miscdevice mpc52xx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mpc52xx_wdt_fops,
+};
+
+static int __devinit mpc52xx_gpt_wdt_init(void)
+{
+ int err;
+
+ /* try to register the watchdog misc device */
+ err = misc_register(&mpc52xx_wdt_miscdev);
+ if (err)
+ pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY);
+ else
+ pr_info("%s: watchdog device registered\n", WDT_IDENTITY);
+ return err;
+}
+
+static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
+ const u32 *period)
+{
+ u64 real_timeout;
+
+ /* remember the gpt for the wdt operation */
+ mpc52xx_gpt_wdt = gpt;
+
+ /* configure the wdt if the device tree contained a timeout */
+ if (!period || *period == 0)
+ return 0;
+
+ real_timeout = (u64) *period * 1000000000ULL;
+ if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1))
+ dev_warn(gpt->dev, "starting as wdt failed\n");
+ else
+ dev_info(gpt->dev, "watchdog set to %us timeout\n", *period);
+ return 0;
+}
+
+#else
+
+static int __devinit mpc52xx_gpt_wdt_init(void)
+{
+ return 0;
+}
+
+#define mpc52xx_gpt_wdt_setup(x, y) (0)
+
+#endif /* CONFIG_MPC5200_WDT */
+
/* ---------------------------------------------------------------------
* of_platform bus binding code
*/
@@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
+ gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node);
gpt->regs = of_iomap(ofdev->node, 0);
if (!gpt->regs) {
kfree(gpt);
@@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
mpc52xx_gpt_gpio_setup(gpt, ofdev->node);
mpc52xx_gpt_irq_setup(gpt, ofdev->node);
+ mutex_lock(&mpc52xx_gpt_list_mutex);
+ list_add(&gpt->list, &mpc52xx_gpt_list);
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+
+ /* check if this device could be a watchdog */
+ if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) ||
+ of_get_property(ofdev->node, "has-wdt", NULL)) {
+ const u32 *on_boot_wdt;
+
+ gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
+ on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot",
+ NULL);
+ if (on_boot_wdt) {
+ dev_info(gpt->dev, "used as watchdog\n");
+ gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
+ } else
+ dev_info(gpt->dev, "can function as watchdog\n");
+ mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt);
+ }
+
return 0;
}
@@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void)
/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
subsys_initcall(mpc52xx_gpt_init);
+device_initcall(mpc52xx_gpt_wdt_init);
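
A short usage sketch of the timer API exported above (illustrative only: the virq and the 10ms period are arbitrary; only mpc52xx_gpt_from_irq(), mpc52xx_gpt_start_timer() and mpc52xx_gpt_stop_timer() come from this driver):

/* Arm the GPT that is wired to 'virq' as a one-shot 10ms timer; the
 * period argument of mpc52xx_gpt_start_timer() is in nanoseconds. */
static int example_arm_gpt(int virq)
{
	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(virq);

	if (!gpt)
		return -ENODEV;
	return mpc52xx_gpt_start_timer(gpt, 10 * 1000 * 1000ULL, 0);
}

/* Later, to cancel it (fails with -EBUSY if gpt0 is running as a wdt): */
static void example_cancel_gpt(struct mpc52xx_gpt_priv *gpt)
{
	mpc52xx_gpt_stop_timer(gpt);
}
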
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
new file mode 100644
index 00000000000..929d017535a
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -0,0 +1,560 @@
+/*
+ * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2009 Secret Lab Technologies Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * Todo:
+ * - Add support for multiple requests to be queued.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include <asm/time.h>
+
+#include <sysdev/bestcomm/bestcomm.h>
+#include <sysdev/bestcomm/bestcomm_priv.h>
+#include <sysdev/bestcomm/gen_bd.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
+MODULE_LICENSE("GPL");
+
+#define LPBFIFO_REG_PACKET_SIZE (0x00)
+#define LPBFIFO_REG_START_ADDRESS (0x04)
+#define LPBFIFO_REG_CONTROL (0x08)
+#define LPBFIFO_REG_ENABLE (0x0C)
+#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14)
+#define LPBFIFO_REG_FIFO_DATA (0x40)
+#define LPBFIFO_REG_FIFO_STATUS (0x44)
+#define LPBFIFO_REG_FIFO_CONTROL (0x48)
+#define LPBFIFO_REG_FIFO_ALARM (0x4C)
+
+struct mpc52xx_lpbfifo {
+ struct device *dev;
+ phys_addr_t regs_phys;
+ void __iomem *regs;
+ int irq;
+ spinlock_t lock;
+
+ struct bcom_task *bcom_tx_task;
+ struct bcom_task *bcom_rx_task;
+ struct bcom_task *bcom_cur_task;
+
+ /* Current state data */
+ struct mpc52xx_lpbfifo_request *req;
+ int dma_irqs_enabled;
+};
+
+/* The MPC5200 has only one fifo, so only need one instance structure */
+static struct mpc52xx_lpbfifo lpbfifo;
+
+/**
+ * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
+ */
+static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
+{
+ size_t transfer_size = req->size - req->pos;
+ struct bcom_bd *bd;
+ void __iomem *reg;
+ u32 *data;
+ int i;
+ int bit_fields;
+ int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+ int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
+
+ /* Set and clear the reset bits; is good practice in User Manual */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* set master enable bit */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
+ if (!dma) {
+ /* While the FIFO can be setup for transfer sizes as large as
+ * 16M-1, the FIFO itself is only 512 bytes deep and it does
+ * not generate interrupts for FIFO full events (only transfer
+ * complete will raise an IRQ). Therefore when not using
+ * Bestcomm to drive the FIFO it needs to either be polled, or
+ * transfers need to be constrained to the size of the fifo.
+ *
+ * This driver restricts the size of the transfer
+ */
+ if (transfer_size > 512)
+ transfer_size = 512;
+
+ /* Load the FIFO with data */
+ if (write) {
+ reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
+ data = req->data + req->pos;
+ for (i = 0; i < transfer_size; i += 4)
+ out_be32(reg, *data++);
+ }
+
+ /* Unmask both error and completion irqs */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
+ } else {
+ /* Choose the correct direction
+ *
+ * Configure the watermarks so DMA will always complete correctly.
+ * It may be worth experimenting with the ALARM value to see if
+ * there is a performance impact. However, if it is wrong there
+ * is a risk of DMA not transferring the last chunk of data
+ */
+ if (write) {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
+ out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
+ lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
+ } else {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
+ out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
+ lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;
+
+ if (poll_dma) {
+ if (lpbfifo.dma_irqs_enabled) {
+ disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
+ lpbfifo.dma_irqs_enabled = 0;
+ }
+ } else {
+ if (!lpbfifo.dma_irqs_enabled) {
+ enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
+ lpbfifo.dma_irqs_enabled = 1;
+ }
+ }
+ }
+
+ bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
+ bd->status = transfer_size;
+ if (!write) {
+ /*
+ * In the DMA read case, the DMA doesn't complete,
+ * possibly due to incorrect watermarks in the ALARM
+ * and CONTROL regs. For now instead of trying to
+ * determine the right watermarks that will make this
+ * work, just increase the number of bytes the FIFO is
+ * expecting.
+ *
+ * When submitting another operation, the FIFO will get
+ * reset, so the condition of the FIFO waiting for a
+ * non-existent 4 bytes will get cleared.
+ */
+ transfer_size += 4; /* BLECH! */
+ }
+ bd->data[0] = req->data_phys + req->pos;
+ bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);
+
+ /* error irq & master enabled bit */
+ bit_fields = 0x00000201;
+
+ /* Unmask irqs */
+ if (write && (!poll_dma))
+ bit_fields |= 0x00000100; /* completion irq too */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
+ }
+
+ /* Set transfer size, width, chip select and READ mode */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
+ req->offset + req->pos);
+ out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);
+
+ bit_fields = req->cs << 24 | 0x000008;
+ if (!write)
+ bit_fields |= 0x010000; /* read mode */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
+
+ /* Kick it off */
+ out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+ if (dma)
+ bcom_enable(lpbfifo.bcom_cur_task);
+}
+
+/**
+ * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
+ *
+ * On transmit, the dma completion irq triggers before the fifo completion
+ * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
+ * task completion irq because everything is not really done until the LPB FIFO
+ * completion irq triggers.
+ *
+ * In other words:
+ * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
+ * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
+ * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
+ *
+ * Reasons for entering this routine:
+ * 1) PIO mode rx and tx completion irq
+ * 2) DMA interrupt mode tx completion irq
+ * 3) DMA polled mode tx
+ *
+ * Exit conditions:
+ * 1) Transfer aborted
+ * 2) FIFO complete without DMA; more data to do
+ * 3) FIFO complete without DMA; all data transferred
+ * 4) FIFO complete using DMA
+ *
+ * Condition 1 can occur regardless of whether or not DMA is used.
+ * It requires executing the callback to report the error and exiting
+ * immediately.
+ *
+ * Condition 2 requires programming the FIFO with the next block of data
+ *
+ * Condition 3 requires executing the callback to report completion
+ *
+ * Condition 4 means the same as 3, except that we also retrieve the bcom
+ * buffer so DMA doesn't get clogged up.
+ *
+ * To make things trickier, the spinlock must be dropped before
+ * executing the callback, otherwise we could end up with a deadlock
+ * or nested spinlock condition. The out path is non-trivial, so
+ * extra fiddling is done to make sure all paths lead to the same
+ * outbound code.
+ */
+static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
+{
+ struct mpc52xx_lpbfifo_request *req;
+ u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
+ void __iomem *reg;
+ u32 *data;
+ int count, i;
+ int do_callback = 0;
+ u32 ts;
+ unsigned long flags;
+ int dma, write, poll_dma;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ ts = get_tbl();
+
+ req = lpbfifo.req;
+ if (!req) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ pr_err("bogus LPBFIFO IRQ\n");
+ return IRQ_HANDLED;
+ }
+
+ dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+ poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
+
+ if (dma && !write) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ pr_err("bogus LPBFIFO IRQ (dma and not writting)\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & 0x01) == 0) {
+ goto out;
+ }
+
+ /* check abort bit */
+ if (status & 0x10) {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+ do_callback = 1;
+ goto out;
+ }
+
+ /* Read result from hardware */
+ count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
+ count &= 0x00ffffff;
+
+ if (!dma && !write) {
+ /* copy the data out of the FIFO */
+ reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
+ data = req->data + req->pos;
+ for (i = 0; i < count; i += 4)
+ *data++ = in_be32(reg);
+ }
+
+ /* Update transfer position and count */
+ req->pos += count;
+
+ /* Decide what to do next */
+ if (req->size - req->pos)
+ mpc52xx_lpbfifo_kick(req); /* more work to do */
+ else
+ do_callback = 1;
+
+ out:
+ /* Clear the IRQ */
+ out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);
+
+ if (dma && (status & 0x11)) {
+ /*
+ * Count the DMA as complete only when the FIFO completion
+ * status or abort bits are set.
+ *
+ * (status & 0x01) should always be the case except sometimes
+ * when using polled DMA.
+ *
+ * (status & 0x10) {transfer aborted}: This case needs more
+ * testing.
+ */
+ bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
+ }
+ req->last_byte = ((u8 *)req->data)[req->size - 1];
+
+ /* When the do_callback flag is set; it means the transfer is finished
+ * so set the FIFO as idle */
+ if (do_callback)
+ lpbfifo.req = NULL;
+
+ if (irq != 0) /* don't increment on polled case */
+ req->irq_count++;
+
+ req->irq_ticks += get_tbl() - ts;
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ /* Spinlock is released; it is now safe to call the callback */
+ if (do_callback && req->callback)
+ req->callback(req);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
+ *
+ * Only used when receiving data.
+ */
+static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
+{
+ struct mpc52xx_lpbfifo_request *req;
+ unsigned long flags;
+ u32 status;
+ u32 ts;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ ts = get_tbl();
+
+ req = lpbfifo.req;
+ if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (irq != 0) /* don't increment on polled case */
+ req->irq_count++;
+
+ if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ req->buffer_not_done_cnt++;
+ if ((req->buffer_not_done_cnt % 1000) == 0)
+ pr_err("transfer stalled\n");
+
+ return IRQ_HANDLED;
+ }
+
+ bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
+
+ req->last_byte = ((u8 *)req->data)[req->size - 1];
+
+ req->pos = status & 0x00ffffff;
+
+ /* Mark the FIFO as idle */
+ lpbfifo.req = NULL;
+
+ /* Release the lock before calling out to the callback. */
+ req->irq_ticks += get_tbl() - ts;
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ if (req->callback)
+ req->callback(req);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion
+ */
+void mpc52xx_lpbfifo_poll(void)
+{
+ struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
+ int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+
+ /*
+ * For more information, see comments on the "Fat Lady"
+ */
+ if (dma && write)
+ mpc52xx_lpbfifo_irq(0, NULL);
+ else
+ mpc52xx_lpbfifo_bcom_irq(0, NULL);
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
+
+/**
+ * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
+ * @req: Pointer to request structure
+ */
+int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
+{
+ unsigned long flags;
+
+ if (!lpbfifo.regs)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+
+ /* If the req pointer is already set, then a transfer is in progress */
+ if (lpbfifo.req) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return -EBUSY;
+ }
+
+ /* Setup the transfer */
+ lpbfifo.req = req;
+ req->irq_count = 0;
+ req->irq_ticks = 0;
+ req->buffer_not_done_cnt = 0;
+ req->pos = 0;
+
+ mpc52xx_lpbfifo_kick(req);
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
+
+void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ if (lpbfifo.req == req) {
+ /* Put it into reset and clear the state */
+ bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
+ bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+ lpbfifo.req = NULL;
+ }
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
+
+static int __devinit
+mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct resource res;
+ int rc = -ENOMEM;
+
+ if (lpbfifo.dev != NULL)
+ return -ENOSPC;
+
+ lpbfifo.irq = irq_of_parse_and_map(op->node, 0);
+ if (!lpbfifo.irq)
+ return -ENODEV;
+
+ if (of_address_to_resource(op->node, 0, &res))
+ return -ENODEV;
+ lpbfifo.regs_phys = res.start;
+ lpbfifo.regs = of_iomap(op->node, 0);
+ if (!lpbfifo.regs)
+ return -ENOMEM;
+
+ spin_lock_init(&lpbfifo.lock);
+
+ /* Put FIFO into reset */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* Register the interrupt handler */
+ rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
+ "mpc52xx-lpbfifo", &lpbfifo);
+ if (rc)
+ goto err_irq;
+
+ /* Request the Bestcomm receive (fifo --> memory) task and IRQ */
+ lpbfifo.bcom_rx_task =
+ bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
+ BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
+ 16*1024*1024);
+ if (!lpbfifo.bcom_rx_task)
+ goto err_bcom_rx;
+
+ rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
+ mpc52xx_lpbfifo_bcom_irq, 0,
+ "mpc52xx-lpbfifo-rx", &lpbfifo);
+ if (rc)
+ goto err_bcom_rx_irq;
+
+ /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
+ lpbfifo.bcom_tx_task =
+ bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
+ BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
+ if (!lpbfifo.bcom_tx_task)
+ goto err_bcom_tx;
+
+ lpbfifo.dev = &op->dev;
+ return 0;
+
+ err_bcom_tx:
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
+ err_bcom_rx_irq:
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+ err_bcom_rx:
+ err_irq:
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+
+ dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
+ return -ENODEV;
+}
+
+
+static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op)
+{
+ if (lpbfifo.dev != &op->dev)
+ return 0;
+
+ /* Put FIFO in reset */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* Release the bestcomm transmit task */
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
+ bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);
+
+ /* Release the bestcomm receive task */
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+
+ free_irq(lpbfifo.irq, &lpbfifo);
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+ lpbfifo.dev = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
+ { .compatible = "fsl,mpc5200-lpbfifo", },
+ {},
+};
+
+static struct of_platform_driver mpc52xx_lpbfifo_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc52xx-lpbfifo",
+ .match_table = mpc52xx_lpbfifo_match,
+ .probe = mpc52xx_lpbfifo_probe,
+ .remove = __devexit_p(mpc52xx_lpbfifo_remove),
+};
+
+/***********************************************************************
+ * Module init/exit
+ */
+static int __init mpc52xx_lpbfifo_init(void)
+{
+ pr_debug("Registering LocalPlus bus FIFO driver\n");
+ return of_register_platform_driver(&mpc52xx_lpbfifo_driver);
+}
+module_init(mpc52xx_lpbfifo_init);
+
+static void __exit mpc52xx_lpbfifo_exit(void)
+{
+ pr_debug("Unregistering LocalPlus bus FIFO driver\n");
+ of_unregister_platform_driver(&mpc52xx_lpbfifo_driver);
+}
+module_exit(mpc52xx_lpbfifo_exit);
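
Where the completion interrupt cannot be used, the driver exposes the polled path above; a hedged sketch of a caller (the flag setup and the spin-until-callback pattern are illustrative assumptions, not a documented contract):

static volatile int lpb_poll_done;

static void lpb_poll_callback(struct mpc52xx_lpbfifo_request *req)
{
	lpb_poll_done = 1;
}

/* Submit a DMA write and busy-wait on mpc52xx_lpbfifo_poll() until the
 * completion callback has run.  'req' is assumed to already have cs,
 * offset, data, data_phys and size filled in. */
static int lpb_example_write_polled(struct mpc52xx_lpbfifo_request *req)
{
	int rc;

	req->flags = MPC52XX_LPBFIFO_FLAG_WRITE | MPC52XX_LPBFIFO_FLAG_POLL_DMA;
	req->callback = lpb_poll_callback;
	lpb_poll_done = 0;

	rc = mpc52xx_lpbfifo_submit(req);
	if (rc)
		return rc;
	while (!lpb_poll_done)
		mpc52xx_lpbfifo_poll();
	return 0;
}
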
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 4b1c422b814..0ff5174ae4f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -8,7 +8,7 @@ endif
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o ras.o \
- firmware.o power.o
+ firmware.o power.o dlpar.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
new file mode 100644
index 00000000000..fd2f0afeb4d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -0,0 +1,558 @@
+/*
+ * Support for dynamic reconfiguration for PCI, Memory, and CPU
+ * Hotplug and Dynamic Logical Partitioning on RPA platforms.
+ *
+ * Copyright (C) 2009 Nathan Fontenot
+ * Copyright (C) 2009 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include "offline_states.h"
+
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+#include <asm/rtas.h>
+#include <asm/pSeries_reconfig.h>
+
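+/* Layout of the rtas_data_buf work area used by ibm,configure-connector;
+ * name_offset and prop_offset are byte offsets into this buffer.
+ */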
+struct cc_workarea {
+ u32 drc_index;
+ u32 zero;
+ u32 name_offset;
+ u32 prop_length;
+ u32 prop_offset;
+};
+
+static void dlpar_free_cc_property(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
+static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
+{
+ struct property *prop;
+ char *name;
+ char *value;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return NULL;
+
+ name = (char *)ccwa + ccwa->name_offset;
+ prop->name = kstrdup(name, GFP_KERNEL);
+
+ prop->length = ccwa->prop_length;
+ value = (char *)ccwa + ccwa->prop_offset;
+ prop->value = kzalloc(prop->length, GFP_KERNEL);
+ if (!prop->value) {
+ dlpar_free_cc_property(prop);
+ return NULL;
+ }
+
+ memcpy(prop->value, value, prop->length);
+ return prop;
+}
+
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
+{
+ struct device_node *dn;
+ char *name;
+
+ dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+ if (!dn)
+ return NULL;
+
+ /* The name reported by configure-connector does not contain a
+ * preceding '/', so allocate a buffer large enough to prepend
+ * it to the full_name.
+ */
+ name = (char *)ccwa + ccwa->name_offset;
+ dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+ if (!dn->full_name) {
+ kfree(dn);
+ return NULL;
+ }
+
+ sprintf(dn->full_name, "/%s", name);
+ return dn;
+}
+
+static void dlpar_free_one_cc_node(struct device_node *dn)
+{
+ struct property *prop;
+
+ while (dn->properties) {
+ prop = dn->properties;
+ dn->properties = prop->next;
+ dlpar_free_cc_property(prop);
+ }
+
+ kfree(dn->full_name);
+ kfree(dn);
+}
+
+static void dlpar_free_cc_nodes(struct device_node *dn)
+{
+ if (dn->child)
+ dlpar_free_cc_nodes(dn->child);
+
+ if (dn->sibling)
+ dlpar_free_cc_nodes(dn->sibling);
+
+ dlpar_free_one_cc_node(dn);
+}
+
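+/* Return codes from ibm,configure-connector indicating what the work area
+ * describes after each call.
+ */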
+#define NEXT_SIBLING 1
+#define NEXT_CHILD 2
+#define NEXT_PROPERTY 3
+#define PREV_PARENT 4
+#define MORE_MEMORY 5
+#define CALL_AGAIN -2
+#define ERR_CFG_USE -9003
+
+struct device_node *dlpar_configure_connector(u32 drc_index)
+{
+ struct device_node *dn;
+ struct device_node *first_dn = NULL;
+ struct device_node *last_dn = NULL;
+ struct property *property;
+ struct property *last_property = NULL;
+ struct cc_workarea *ccwa;
+ int cc_token;
+ int rc;
+
+ cc_token = rtas_token("ibm,configure-connector");
+ if (cc_token == RTAS_UNKNOWN_SERVICE)
+ return NULL;
+
+ spin_lock(&rtas_data_buf_lock);
+ ccwa = (struct cc_workarea *)&rtas_data_buf[0];
+ ccwa->drc_index = drc_index;
+ ccwa->zero = 0;
+
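+ /* Call configure-connector repeatedly until it returns 0; each
+ * intermediate return code says how to attach the node or property
+ * just placed in the work area.
+ */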
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ while (rc) {
+ switch (rc) {
+ case NEXT_SIBLING:
+ dn = dlpar_parse_cc_node(ccwa);
+ if (!dn)
+ goto cc_error;
+
+ dn->parent = last_dn->parent;
+ last_dn->sibling = dn;
+ last_dn = dn;
+ break;
+
+ case NEXT_CHILD:
+ dn = dlpar_parse_cc_node(ccwa);
+ if (!dn)
+ goto cc_error;
+
+ if (!first_dn)
+ first_dn = dn;
+ else {
+ dn->parent = last_dn;
+ if (last_dn)
+ last_dn->child = dn;
+ }
+
+ last_dn = dn;
+ break;
+
+ case NEXT_PROPERTY:
+ property = dlpar_parse_cc_property(ccwa);
+ if (!property)
+ goto cc_error;
+
+ if (!last_dn->properties)
+ last_dn->properties = property;
+ else
+ last_property->next = property;
+
+ last_property = property;
+ break;
+
+ case PREV_PARENT:
+ last_dn = last_dn->parent;
+ break;
+
+ case CALL_AGAIN:
+ break;
+
+ case MORE_MEMORY:
+ case ERR_CFG_USE:
+ default:
+ printk(KERN_ERR "Unexpected Error (%d) "
+ "returned from configure-connector\n", rc);
+ goto cc_error;
+ }
+
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ }
+
+ spin_unlock(&rtas_data_buf_lock);
+ return first_dn;
+
+cc_error:
+ if (first_dn)
+ dlpar_free_cc_nodes(first_dn);
+ spin_unlock(&rtas_data_buf_lock);
+ return NULL;
+}
+
+static struct device_node *derive_parent(const char *path)
+{
+ struct device_node *parent;
+ char *last_slash;
+
+ last_slash = strrchr(path, '/');
+ if (last_slash == path) {
+ parent = of_find_node_by_path("/");
+ } else {
+ char *parent_path;
+ int parent_path_len = last_slash - path + 1;
+ parent_path = kmalloc(parent_path_len, GFP_KERNEL);
+ if (!parent_path)
+ return NULL;
+
+ strlcpy(parent_path, path, parent_path_len);
+ parent = of_find_node_by_path(parent_path);
+ kfree(parent_path);
+ }
+
+ return parent;
+}
+
+int dlpar_attach_node(struct device_node *dn)
+{
+ struct proc_dir_entry *ent;
+ int rc;
+
+ of_node_set_flag(dn, OF_DYNAMIC);
+ kref_init(&dn->kref);
+ dn->parent = derive_parent(dn->full_name);
+ if (!dn->parent)
+ return -ENOMEM;
+
+ rc = blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ PSERIES_RECONFIG_ADD, dn);
+ if (rc == NOTIFY_BAD) {
+ printk(KERN_ERR "Failed to add device node %s\n",
+ dn->full_name);
+ return -ENOMEM; /* For now, safe to assume kmalloc failure */
+ }
+
+ of_attach_node(dn);
+
+#ifdef CONFIG_PROC_DEVICETREE
+ ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
+ if (ent)
+ proc_device_tree_add_node(dn, ent);
+#endif
+
+ of_node_put(dn->parent);
+ return 0;
+}
+
+int dlpar_detach_node(struct device_node *dn)
+{
+ struct device_node *parent = dn->parent;
+ struct property *prop = dn->properties;
+
+#ifdef CONFIG_PROC_DEVICETREE
+ while (prop) {
+ remove_proc_entry(prop->name, dn->pde);
+ prop = prop->next;
+ }
+
+ if (dn->pde)
+ remove_proc_entry(dn->pde->name, parent->pde);
+#endif
+
+ blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ PSERIES_RECONFIG_REMOVE, dn);
+ of_detach_node(dn);
+ of_node_put(dn); /* Must decrement the refcount */
+
+ return 0;
+}
+
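+/* Bring online each logical cpu whose hardware thread id is listed in the
+ * node's ibm,ppc-interrupt-server#s property.
+ */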
+int online_node_cpus(struct device_node *dn)
+{
+ int rc = 0;
+ unsigned int cpu;
+ int len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return -EINVAL;
+
+ nthreads = len / sizeof(u32);
+
+ cpu_maps_update_begin();
+ for (i = 0; i < nthreads; i++) {
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != intserv[i])
+ continue;
+ BUG_ON(get_cpu_current_state(cpu)
+ != CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+ rc = cpu_up(cpu);
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+
+ break;
+ }
+ if (cpu == num_possible_cpus())
+ printk(KERN_WARNING "Could not find cpu to online "
+ "with physical id 0x%x\n", intserv[i]);
+ }
+ cpu_maps_update_done();
+
+out:
+ return rc;
+
+}
+
+int offline_node_cpus(struct device_node *dn)
+{
+ int rc = 0;
+ unsigned int cpu;
+ int len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return -EINVAL;
+
+ nthreads = len / sizeof(u32);
+
+ cpu_maps_update_begin();
+ for (i = 0; i < nthreads; i++) {
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != intserv[i])
+ continue;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+ break;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ cpu_maps_update_done();
+ rc = cpu_down(cpu);
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+ break;
+
+ }
+
+ /*
+ * The cpu is in CPU_STATE_INACTIVE.
+ * Upgrade its state to CPU_STATE_OFFLINE.
+ */
+ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
+ != H_SUCCESS);
+ __cpu_die(cpu);
+ break;
+ }
+ if (cpu == num_possible_cpus())
+ printk(KERN_WARNING "Could not find cpu to offline "
+ "with physical id 0x%x\n", intserv[i]);
+ }
+ cpu_maps_update_done();
+
+out:
+ return rc;
+
+}
+
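+/* RTAS sensor and indicator tokens used for DRC state transitions:
+ * dr-entity-sense is read with get-sensor-state; allocation-state and
+ * isolation-state are written with set-indicator.
+ */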
+#define DR_ENTITY_SENSE 9003
+#define DR_ENTITY_PRESENT 1
+#define DR_ENTITY_UNUSABLE 2
+#define ALLOCATION_STATE 9003
+#define ALLOC_UNUSABLE 0
+#define ALLOC_USABLE 1
+#define ISOLATION_STATE 9001
+#define ISOLATE 0
+#define UNISOLATE 1
+
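+/* Acquire a DRC: confirm it is currently unusable, mark its allocation
+ * state usable, then unisolate it; undo the allocation change if
+ * unisolation fails.
+ */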
+int dlpar_acquire_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
+ DR_ENTITY_SENSE, drc_index);
+ if (rc || dr_status != DR_ENTITY_UNUSABLE)
+ return -1;
+
+ rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
+ if (rc)
+ return rc;
+
+ rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+ if (rc) {
+ rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
+ return rc;
+ }
+
+ return 0;
+}
+
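+/* Release a DRC by reversing the acquire sequence: isolate it, then mark
+ * its allocation state unusable.
+ */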
+int dlpar_release_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
+ DR_ENTITY_SENSE, drc_index);
+ if (rc || dr_status != DR_ENTITY_PRESENT)
+ return -1;
+
+ rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
+ if (rc)
+ return rc;
+
+ rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
+ if (rc) {
+ rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+ return rc;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+
+static DEFINE_MUTEX(pseries_cpu_hotplug_mutex);
+
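+/* Serializes the dlpar cpu probe and release paths below */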
+void cpu_hotplug_driver_lock(void)
+{
+ mutex_lock(&pseries_cpu_hotplug_mutex);
+}
+
+void cpu_hotplug_driver_unlock(void)
+{
+ mutex_unlock(&pseries_cpu_hotplug_mutex);
+}
+
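+/* ppc_md.cpu_probe hook: 'buf' carries the drc-index of the cpu to add,
+ * written through the cpu probe sysfs interface.
+ */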
+static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
+{
+ struct device_node *dn;
+ unsigned long drc_index;
+ char *cpu_name;
+ int rc;
+
+ cpu_hotplug_driver_lock();
+ rc = strict_strtoul(buf, 0, &drc_index);
+ if (rc) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dn = dlpar_configure_connector(drc_index);
+ if (!dn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* configure-connector reports cpus as living in the base
+ * directory of the device tree. CPUs actually live in the
+ * cpus directory, so we need to fix up the full_name.
+ */
+ cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
+ GFP_KERNEL);
+ if (!cpu_name) {
+ dlpar_free_cc_nodes(dn);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sprintf(cpu_name, "/cpus%s", dn->full_name);
+ kfree(dn->full_name);
+ dn->full_name = cpu_name;
+
+ rc = dlpar_acquire_drc(drc_index);
+ if (rc) {
+ dlpar_free_cc_nodes(dn);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = dlpar_attach_node(dn);
+ if (rc) {
+ dlpar_release_drc(drc_index);
+ dlpar_free_cc_nodes(dn);
+ goto out; /* dn was freed above; do not touch it again */
+ }
+
+ rc = online_node_cpus(dn);
+out:
+ cpu_hotplug_driver_unlock();
+
+ return rc ? rc : count;
+}
+
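+/* ppc_md.cpu_release hook: 'buf' carries the device-tree path of the cpu
+ * node to remove.
+ */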
+static ssize_t dlpar_cpu_release(const char *buf, size_t count)
+{
+ struct device_node *dn;
+ const u32 *drc_index;
+ int rc;
+
+ dn = of_find_node_by_path(buf);
+ if (!dn)
+ return -EINVAL;
+
+ drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
+ if (!drc_index) {
+ of_node_put(dn);
+ return -EINVAL;
+ }
+
+ cpu_hotplug_driver_lock();
+ rc = offline_node_cpus(dn);
+ if (rc) {
+ of_node_put(dn);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = dlpar_release_drc(*drc_index);
+ if (rc) {
+ of_node_put(dn);
+ goto out;
+ }
+
+ rc = dlpar_detach_node(dn);
+ if (rc) {
+ dlpar_acquire_drc(*drc_index);
+ goto out;
+ }
+
+ of_node_put(dn);
+out:
+ cpu_hotplug_driver_unlock();
+ return rc ? rc : count;
+}
+
+static int __init pseries_dlpar_init(void)
+{
+ ppc_md.cpu_probe = dlpar_cpu_probe;
+ ppc_md.cpu_release = dlpar_cpu_release;
+
+ return 0;
+}
+machine_device_initcall(pseries, pseries_dlpar_init);
+
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 5182d2b992c..a2305d29bbb 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -96,7 +96,7 @@ static struct device_node *derive_parent(const char *path)
return parent;
}
-static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
+BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{