author      Ralf Baechle <ralf@linux-mips.org>    2007-03-29 23:42:42 +0100
committer   Ralf Baechle <ralf@linux-mips.org>    2007-03-29 23:46:36 +0100
commit      8a1e97ee2e025f116765c92409a3cf8f6cb07ad6
tree        c3df538fce3f32b35b1b8355a9a411cb668e724e
parent      6c9fde4bfff11b2fd93b4e518ae7ecb25a9244e4
[MIPS] SMTC: Fix recursion in instant IPI replay code.
local_irq_restore -> raw_local_irq_restore -> irq_restore_epilog ->
smtc_ipi_replay -> smtc_ipi_dq -> spin_unlock_irqrestore ->
_spin_unlock_irqrestore -> local_irq_restore
The recursion does abort when there are no more IPIs queued for a CPU, so
this isn't usually fatal, which is why we got away with it for so long
until it was discovered by code inspection.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
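
To make the cycle above concrete, here is a small user-space model of the pre-fix call chain (illustration only, not part of the original commit; the *_model names are hypothetical stand-ins for the kernel functions they mirror). It shows why the recursion bottoms out once the per-CPU IPI queue drains, so the depth is bounded by the number of deferred IPIs rather than looping forever:

/* Simplified model of the old SMTC instant-replay recursion. */
#include <stdio.h>

static int ipi_queue_depth = 3;		/* pretend three IPIs were deferred */
static int depth, max_depth;		/* observed recursion depth */

static void local_irq_restore_model(void);

/* Stand-in for smtc_ipi_dq(): the real one ends in spin_unlock_irqrestore(),
 * which in turn ends in local_irq_restore() -- the recursive edge. */
static int smtc_ipi_dq_model(void)
{
	int got = ipi_queue_depth > 0;

	if (got)
		ipi_queue_depth--;
	local_irq_restore_model();
	return got;
}

/* Stand-in for smtc_ipi_replay(): drain the queue. */
static void smtc_ipi_replay_model(void)
{
	while (smtc_ipi_dq_model())
		;			/* self_ipi() would run here */
}

/* Stand-in for raw_local_irq_restore() plus the old irq_restore_epilog(). */
static void local_irq_restore_model(void)
{
	if (++depth > max_depth)
		max_depth = depth;
	if (ipi_queue_depth > 0)	/* replay any deferred IPIs */
		smtc_ipi_replay_model();
	depth--;
}

int main(void)
{
	local_irq_restore_model();
	printf("queue drained, max recursion depth %d\n", max_depth);
	return 0;
}

Compiled and run, this prints a depth proportional to the queue length, which is the sense in which the recursion "does abort" but still burns stack for every deferred IPI; the patch removes the cycle entirely by making __smtc_ipi_dq() lock-free and letting its callers manage interrupt state.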
-rw-r--r--   arch/mips/kernel/smtc.c      |  40
-rw-r--r--   include/asm-mips/irqflags.h  |  55
-rw-r--r--   include/asm-mips/smtc_ipi.h  |  16
3 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index e50fe20571f..5dcfab6b288 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -999,10 +999,17 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled.  We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
  */
 
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
@@ -1017,17 +1024,30 @@ void smtc_ipi_replay(void)
 	 * is clear, and we'll handle it as a real pseudo-interrupt
 	 * and not a pseudo-pseudo interrupt.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
+	if (IPIQ[cpu].depth > 0) {
+		while (1) {
+			struct smtc_ipi_q *q = &IPIQ[cpu];
+			struct smtc_ipi *pipi;
+			extern void self_ipi(struct smtc_ipi *);
+
+			spin_lock(&q->lock);
+			pipi = __smtc_ipi_dq(q);
+			spin_unlock(&q->lock);
+			if (!pipi)
+				break;
 
-		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
 			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
+			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }
 
+void smtc_ipi_replay(void)
+{
+	raw_local_irq_disable();
+	__smtc_ipi_replay();
+}
+
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1132,7 +1152,13 @@ void smtc_idle_loop_hook(void)
 	 * is in use, there should never be any.
 	 */
 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	smtc_ipi_replay();
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__smtc_ipi_replay();
+		local_irq_restore(flags);
+	}
 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index af3b07dfad4..e459fa05db8 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -13,29 +13,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
 #include <asm/hazards.h>
 
-/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
- * at the cost of branch and call overhead on each local_irq_restore()
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-
-extern void smtc_ipi_replay(void);
-
-#define irq_restore_epilog(flags)				\
-do {								\
-	if (!(flags & 0x0400))					\
-		smtc_ipi_replay();				\
-} while (0)
-
-#else
-
-#define irq_restore_epilog(ignore) do { } while (0)
-
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-
 __asm__ (
 	"	.macro	raw_local_irq_enable			\n"
 	"	.set	push					\n"
@@ -205,17 +185,28 @@ __asm__ (
 	"	.set	pop					\n"
 	"	.endm						\n");
 
-#define raw_local_irq_restore(flags)				\
-do {								\
-	unsigned long __tmp1;					\
-								\
-	__asm__ __volatile__(					\
-		"raw_local_irq_restore\t%0"			\
-		: "=r" (__tmp1)					\
-		: "0" (flags)					\
-		: "memory");					\
-	irq_restore_epilog(flags);				\
-} while(0)
+extern void smtc_ipi_replay(void);
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	/*
+	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index 360ea6d250c..a52a4a7a36e 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -65,12 +65,10 @@ static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
 {
 	struct smtc_ipi *p;
-	long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->head == NULL)
 		p = NULL;
 	else {
@@ -81,7 +79,19 @@ static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
 		if (q->head == NULL)
 			q->tail = NULL;
 	}
+
+	return p;
+}
+
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+	unsigned long flags;
+	struct smtc_ipi *p;
+
+	spin_lock_irqsave(&q->lock, flags);
+	p = __smtc_ipi_dq(q);
 	spin_unlock_irqrestore(&q->lock, flags);
+
 	return p;
 }