| author | David S. Miller <davem@davemloft.net> | 2016-05-25 12:51:20 -0700 |
|---|---|---|
| committer | Sasha Levin <sasha.levin@oracle.com> | 2016-07-10 23:07:31 -0400 |
| commit | ea312fcb4efc84b28f891485d6c4aca2163c2438 (patch) | |
| tree | 863eaebdb4bd374921dde811af3bf7a692ea067a /arch | |
| parent | a74b2f239c2be446c4e7cdb0cdcb641e968a9ce0 (diff) | |
sparc64: Take ctx_alloc_lock properly in hugetlb_setup().
[ Upstream commit 9ea46abe22550e3366ff7cee2f8391b35b12f730 ]
On cheetahplus chips we take the ctx_alloc_lock in order to
modify the TLB lookup parameters for the indexed TLBs, which
are stored in the context register.
This is called with interrupts disabled; however, ctx_alloc_lock
is an IRQ-safe lock, so we must acquire and release it properly
with spin_{lock,unlock}_irq(), as sketched below.
Reported-by: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
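For readers who don't have the locking rule in their head: a spinlock that is ever taken from interrupt context must be taken with interrupts disabled by its process-context users, otherwise an interrupt arriving on the same CPU while the lock is held can try to re-take it and spin forever. Below is a minimal, hypothetical sketch of that pattern (the demo_* names are invented for illustration and are not from this patch). Note that spin_unlock_irq() unconditionally re-enables interrupts; spin_lock_irqsave()/spin_unlock_irqrestore() would be the variants to use if the caller's interrupt state had to be preserved.

```c
/* Hypothetical sketch of the IRQ-safe locking rule the commit message
 * invokes; all demo_* names are invented. */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* plays the role of ctx_alloc_lock */
static unsigned long demo_state;	/* plays the role of the context value */

/* Interrupt-context user: handlers run with local interrupts disabled,
 * so a plain spin_lock() cannot be preempted by another local user. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&demo_lock);
	demo_state++;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* Process-context user: must disable interrupts around the critical
 * section, the same spin_{lock,unlock}_irq() shape the patch adopts. */
static void demo_update(unsigned long new_state)
{
	spin_lock_irq(&demo_lock);
	demo_state = new_state;
	spin_unlock_irq(&demo_lock);
}
```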
Diffstat (limited to 'arch')
-rw-r--r-- | arch/sparc/mm/init_64.c | 10 |
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index b2f1e75165cb..71c7ace855d7 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2770,9 +2770,10 @@ void hugetlb_setup(struct pt_regs *regs)
 	 * the Data-TLB for huge pages.
 	 */
 	if (tlb_type == cheetah_plus) {
+		bool need_context_reload = false;
 		unsigned long ctx;
 
-		spin_lock(&ctx_alloc_lock);
+		spin_lock_irq(&ctx_alloc_lock);
 		ctx = mm->context.sparc64_ctx_val;
 		ctx &= ~CTX_PGSZ_MASK;
 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2791,9 +2792,12 @@ void hugetlb_setup(struct pt_regs *regs)
 			 * also executing in this address space.
 			 */
 			mm->context.sparc64_ctx_val = ctx;
-			on_each_cpu(context_reload, mm, 0);
+			need_context_reload = true;
 		}
-		spin_unlock(&ctx_alloc_lock);
+		spin_unlock_irq(&ctx_alloc_lock);
+
+		if (need_context_reload)
+			on_each_cpu(context_reload, mm, 0);
 	}
 }
 #endif
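The second half of the patch is easy to miss: once the critical section runs with interrupts disabled via spin_lock_irq(), on_each_cpu() can no longer be called inside it, because on_each_cpu() issues cross-calls to the other CPUs and must not be called with interrupts disabled (a CPU waiting on cross-call machinery while unable to take IPIs itself can deadlock). The patch therefore only records the decision under the lock and performs the cross-call after the unlock. Below is a condensed, illustrative sketch of that shape: ctx_alloc_lock, context_reload, and sparc64_ctx_val are the real symbols from arch/sparc/mm/init_64.c, but the stand-alone helper and its new_ctx parameter are invented here and the snippet is not compilable outside that file.

```c
/* Condensed sketch of the patched logic: decide under the IRQ-disabled
 * lock, defer the cross-call until interrupts are back on. */
static void reload_context_pagesize(struct mm_struct *mm, unsigned long new_ctx)
{
	bool need_context_reload = false;

	spin_lock_irq(&ctx_alloc_lock);
	if (mm->context.sparc64_ctx_val != new_ctx) {
		mm->context.sparc64_ctx_val = new_ctx;
		/* Don't call on_each_cpu() here: interrupts are off. */
		need_context_reload = true;
	}
	spin_unlock_irq(&ctx_alloc_lock);	/* re-enables interrupts */

	if (need_context_reload)
		on_each_cpu(context_reload, mm, 0);
}
```

Deferring the cross-call rather than switching to spin_lock_irqsave() is the simpler fix here: it keeps the critical section short and avoids calling a potentially long-running, all-CPUs operation while holding the lock at all.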