author		Hugh Dickins <hugh@veritas.com>		2005-11-07 14:09:01 -0800
committer	David S. Miller <davem@davemloft.net>	2005-11-07 14:09:01 -0800
commit		dedeb0029b9c83420fc1337d4ee53daa7b2a0ad4
tree		d87e66e1d6240cd412c20ecbc12f5b810c9807e4
parent		b8ae48656db860d4c83a29aa7b0588fc89361935
[SPARC64] mm: context switch ptlock
sparc64 is unique among architectures in taking the page_table_lock in
its context switch (well, cris does too, but erroneously, and it's not
yet SMP anyway).
This seems to be a private affair between switch_mm and activate_mm,
using page_table_lock as a per-mm lock, without any relation to its uses
elsewhere. That's fine, but comment it as such; and unlock sooner in
switch_mm, more like in activate_mm (preemption is disabled here).
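Condensed from the mmu_context.h hunk below, the resulting switch_mm locking
pattern looks like this (context-register and TLB reload elided):

	/* page_table_lock serializes switch_mm and activate_mm, and
	 * their calls to get_new_mmu_context; this use of the lock is
	 * unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);	/* dropped before reloading context/TLB state */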
There is a block of "if (0)"ed code in smp_flush_tlb_pending which would
have liked to rely on the page_table_lock, in switch_mm and elsewhere;
but its comment explains how dup_mmap's flush_tlb_mm defeated it. And
though that could have been changed at any time over the past few years,
now the chance vanishes as we push the page_table_lock downwards, and
perhaps split it per page table page. Just delete that block of code.
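With that block and its label gone, the function reduces to the straightforward
flow below (taken from the smp.c hunk that follows):

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);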
Which leaves the mysterious spin_unlock_wait(&oldmm->page_table_lock)
in kernel/fork.c copy_mm. Textual analysis (supported by Nick Piggin)
suggests that the comment was written by DaveM, and that it relates to
the defeated approach in the sparc64 smp_flush_tlb_pending. Just delete
this block too.
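For reference, the block removed from copy_mm (see the kernel/fork.c hunk below):

	/*
	 * There are cases where the PTL is held to ensure no
	 * new threads start up in user mode using an mm, which
	 * allows optimizing out ipis; the tlb_gather_mmu code
	 * is an example.
	 */
	spin_unlock_wait(&oldmm->page_table_lock);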
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 arch/sparc64/kernel/smp.c         | 31
 include/asm-sparc64/mmu_context.h | 46
 kernel/fork.c                     |  7
 3 files changed, 29 insertions(+), 55 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b137fd63f5e..a9089e2140e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -883,34 +883,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-		goto local_flush_and_out;
-	} else {
-		/* This optimization is not valid.  Normally
-		 * we will be holding the page_table_lock, but
-		 * there is an exception which is copy_page_range()
-		 * when forking.  The lock is held during the individual
-		 * page table updates in the parent, but not at the
-		 * top level, which is where we are invoked.
-		 */
-		if (0) {
-			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
-			/* By virtue of running under the mm->page_table_lock,
-			 * and mmu_context.h:switch_mm doing the same, the
-			 * following operation is safe.
-			 */
-			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
-				goto local_flush_and_out;
-		}
-	}
-
-	smp_cross_call_masked(&xcall_flush_tlb_pending,
-			      ctx, nr, (unsigned long) vaddrs,
-			      mm->cpu_vm_mask);
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_pending,
+				      ctx, nr, (unsigned long) vaddrs,
+				      mm->cpu_vm_mask);
 
-local_flush_and_out:
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 87c43c67866..08ba72d7722 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -87,37 +87,35 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid;
+	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
-	if (CTX_VALID(mm->context))
-		ctx_valid = 1;
-	else
-		ctx_valid = 0;
+	ctx_valid = CTX_VALID(mm->context);
+	if (!ctx_valid)
+		get_new_mmu_context(mm);
+	spin_unlock(&mm->page_table_lock);
 
 	if (!ctx_valid || (old_mm != mm)) {
-		if (!ctx_valid)
-			get_new_mmu_context(mm);
-
 		load_secondary_context(mm);
 		reload_tlbmiss_state(tsk, mm);
 	}
 
-	{
-		int cpu = smp_processor_id();
-
-		/* Even if (mm == old_mm) we _must_ check
-		 * the cpu_vm_mask.  If we do not we could
-		 * corrupt the TLB state because of how
-		 * smp_flush_tlb_{page,range,mm} on sparc64
-		 * and lazy tlb switches work. -DaveM
-		 */
-		if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
-			cpu_set(cpu, mm->cpu_vm_mask);
-			__flush_tlb_mm(CTX_HWBITS(mm->context),
-				       SECONDARY_CONTEXT);
-		}
+	/* Even if (mm == old_mm) we _must_ check
+	 * the cpu_vm_mask.  If we do not we could
+	 * corrupt the TLB state because of how
+	 * smp_flush_tlb_{page,range,mm} on sparc64
+	 * and lazy tlb switches work. -DaveM
+	 */
+	cpu = smp_processor_id();
+	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
+		cpu_set(cpu, mm->cpu_vm_mask);
+		__flush_tlb_mm(CTX_HWBITS(mm->context),
+			       SECONDARY_CONTEXT);
 	}
-
-	spin_unlock(&mm->page_table_lock);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -127,6 +125,10 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
 {
 	int cpu;
 
+	/* Note: page_table_lock is used here to serialize switch_mm
+	 * and activate_mm, and their calls to get_new_mmu_context.
+	 * This use of page_table_lock is unrelated to its other uses.
+	 */
 	spin_lock(&mm->page_table_lock);
 	if (!CTX_VALID(mm->context))
 		get_new_mmu_context(mm);
diff --git a/kernel/fork.c b/kernel/fork.c
index efac2c58ec7..158710d2256 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -470,13 +470,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	if (clone_flags & CLONE_VM) {
 		atomic_inc(&oldmm->mm_users);
 		mm = oldmm;
-		/*
-		 * There are cases where the PTL is held to ensure no
-		 * new threads start up in user mode using an mm, which
-		 * allows optimizing out ipis; the tlb_gather_mmu code
-		 * is an example.
-		 */
-		spin_unlock_wait(&oldmm->page_table_lock);
 		goto good_mm;
 	}