author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2010-08-24 09:26:21 +0200
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2010-08-24 09:26:34 +0200
commit	050eef364ad700590a605a0749f825cab4834b1e (patch)
tree	2714c9cf7edcbf394971cc2c929e5ab2ea34d6a6	/arch/s390/include/asm/pgtable.h
parent	7af048dc7639db5202c56fecf2346c310647a218 (diff)
[S390] fix tlb flushing vs. concurrent /proc accesses
The tlb flushing code uses the mm_users field of the mm_struct to decide if each page table entry needs to be flushed individually with IPTE or if a global flush for the mm_struct is sufficient after all page table updates have been done. The comment for mm_users says "How many users with user space?", but the /proc code increases mm_users after it has found the process structure by pid, without creating a new user process. This makes mm_users useless for the decision between the two tlb flushing methods. The current code can be confused into not flushing tlb entries by a concurrent access to /proc files, e.g. while a fork is in progress.

The solution for this problem is to make the tlb flushing logic independent of the mm_users field.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
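To make the new decision path easier to follow, here is a minimal, stand-alone C sketch of the logic the patched macros encode. The types, field layout and helper names (needs_direct_flush, finish_pte_updates, active_mm as a plain variable) are invented for illustration and are not the real s390 kernel definitions; the real code performs the actual IPTE/global flush operations instead of printing.

/*
 * Stand-alone sketch of the flush decision after this patch.
 * All structures and helpers below are simplified stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_context {
	atomic_int attach_count;	/* how many CPUs/tasks have this mm attached */
	bool flush_mm;			/* a deferred global flush is still pending */
};

struct mm_struct {
	struct mm_context context;
};

/* Stand-in for current->active_mm in this sketch. */
static struct mm_struct *active_mm;

/*
 * Mirror of the decision in the patched ptep_get_and_clear macros:
 * always record that a flush is owed, then flush the single entry
 * directly only if another CPU/task may still see the old TLB entry.
 */
static bool needs_direct_flush(struct mm_struct *mm)
{
	mm->context.flush_mm = true;
	return atomic_load(&mm->context.attach_count) > 1 || mm != active_mm;
}

/* Hypothetical batch finisher: one global flush covers everything deferred. */
static void finish_pte_updates(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		printf("global TLB flush for mm %p\n", (void *)mm);
		mm->context.flush_mm = false;
	}
}

int main(void)
{
	struct mm_struct mm = { .context = { .attach_count = 1, .flush_mm = false } };

	active_mm = &mm;

	/* Only this task is attached, so the per-entry flush can be deferred. */
	printf("direct flush needed: %d\n", needs_direct_flush(&mm));

	/* Simulate a second attachment, e.g. another CPU running this mm. */
	atomic_fetch_add(&mm.context.attach_count, 1);
	printf("direct flush needed: %d\n", needs_direct_flush(&mm));

	finish_pte_updates(&mm);
	return 0;
}

The point of the change is visible in the condition: attach_count counts only genuine attachments of the address space, so a /proc access that raises mm_users no longer influences the choice between IPTE and a deferred global flush, and flush_mm records that a deferred flush is still outstanding.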
Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--	arch/s390/include/asm/pgtable.h	|  6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 89a504c3f12..3157441ee1d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
pte_t __pte = *(__ptep); \
- if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm)->context.flush_mm = 1; \
+ if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __address, __ptep); \
else \
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
({ \
pte_t __pte = *(__ptep); \
if (pte_write(__pte)) { \
- if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm)->context.flush_mm = 1; \
+ if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \