-rw-r--r--   include/linux/ksm.h   11
-rw-r--r--   kernel/fork.c          1
-rw-r--r--   mm/ksm.c               5
-rw-r--r--   mm/memory.c            4
-rw-r--r--   mm/mmap.c              7
5 files changed, 8 insertions(+), 20 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 2d64ff30c0d..0e26de6adb5 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -18,8 +18,7 @@ struct mmu_gather;
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
-void __ksm_exit(struct mm_struct *mm,
- struct mmu_gather **tlbp, unsigned long end);
+void __ksm_exit(struct mm_struct *mm);
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -41,11 +40,10 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
}
-static inline void ksm_exit(struct mm_struct *mm,
- struct mmu_gather **tlbp, unsigned long end)
+static inline void ksm_exit(struct mm_struct *mm)
{
if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
- __ksm_exit(mm, tlbp, end);
+ __ksm_exit(mm);
}
/*
@@ -86,8 +84,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
return 0;
}
-static inline void ksm_exit(struct mm_struct *mm,
- struct mmu_gather **tlbp, unsigned long end)
+static inline void ksm_exit(struct mm_struct *mm)
{
}
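Note on the ksm.h hunks above: the only interface change is that ksm_exit() and __ksm_exit() drop the mmu_gather and end-address parameters. A minimal before/after sketch of a call site (illustrative only, caller context elided):

    /* Before: the caller had to hand over its TLB-gather state. */
    ksm_exit(mm, &tlb, end);

    /* After: only the mm is needed, so the call no longer has to sit
     * inside an active tlb_gather_mmu()/tlb_finish_mmu() section. */
    ksm_exit(mm);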
diff --git a/kernel/fork.c b/kernel/fork.c
index 42f20f565b1..73a442b7be6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -501,6 +501,7 @@ void mmput(struct mm_struct *mm)
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
+ ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
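With the kernel/fork.c hunk applied, the final-mmput teardown order becomes (simplified sketch of the code shown above; the rest of mmput() elided):

    if (atomic_dec_and_test(&mm->mm_users)) {
            exit_aio(mm);           /* wait for outstanding AIO */
            ksm_exit(mm);           /* detach this mm from KSM before unmapping */
            exit_mmap(mm);          /* unmap VMAs and free page tables */
            set_mm_exe_file(mm, NULL);
            /* ... */
    }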
diff --git a/mm/ksm.c b/mm/ksm.c
index 722e3f2a8dc..92034eb47eb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1416,8 +1416,7 @@ int __ksm_enter(struct mm_struct *mm)
return 0;
}
-void __ksm_exit(struct mm_struct *mm,
- struct mmu_gather **tlbp, unsigned long end)
+void __ksm_exit(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
int easy_to_free = 0;
@@ -1450,10 +1449,8 @@ void __ksm_exit(struct mm_struct *mm,
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else if (mm_slot) {
- tlb_finish_mmu(*tlbp, 0, end);
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
- *tlbp = tlb_gather_mmu(mm, 1);
}
}
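What survives in __ksm_exit() for the mm_slot case is just an empty mmap_sem write-lock/unlock pair; it no longer has to finish and restart the caller's TLB gather around it. A sketch of the remaining branch, with a comment on what the barrier is presumably for (based only on the code visible in this hunk):

    } else if (mm_slot) {
            /* ksmd still has this mm on its list: taking and dropping
             * mmap_sem for write waits out any holder of the semaphore
             * before the caller's teardown continues. */
            down_write(&mm->mmap_sem);
            up_write(&mm->mmap_sem);
    }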
diff --git a/mm/memory.c b/mm/memory.c
index f47ffe97101..05feaa11d87 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2648,7 +2648,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- if (!pte_none(*page_table) || ksm_test_exit(mm))
+ if (!pte_none(*page_table))
goto release;
inc_mm_counter(mm, anon_rss);
@@ -2792,7 +2792,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* handle that later.
*/
/* Only go through if we didn't race with anybody else... */
- if (likely(pte_same(*page_table, orig_pte) && !ksm_test_exit(mm))) {
+ if (likely(pte_same(*page_table, orig_pte))) {
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
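Both mm/memory.c hunks drop the ksm_test_exit(mm) term and leave only the ordinary pte race checks, e.g. in do_anonymous_page():

    page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    if (!pte_none(*page_table))     /* lost the race: a pte was already installed */
            goto release;

The extra exit test is presumably unnecessary once ksm_exit() runs from mmput(), before exit_mmap() starts tearing the address space down.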
diff --git a/mm/mmap.c b/mm/mmap.c
index e02f1aa66a1..ffd6c6c9bcf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2113,13 +2113,6 @@ void exit_mmap(struct mm_struct *mm)
end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
- /*
- * For KSM to handle OOM without deadlock when it's breaking COW in a
- * likely victim of the OOM killer, we must serialize with ksm_exit()
- * after freeing mm's pages but before freeing its page tables.
- */
- ksm_exit(mm, &tlb, end);
-
free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
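After the mm/mmap.c hunk, exit_mmap() no longer calls into KSM at all; the tail of the function reduces to roughly:

    end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
    vm_unacct_memory(nr_accounted);
    /* ksm_exit() already ran from mmput(), so go straight on to
     * freeing the page tables and finishing the TLB gather. */
    free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
    tlb_finish_mmu(tlb, 0, end);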