Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4a2cba6c4a67..4eeb0a864e45 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -384,10 +384,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
}
}
-static int task_early_kill(struct task_struct *tsk)
+static int task_early_kill(struct task_struct *tsk, int force_early)
{
if (!tsk->mm)
return 0;
+ if (force_early)
+ return 1;
if (tsk->flags & PF_MCE_PROCESS)
return !!(tsk->flags & PF_MCE_EARLY);
return sysctl_memory_failure_early_kill;
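For reference, here is task_early_kill() as it reads once this hunk is applied; this is a sketch reconstructed from the diff lines above (the closing brace is implied by the surrounding context). The new force_early argument overrides both the per-task PF_MCE_* flags and the memory_failure_early_kill sysctl.

static int task_early_kill(struct task_struct *tsk, int force_early)
{
	/* A task with no address space cannot map the poisoned page. */
	if (!tsk->mm)
		return 0;
	/* Action-required errors kill early regardless of policy. */
	if (force_early)
		return 1;
	/* Otherwise fall back to the per-task and global policies. */
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}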
@@ -397,7 +399,7 @@ static int task_early_kill(struct task_struct *tsk)
* Collect processes when the error hit an anonymous page.
*/
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc)
+ struct to_kill **tkc, int force_early)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -413,7 +415,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
for_each_process (tsk) {
struct anon_vma_chain *vmac;
- if (!task_early_kill(tsk))
+ if (!task_early_kill(tsk, force_early))
continue;
anon_vma_interval_tree_foreach(vmac, &av->rb_root,
pgoff, pgoff) {
@@ -432,7 +434,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
* Collect processes when the error hit a file mapped page.
*/
static void collect_procs_file(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc)
+ struct to_kill **tkc, int force_early)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -443,7 +445,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
for_each_process(tsk) {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
- if (!task_early_kill(tsk))
+ if (!task_early_kill(tsk, force_early))
continue;
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
@@ -469,7 +471,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* First preallocate one tokill structure outside the spin locks,
* so that we can kill at least one process reasonably reliable.
*/
-static void collect_procs(struct page *page, struct list_head *tokill)
+static void collect_procs(struct page *page, struct list_head *tokill,
+ int force_early)
{
struct to_kill *tk;
@@ -480,9 +483,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
if (!tk)
return;
if (PageAnon(page))
- collect_procs_anon(page, tokill, &tk);
+ collect_procs_anon(page, tokill, &tk, force_early);
else
- collect_procs_file(page, tokill, &tk);
+ collect_procs_file(page, tokill, &tk, force_early);
kfree(tk);
}
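Taken together, the hunks above leave collect_procs() looking roughly like the sketch below, assembled only from the lines the diff shows (the preallocation of the to_kill structure falls in the part of the function the diff elides). The new force_early flag is threaded through unchanged to the anon and file collectors, each of which passes it to task_early_kill() for every candidate task in its for_each_process() loop.

static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	struct to_kill *tk;

	/* ... preallocation of tk, elided by the diff context ... */
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk, force_early);
	else
		collect_procs_file(page, tokill, &tk, force_early);
	kfree(tk);
}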
@@ -967,7 +970,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* there's nothing that can be done.
*/
if (kill)
- collect_procs(ppage, &tokill);
+ collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
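The practical effect shows at this call site: hwpoison_user_mappings() now forwards flags & MF_ACTION_REQUIRED as force_early, so an action-required (synchronously consumed) error collects every task mapping the page for early killing, while action-optional reports keep honoring PF_MCE_PROCESS/PF_MCE_EARLY and the sysctl. As a hedged usage sketch, assuming the memory_failure(pfn, trapno, flags) entry point of this kernel (trapno also appears in kill_procs() above), a caller would select the behaviour like this:

/* Synchronous, action-required error: the CPU consumed poisoned data,
 * so every mapper is signalled early regardless of its MCE policy. */
memory_failure(pfn, trapno, MF_ACTION_REQUIRED);

/* Action-optional report (e.g. patrol scrub): the early-kill policy
 * from PF_MCE_EARLY / the sysctl still decides who is killed now. */
memory_failure(pfn, trapno, 0);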