author     Al Viro <viro@zeniv.linux.org.uk>    2014-06-12 00:27:11 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2014-06-12 00:28:09 -0400
commit     9c1d5284c79fea050f115eadeec1dd1758e5c630 (patch)
tree       4d16fd5aad7ff4931e985c0128c5747f23561f8a /mm
parent     5f073850602084fbcbb987948ff3e70ae273f7d2 (diff)
parent     9f12600fe425bc28f0ccba034a77783c09c15af4 (diff)
Merge commit '9f12600fe425bc28f0ccba034a77783c09c15af4' into for-linus
Backmerge of dcache.c changes from mainline. It's that, or complete rebase...

Conflicts:
	fs/splice.c

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           15
-rw-r--r--  mm/filemap.c          6
-rw-r--r--  mm/kmemleak.c         4
-rw-r--r--  mm/madvise.c          2
-rw-r--r--  mm/memcontrol.c      27
-rw-r--r--  mm/memory-failure.c  17
-rw-r--r--  mm/mremap.c           9
-rw-r--r--  mm/percpu.c           2
8 files changed, 55 insertions, 27 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index ebe5880c29d6..1b5a95f0fa01 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
config GENERIC_EARLY_IOREMAP
bool
+
+config MAX_STACK_SIZE_MB
+ int "Maximum user stack size for 32-bit processes (MB)"
+ default 80
+ range 8 256 if METAG
+ range 8 2048
+ depends on STACK_GROWSUP && (!64BIT || COMPAT)
+ help
+ This is the maximum stack size in Megabytes in the VM layout of 32-bit
+ user processes when the stack grows upwards (currently only on parisc
+ and metag arch). The stack will be located at the highest memory
+ address minus the given value, unless the RLIMIT_STACK hard limit is
+ changed to a smaller value in which case that is used.
+
+ A sane initial value is 80 MB.
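The placement rule in the help text amounts to: stack base = highest task address minus min(CONFIG_MAX_STACK_SIZE_MB, RLIMIT_STACK hard limit). A minimal userspace sketch of that arithmetic, with illustrative names (stack_base and task_top are not kernel identifiers):

#include <stdio.h>

#define MB (1024UL * 1024)

/* Hypothetical sketch of the placement rule the help text describes:
 * the stack ends up at the task's highest usable address minus
 * min(CONFIG_MAX_STACK_SIZE_MB, RLIMIT_STACK hard limit). */
static unsigned long stack_base(unsigned long task_top,
                                unsigned long cfg_max_mb,
                                unsigned long rlim_hard)
{
        unsigned long max = cfg_max_mb * MB;

        if (rlim_hard < max)     /* a smaller hard limit wins */
                max = rlim_hard;
        return task_top - max;
}

int main(void)
{
        /* 3 GiB task size, default 80 MB, generous hard limit */
        printf("base = %#lx\n", stack_base(0xc0000000UL, 80, ~0UL));
        return 0;
}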
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f724e3cdf24..7499ef19f1c1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
{
int ret = 0;
/* Check for outstanding write errors */
- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ if (test_bit(AS_ENOSPC, &mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &mapping->flags))
ret = -ENOSPC;
- if (test_and_clear_bit(AS_EIO, &mapping->flags))
+ if (test_bit(AS_EIO, &mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &mapping->flags))
ret = -EIO;
return ret;
}
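The filemap.c change is the classic check-before-atomic-RMW optimization: a plain read keeps the common no-error path from dirtying the shared flags cacheline with an unconditional test_and_clear. A minimal C11 sketch of the same pattern (kernel bitops swapped for stdatomic; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* The plain load lets the common no-error case stay read-only; only
 * when the flag is likely set do we pay for the atomic
 * read-modify-write that actually claims it. */
static bool test_and_clear_flag(atomic_uint *flags, unsigned int bit)
{
        unsigned int mask = 1u << bit;

        if (!(atomic_load_explicit(flags, memory_order_relaxed) & mask))
                return false;   /* fast path: no shared-cacheline write */
        return atomic_fetch_and(flags, ~mask) & mask;
}

int main(void)
{
        atomic_uint flags = 1u << 2;

        printf("%d\n", test_and_clear_flag(&flags, 2)); /* 1: claimed */
        printf("%d\n", test_and_clear_flag(&flags, 2)); /* 0: already clear */
        return 0;
}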
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 91d67eaee050..8d2fcdfeff7f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- kmemleak_early_log = 0;
-
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
+ kmemleak_early_log = 0;
kmemleak_disable();
return;
}
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
/* the kernel is still in UP mode, so disabling the IRQs is enough */
local_irq_save(flags);
+ kmemleak_early_log = 0;
if (kmemleak_error) {
local_irq_restore(flags);
return;
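The kmemleak.c change delays clearing kmemleak_early_log so that the switch away from early buffering happens together with the code that consumes (or deliberately discards) the early log, closing the window where events could be routed to a not-yet-ready path. A loose userspace sketch of that ordering rule, with illustrative names and no claim to match kmemleak's internals:

#include <stdio.h>

/* A subsystem that buffers events before it is ready must flip its
 * "early" flag only at the point where the buffered events are
 * replayed (or deliberately discarded); otherwise events arriving
 * between the flag flip and the replay are silently lost. */
enum { EARLY, LIVE };
static int state = EARLY;
static int early_buf[16], early_n;

static void log_event(int ev)
{
        if (state == EARLY)
                early_buf[early_n++] = ev;   /* buffered for replay */
        else
                printf("live event %d\n", ev);
}

static void subsystem_init(void)
{
        /* Flip the flag and drain in one step; in the kernel patch the
         * same effect comes from doing both with interrupts disabled. */
        state = LIVE;
        for (int i = 0; i < early_n; i++)
                printf("replayed event %d\n", early_buf[i]);
}

int main(void)
{
        log_event(1);
        log_event(2);
        subsystem_init();
        log_event(3);
        return 0;
}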
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b323..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
for (; start < end; start += PAGE_SIZE) {
index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- page = find_get_page(mapping, index);
+ page = find_get_entry(mapping, index);
if (!radix_tree_exceptional_entry(page)) {
if (page)
page_cache_release(page);
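The madvise.c fix matters because shmem pages that have been swapped out are represented in the page cache by "exceptional" (swap) entries, which find_get_page() filters out; find_get_entry() returns them raw so the readahead loop can still see them, which is why the radix_tree_exceptional_entry() check follows. A simplified sketch of the tag-bit convention behind that check (the bit value and helper names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EXCEPTIONAL_BIT 0x2UL

/* Exceptional entries (swap entries, shadows) are distinguished from
 * real page pointers by a low tag bit, so a lookup helper can either
 * filter them or hand them back raw. */
static int entry_is_exceptional(void *entry)
{
        return ((uintptr_t)entry & EXCEPTIONAL_BIT) != 0;
}

static void *find_entry_raw(void **slots, unsigned long index)
{
        return slots[index];            /* may be a page or a swap entry */
}

int main(void)
{
        int page;                       /* stand-in for struct page */
        void *slots[2] = { &page, (void *)(0x30UL | EXCEPTIONAL_BIT) };

        for (unsigned long i = 0; i < 2; i++) {
                void *e = find_entry_raw(slots, i);
                printf("index %lu: %s\n", i,
                       entry_is_exceptional(e) ? "swap entry" : "page");
        }
        return 0;
}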
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c47dffdcb246..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
rcu_read_lock();
do {
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
+ /*
+ * Page cache insertions can happen without an
+ * actual mm context, e.g. during disk probing
+ * on boot, loopback IO, acct() writes etc.
+ */
+ if (unlikely(!mm))
memcg = root_mem_cgroup;
+ else {
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ memcg = root_mem_cgroup;
+ }
} while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
return 0;
}
- /*
- * Page cache insertions can happen without an actual mm
- * context, e.g. during disk probing on boot.
- */
- if (unlikely(!mm))
- memcg = root_mem_cgroup;
- else {
- memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
- if (!memcg)
- return -ENOMEM;
- }
+ memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+ if (!memcg)
+ return -ENOMEM;
__mem_cgroup_commit_charge(memcg, page, 1, type, false);
return 0;
}
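Taken together, the two memcontrol.c hunks move the NULL-mm fallback out of the caller and into get_mem_cgroup_from_mm() itself, so every charge path gets it for free and mem_cgroup_charge_file() loses its duplicated special case. A sketch of that refactor shape, with illustrative types and names:

#include <stdio.h>

struct group { const char *name; };
static struct group root_group = { "root" };

struct mm { struct group *owner_group; };

/* Centralize the fallback in the lookup helper so callers can charge
 * unconditionally instead of each one special-casing a NULL mm. */
static struct group *group_from_mm(struct mm *mm)
{
        /* Page cache insertions can happen without an mm (boot-time
         * disk probing, loopback IO, ...): fall back to the root. */
        if (!mm || !mm->owner_group)
                return &root_group;
        return mm->owner_group;
}

int main(void)
{
        struct group g = { "user" };
        struct mm m = { &g };

        printf("%s\n", group_from_mm(&m)->name);   /* user */
        printf("%s\n", group_from_mm(NULL)->name); /* root */
        return 0;
}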
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf137..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
return 0;
} else if (PageHuge(hpage)) {
/*
- * Check "just unpoisoned", "filter hit", and
- * "race with other subpage."
+ * Check "filter hit" and "race with other subpage."
*/
lock_page(hpage);
- if (!PageHWPoison(hpage)
- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != hpage && TestSetPageHWPoison(hpage))) {
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- return 0;
+ if (PageHWPoison(hpage)) {
+ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unlock_page(hpage);
+ return 0;
+ }
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ put_page(hpage);
res = 0;
goto out;
}
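Both memory-failure.c hunks are about balancing early returns: the huge-page path gains the unlock_page() it was missing, and the "just unpoisoned" path now undoes the poison count and the page reference taken earlier. A skeleton of that bug class, with illustrative names (a pthread mutex standing in for the page lock):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static long poisoned_pages;

/* Every early return taken after the lock is acquired must be paired
 * with an unlock, and every bail-out after bumping a counter must undo
 * the accounting; the patch fixes both kinds of imbalance. */
static int handle_failure(int already_unpoisoned)
{
        poisoned_pages++;               /* assume poisoned up front */

        pthread_mutex_lock(&page_lock);
        if (already_unpoisoned) {
                poisoned_pages--;       /* undo the accounting */
                pthread_mutex_unlock(&page_lock); /* the missing unlock */
                return 0;
        }
        /* ... real handling would go here ... */
        pthread_mutex_unlock(&page_lock);
        return -1;
}

int main(void)
{
        handle_failure(1);
        printf("poisoned_pages = %ld\n", poisoned_pages); /* 0 */
        return 0;
}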
diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb66f3d..05f1180e9f21 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
break;
if (pmd_trans_huge(*old_pmd)) {
int err = 0;
- if (extent == HPAGE_PMD_SIZE)
+ if (extent == HPAGE_PMD_SIZE) {
+ VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+ /* See comment in move_ptes() */
+ if (need_rmap_locks)
+ anon_vma_lock_write(vma->anon_vma);
err = move_huge_pmd(vma, new_vma, old_addr,
new_addr, old_end,
old_pmd, new_pmd);
+ if (need_rmap_locks)
+ anon_vma_unlock_write(vma->anon_vma);
+ }
if (err > 0) {
need_flush = true;
continue;
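The mremap.c hunk mirrors move_ptes(): when a concurrent rmap walk could otherwise observe the page at neither (or both of) the old and new addresses, the huge-pmd move must happen under the anon_vma write lock. A stripped-down sketch of that rule, with illustrative names (an rwlock standing in for the anon_vma lock):

#include <stdio.h>
#include <pthread.h>

static pthread_rwlock_t rmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long mapping_addr = 0x1000;

/* Move the mapping under the rmap write lock when walkers may race,
 * so any reader holding the lock sees one consistent location. */
static void move_mapping(unsigned long new_addr, int need_rmap_locks)
{
        if (need_rmap_locks)
                pthread_rwlock_wrlock(&rmap_lock);
        mapping_addr = new_addr;        /* the "huge pmd move" */
        if (need_rmap_locks)
                pthread_rwlock_unlock(&rmap_lock);
}

int main(void)
{
        move_mapping(0x2000, 1);
        printf("mapping at %#lx\n", mapping_addr);
        return 0;
}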
diff --git a/mm/percpu.c b/mm/percpu.c
index 63e24fb4387b..2ddf9a990dbd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
sizeof(chunk->map[0]));
if (!chunk->map) {
- kfree(chunk);
+ pcpu_mem_free(chunk, pcpu_chunk_struct_size);
return NULL;
}
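The percpu.c one-liner restores alloc/free symmetry: the chunk itself comes from pcpu_mem_zalloc(), which picks between kzalloc and vzalloc depending on size, so releasing it with a bare kfree() is wrong for the vmalloc case. A userspace sketch of the general invariant, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

static size_t accounted;

/* Memory obtained from a wrapper allocator must be released through
 * the matching wrapper, or whatever bookkeeping the wrapper does
 * (size accounting here; the vmalloc-vs-kmalloc choice in the kernel
 * case) is bypassed or corrupted. */
static void *mem_zalloc(size_t size)
{
        accounted += size;
        return calloc(1, size);
}

static void mem_free(void *p, size_t size)
{
        accounted -= size;              /* stays balanced only this way */
        free(p);
}

int main(void)
{
        void *chunk = mem_zalloc(128);

        mem_free(chunk, 128);           /* not a bare free(chunk)! */
        printf("accounted = %zu\n", accounted); /* 0 */
        return 0;
}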