author    Rusty Russell <rusty@rustcorp.com.au>    2013-07-03 10:06:28 +0930
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-07-13 11:42:26 -0700
commit    0b72ac9665b63dd029675ea601cb46defd2c974d (patch)
tree      110983bb19dd69dd1a13baf9f2c5ceaa044d5ebd
parent    2842e87389ad1af50afd3fb89b7832aae7f3a7c0 (diff)
module: do percpu allocation after uniqueness check. No, really!
commit 8d8022e8aba85192e937f1f0f7450e256d66ae5c upstream.

v3.8-rc1-5-g1fb9341 was supposed to stop parallel kvm loads exhausting
percpu memory on large machines:

    Now we have a new state MODULE_STATE_UNFORMED, we can insert the
    module into the list (and thus guarantee its uniqueness) before we
    allocate the per-cpu region.

In my defence, it didn't actually say the patch did this.  Just that
we "can".

This patch actually *does* it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Tested-by: Jim Hull <jim.hull@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
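The ordering the patch enforces is simple to state: make the module's name
unique in the shared list first, and only then touch the percpu allocator,
so N parallel loads of the same module cost one allocation instead of N.
Below is a minimal userspace sketch of that pattern; the names (entry,
registry, add_unformed, load) are illustrative stand-ins, not kernel API,
and the mutex-protected list mirrors the role module_mutex plays around
add_unformed_module() in the real code.

    /*
     * Sketch: check uniqueness in a shared registry *before* the
     * expensive allocation.  Losers of a parallel load exit cheaply
     * with -EEXIST without ever touching the allocator.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry {
            struct entry *next;
            char name[64];
            void *percpu;           /* stand-in for the per-cpu region */
    };

    static struct entry *registry;
    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Insert as "unformed"; fail with -EEXIST if the name is taken. */
    static int add_unformed(struct entry *e)
    {
            struct entry *it;

            pthread_mutex_lock(&registry_lock);
            for (it = registry; it; it = it->next) {
                    if (!strcmp(it->name, e->name)) {
                            pthread_mutex_unlock(&registry_lock);
                            return -EEXIST;
                    }
            }
            e->next = registry;
            registry = e;
            pthread_mutex_unlock(&registry_lock);
            return 0;
    }

    static int load(const char *name)
    {
            struct entry *e = calloc(1, sizeof(*e));
            int err;

            if (!e)
                    return -ENOMEM;
            strncpy(e->name, name, sizeof(e->name) - 1);

            err = add_unformed(e);          /* uniqueness check first... */
            if (err) {
                    free(e);                /* loser exits cheaply */
                    return err;
            }

            e->percpu = malloc(1 << 20);    /* ...expensive allocation second */
            if (!e->percpu)
                    return -ENOMEM;         /* real code would also unlink e */
            return 0;
    }

Before this change, every parallel loader reached percpu_modalloc() inside
layout_and_allocate() and only afterwards lost the uniqueness race, which is
how concurrent kvm loads exhausted percpu memory on large machines.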
-rw-r--r--    kernel/module.c    34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index cab4bce49c2..fa53db8aade 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2927,7 +2927,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2942,17 +2941,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
@@ -2963,17 +2955,22 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3237,6 +3234,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)