author     Tejun Heo <tj@kernel.org>                         2014-08-15 16:06:06 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-10-05 14:54:13 -0700
commit     5df644fd609cd0610a77dd13eed9729bd2eefbd5 (patch)
tree       98c47c483fc713beac0aea32ddddc9137ba04ea8 /mm/percpu-vm.c
parent     f9c27a7c7880c29c7579dea9134dfef355f6c3eb (diff)
percpu: fix pcpu_alloc_pages() failure path
commit f0d279654dea22b7a6ad34b9334aee80cda62cde upstream.

When pcpu_alloc_pages() fails midway, pcpu_free_pages() is invoked to free what has already been allocated. The invocation covers the whole requested range, and pcpu_free_pages() will try to free every non-NULL page. Unfortunately, this is incorrect: pcpu_get_pages_and_bitmap(), contrary to what its comment suggests, does not clear the pages array, so the array may still hold entries from previous invocations, and the partial failure path ends up freeing the wrong pages.

Fix it by open-coding the partial freeing of the already allocated pages.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
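The diff below open-codes exactly this unwinding. As a standalone illustration of the pattern (a minimal userspace sketch, not the kernel code itself), the following C program frees only what the failing call actually allocated: first the partially filled row for the "cpu" that failed, then the rows of every "cpu" completed before it. The names NCPUS, NPAGES, idx() and alloc_all() are hypothetical and exist only for this example.

/*
 * Sketch of the unwind pattern used by the fix: on a mid-loop allocation
 * failure, free only what this call allocated instead of sweeping the
 * whole (possibly stale) array.
 */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS  4
#define NPAGES 8

/* Flat index into a per-cpu page array, mirroring pcpu_page_idx(). */
static size_t idx(int cpu, int i) { return (size_t)cpu * NPAGES + i; }

static int alloc_all(void *pages[], int page_start, int page_end)
{
	int cpu, tcpu, i;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		for (i = page_start; i < page_end; i++) {
			pages[idx(cpu, i)] = malloc(4096);
			if (!pages[idx(cpu, i)])
				goto err;
		}
	}
	return 0;

err:
	/* Unwind the partially filled row for the failing "cpu" ... */
	while (--i >= page_start)
		free(pages[idx(cpu, i)]);
	/* ... then every row fully allocated before it. */
	for (tcpu = 0; tcpu < cpu; tcpu++)
		for (i = page_start; i < page_end; i++)
			free(pages[idx(tcpu, i)]);
	return -1;
}

int main(void)
{
	void *pages[NCPUS * NPAGES] = { 0 };

	if (alloc_all(pages, 0, NPAGES) == 0)
		printf("allocated %d pages\n", NCPUS * NPAGES);
	/* Teardown of a successful allocation is omitted for brevity. */
	return 0;
}

The point of the pattern is that the error path never touches array slots this call did not write, so stale pointers left over from earlier invocations can never be freed by mistake.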
Diffstat (limited to 'mm/percpu-vm.c')
-rw-r--r--  mm/percpu-vm.c  21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4c..8d9bb2c00c6 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    int page_start, int page_end)
 {
 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-	unsigned int cpu;
+	unsigned int cpu, tcpu;
 	int i;
 
 	for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-			if (!*pagep) {
-				pcpu_free_pages(chunk, pages, populated,
-						page_start, page_end);
-				return -ENOMEM;
-			}
+			if (!*pagep)
+				goto err;
 		}
 	}
 	return 0;
+
+err:
+	while (--i >= page_start)
+		__free_page(pages[pcpu_page_idx(cpu, i)]);
+
+	for_each_possible_cpu(tcpu) {
+		if (tcpu == cpu)
+			break;
+		for (i = page_start; i < page_end; i++)
+			__free_page(pages[pcpu_page_idx(tcpu, i)]);
+	}
+	return -ENOMEM;
 }
 
 /**