author | Christoph Lameter <cl@linux.com> | 2015-06-29 09:28:08 -0500
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-06-29 10:49:51 -0700
commit | a9730fca9946f3697410479e0ef1bd759ba00a77 (patch) |
tree | 0708dfc873d6df1e7df1ce7cd6575f19cd8b5ed0 /mm |
parent | 88793e5c774ec69351ef6b5200bb59f532e41bca (diff) |
download | linux-rpi-a9730fca9946f3697410479e0ef1bd759ba00a77.tar.gz linux-rpi-a9730fca9946f3697410479e0ef1bd759ba00a77.tar.bz2 linux-rpi-a9730fca9946f3697410479e0ef1bd759ba00a77.zip |
Fix kmalloc slab creation sequence
This patch restores the slab creation sequence that was broken by commit
4066c33d0308f8 and also reverts the portions that introduced the
KMALLOC_LOOP_XXX macros. Those can never really work since the slab creation
is much more complex than just going from a minimum to a maximum number.
The latest upstream kernel boots cleanly on my machine with a 64 bit x86
configuration under KVM using either SLAB or SLUB.
Fixes: 4066c33d0308f8 ("support the slub_debug boot option")
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
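The reasoning above refers to the kmalloc index layout, in which slots 1 and 2 of the kmalloc array hold the non-power-of-two 96-byte and 192-byte caches while slot i >= 3 holds the 2^i-byte cache. The following is a minimal userspace sketch of that layout, not kernel code; the index-to-size mapping and the KMALLOC_SHIFT_HIGH value are assumptions based on the usual layout of the kmalloc_info[]/kmalloc_caches[] arrays rather than something stated by this patch.

```c
/*
 * Minimal userspace sketch (not kernel code) of the kmalloc index layout.
 * Assumed mapping: index 1 -> 96 bytes, index 2 -> 192 bytes,
 * index i >= 3 -> 2^i bytes; KMALLOC_SHIFT_HIGH = 13 is also assumed.
 */
#include <stdio.h>

#define KMALLOC_SHIFT_HIGH 13	/* assumed: largest index modeled here */

static unsigned int kmalloc_size(int idx)
{
	if (idx == 1)
		return 96;	/* non-power-of-two cache */
	if (idx == 2)
		return 192;	/* non-power-of-two cache */
	return 1u << idx;	/* regular power-of-two caches */
}

int main(void)
{
	/* Index 0 is unused; indices 1 and 2 break the simple shift pattern. */
	for (int i = 1; i <= KMALLOC_SHIFT_HIGH; i++)
		printf("kmalloc_caches[%2d] -> kmalloc-%u\n", i, kmalloc_size(i));
	return 0;
}
```

Because two slots fall outside the power-of-two pattern, a loop that only walks a shift value from a minimum to a maximum cannot describe when those two caches get created, which is why the KMALLOC_LOOP_XXX macros are reverted and the explicit ordering in the diff below is restored.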
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab_common.c | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9f8d71f78404..983b78694c46 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -855,6 +855,12 @@ void __init setup_kmalloc_cache_index_table(void)
 	}
 }
 
+static void new_kmalloc_cache(int idx, unsigned long flags)
+{
+	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
+					kmalloc_info[idx].size, flags);
+}
+
 /*
  * Create the kmalloc array. Some of the regular kmalloc arrays
  * may already have been created because they were needed to
@@ -864,25 +870,19 @@ void __init create_kmalloc_caches(unsigned long flags)
 {
 	int i;
 
-	for (i = KMALLOC_LOOP_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
-		if (!kmalloc_caches[i]) {
-			kmalloc_caches[i] = create_kmalloc_cache(
-					kmalloc_info[i].name,
-					kmalloc_info[i].size,
-					flags);
-		}
+	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+		if (!kmalloc_caches[i])
+			new_kmalloc_cache(i, flags);
 
 		/*
-		 * "i == 2" is the "kmalloc-192" case which is the last special
-		 * case for initialization and it's the point to jump to
-		 * allocate the minimize size of the object. In slab allocator,
-		 * the KMALLOC_SHIFT_LOW = 5. So, it needs to skip 2^3 and 2^4
-		 * and go straight to allocate 2^5. If the ARCH_DMA_MINALIGN is
-		 * defined, it may be larger than 2^5 and here is also the
-		 * trick to skip the empty gap.
+		 * Caches that are not of the two-to-the-power-of size.
+		 * These have to be created immediately after the
+		 * earlier power of two caches
 		 */
-		if (i == 2)
-			i = (KMALLOC_SHIFT_LOW - 1);
+		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+			new_kmalloc_cache(1, flags);
+		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+			new_kmalloc_cache(2, flags);
 	}
 
 	/* Kmalloc array is now usable */
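As a usage illustration, here is a hedged userspace model of the creation order the patched loop produces. It is not kernel code: new_kmalloc_cache() is replaced by a bookkeeping printf, and the KMALLOC_SHIFT_LOW/KMALLOC_SHIFT_HIGH/KMALLOC_MIN_SIZE values are assumed for a typical 64-bit configuration; only the ordering logic mirrors the hunk above.

```c
/*
 * Userspace model (assumption-laden sketch, not kernel code) of the order in
 * which the patched create_kmalloc_caches() brings up the kmalloc array:
 * power-of-two caches in ascending order, with kmalloc-96 created right after
 * kmalloc-64 (i == 6) and kmalloc-192 right after kmalloc-128 (i == 7).
 */
#include <stdio.h>
#include <stdbool.h>

#define KMALLOC_SHIFT_LOW  3	/* assumed value */
#define KMALLOC_SHIFT_HIGH 13	/* assumed value */
#define KMALLOC_MIN_SIZE   (1 << KMALLOC_SHIFT_LOW)

static bool created[KMALLOC_SHIFT_HIGH + 1];

/* Stand-in for new_kmalloc_cache(): just record and report the creation. */
static void new_kmalloc_cache(int idx)
{
	static const unsigned int odd_size[] = { 0, 96, 192 };

	created[idx] = true;
	printf("create kmalloc-%u (index %d)\n",
	       idx <= 2 ? odd_size[idx] : 1u << idx, idx);
}

int main(void)
{
	for (int i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!created[i])
			new_kmalloc_cache(i);

		/* Non-power-of-two caches follow their power-of-two neighbours. */
		if (KMALLOC_MIN_SIZE <= 32 && !created[1] && i == 6)
			new_kmalloc_cache(1);
		if (KMALLOC_MIN_SIZE <= 64 && !created[2] && i == 7)
			new_kmalloc_cache(2);
	}
	return 0;
}
```

With these assumed constants the model prints kmalloc-8 through kmalloc-64 in ascending order, then kmalloc-96 (index 1), then kmalloc-128, then kmalloc-192 (index 2), followed by the remaining power-of-two caches; this is the sequence the commit message describes as more complex than simply going from a minimum to a maximum number.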