Diffstat (limited to 'include/linux/slab.h')
 include/linux/slab.h | 231
 1 file changed, 163 insertions(+), 68 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5d168d7e0a2..0c621752caa 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -94,29 +94,6 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put an
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-#ifdef CONFIG_SLOB
-struct kmem_cache {
- unsigned int object_size;/* The original size of the object */
- unsigned int size; /* The aligned/padded/added on size */
- unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
- const char *name; /* Slab name for sysfs */
- int refcount; /* Use counter */
- void (*ctor)(void *); /* Called on object slot creation */
- struct list_head list; /* List of all slab caches on the system */
-};
-#endif
struct mem_cgroup;
/*
@@ -148,7 +125,63 @@ void kmem_cache_free(struct kmem_cache *, void *);
(__flags), NULL)
/*
- * The largest kmalloc size supported by the slab allocators is
+ * Common kmalloc functions provided by all allocators
+ */
+void * __must_check __krealloc(const void *, size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
+void kfree(const void *);
+void kzfree(const void *);
+size_t ksize(const void *);
+
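
As an illustrative usage sketch (not part of this patch; buffer names and sizes are hypothetical): krealloc() resizes an allocation while preserving its contents and leaves the original buffer intact on failure, and kzfree() zeroes the memory before freeing it, which suits buffers holding key material.

	char *buf = kmalloc(64, GFP_KERNEL);
	char *key = kmalloc(32, GFP_KERNEL);

	if (buf) {
		char *bigger = krealloc(buf, 128, GFP_KERNEL);

		if (bigger)	/* on failure the old buffer is untouched */
			buf = bigger;
	}
	kfree(buf);
	kzfree(key);	/* clears the 32 bytes before freeing */
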
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
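
For illustration, an architecture with non-coherent DMA would typically set ARCH_DMA_MINALIGN to its cache line size in <asm/cache.h>, so a kmalloc'd DMA buffer never shares a cache line with unrelated data. A sketch (the shift value is an assumption; ARM follows this pattern):

	#define L1_CACHE_SHIFT		6
	#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES	/* 64-byte kmalloc alignment */
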
+
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put an
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+struct kmem_cache {
+ unsigned int object_size;/* The original size of the object */
+ unsigned int size; /* The aligned/padded/added on size */
+ unsigned int align; /* Alignment as calculated */
+ unsigned long flags; /* Active flags on the slab */
+ const char *name; /* Slab name for sysfs */
+ int refcount; /* Use counter */
+ void (*ctor)(void *); /* Called on object slot creation */
+ struct list_head list; /* List of all slab caches on the system */
+};
+
+#define KMALLOC_MAX_SIZE (1UL << 30)
+
+#include <linux/slob_def.h>
+
+#else /* CONFIG_SLOB */
+
+/*
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocator is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
* less than 32 MB.
*
@@ -158,22 +191,120 @@ void kmem_cache_free(struct kmem_cache *, void *);
*/
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 5
+#endif
+#else
+/*
+ * SLUB directly allocates requests fitting in an order-1 page
+ * (PAGE_SIZE*2) and otherwise passes the request to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
-#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocator */
+#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
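
Worked example of these constants, assuming PAGE_SHIFT = 12 (4 KiB pages) and MAX_ORDER = 11:

	/*
	 * SLUB: KMALLOC_SHIFT_HIGH = 12 + 1 = 13
	 *	-> KMALLOC_MAX_CACHE_SIZE = 1UL << 13 = 8 KiB via slab caches
	 *	KMALLOC_SHIFT_MAX = 11 + 12 = 23
	 *	-> KMALLOC_MAX_SIZE = 1UL << 23 = 8 MiB, KMALLOC_MAX_ORDER = 11
	 *
	 * SLAB: KMALLOC_SHIFT_HIGH = min(11 + 12 - 1, 25) = 22
	 *	-> both KMALLOC_MAX_CACHE_SIZE and KMALLOC_MAX_SIZE are 4 MiB
	 */
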
/*
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Kmalloc subsystem.
*/
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#ifndef KMALLOC_MIN_SIZE
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
+#endif
+
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+#ifdef CONFIG_ZONE_DMA
+extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
+/*
+ * Figure out which kmalloc slab an allocation of a certain size
+ * belongs to.
+ * 0 = zero alloc
+ * 1 = 65 .. 96 bytes
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1)+1 .. 2^n
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+ if (!size)
+ return 0;
+
+ if (size <= KMALLOC_MIN_SIZE)
+ return KMALLOC_SHIFT_LOW;
+
+ if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
+ return 1;
+ if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
+ return 2;
+ if (size <= 8) return 3;
+ if (size <= 16) return 4;
+ if (size <= 32) return 5;
+ if (size <= 64) return 6;
+ if (size <= 128) return 7;
+ if (size <= 256) return 8;
+ if (size <= 512) return 9;
+ if (size <= 1024) return 10;
+ if (size <= 2 * 1024) return 11;
+ if (size <= 4 * 1024) return 12;
+ if (size <= 8 * 1024) return 13;
+ if (size <= 16 * 1024) return 14;
+ if (size <= 32 * 1024) return 15;
+ if (size <= 64 * 1024) return 16;
+ if (size <= 128 * 1024) return 17;
+ if (size <= 256 * 1024) return 18;
+ if (size <= 512 * 1024) return 19;
+ if (size <= 1024 * 1024) return 20;
+ if (size <= 2 * 1024 * 1024) return 21;
+ if (size <= 4 * 1024 * 1024) return 22;
+ if (size <= 8 * 1024 * 1024) return 23;
+ if (size <= 16 * 1024 * 1024) return 24;
+ if (size <= 32 * 1024 * 1024) return 25;
+ if (size <= 64 * 1024 * 1024) return 26;
+ BUG();
+
+ /* Will never be reached. Needed because the compiler may complain */
+ return -1;
+}
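
A minimal sketch of how an allocator's kmalloc() fast path can use kmalloc_index() together with the kmalloc_caches array above (simplified; the real versions live in slab_def.h and slub_def.h, and kmalloc_sketch() is a hypothetical name). When the size is a compile-time constant, kmalloc_index() folds to a constant and the cache lookup costs nothing at run time:

	static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
	{
		if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE) {
			int i = kmalloc_index(size);

			if (!i)
				return ZERO_SIZE_PTR;	/* zero-length request */
			return kmem_cache_alloc(kmalloc_caches[i], flags);
		}
		return __kmalloc(size, flags);	/* variable-sized requests */
	}
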
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#elif defined(CONFIG_SLUB)
+#include <linux/slub_def.h>
#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#error "Unknown slab allocator"
#endif
/*
+ * Determine the size used for the nth kmalloc cache.
+ * Returns the size, or 0 if a kmalloc cache for that
+ * size does not exist.
+ */
+static __always_inline int kmalloc_size(int n)
+{
+ if (n > 2)
+ return 1 << n;
+
+ if (n == 1 && KMALLOC_MIN_SIZE <= 32)
+ return 96;
+
+ if (n == 2 && KMALLOC_MIN_SIZE <= 64)
+ return 192;
+
+ return 0;
+}
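
Illustrative round trips between the two helpers, assuming KMALLOC_MIN_SIZE = 8:

	/*
	 * kmalloc_index(96)  == 1	kmalloc_size(1) == 96
	 * kmalloc_index(150) == 2	kmalloc_size(2) == 192
	 * kmalloc_index(100) == 7	kmalloc_size(7) == 128 (1 << 7)
	 */
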
+#endif /* !CONFIG_SLOB */
+
+/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
* aligned buffers.
@@ -224,42 +355,6 @@ struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
-/*
- * Common kmalloc functions provided by all allocators
- */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kzfree(const void *);
-size_t ksize(const void *);
-
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- * kmem_cache_alloc()
- * __kmalloc()
- * kmalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- * kmem_cache_alloc_node()
- * kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.