author    Geliang Tang <geliangtang@163.com>    2016-01-14 15:18:02 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-01-14 16:00:49 -0800
commit    7aa0d22785deea2725a23716823edd96e65c2ff6 (patch)
tree      6cda7f570983465f883ffd5c768b63a8a5dafa7f /mm/slab.c
parent    73c0219d8eca4114d81626032055598bc0a17130 (diff)
mm/slab.c: add a helper function get_first_slab
Add a new helper function get_first_slab() that gets the first slab from a kmem_cache_node.

Signed-off-by: Geliang Tang <geliangtang@163.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
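The lookup order the helper encodes (prefer a partially used slab; otherwise mark the free list as touched and take the first completely free slab; return NULL when both lists are empty) can be sketched as a small stand-alone program. The toy_* structures below are illustrative only, not the kernel's data types, and the singly linked lists stand in for the kernel's struct list_head machinery.

/*
 * Minimal user-space sketch of the get_first_slab() lookup order.
 * toy_* names are hypothetical; this is not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_slab {
	struct toy_slab *next;	/* singly linked for simplicity */
	int id;
};

struct toy_node {
	struct toy_slab *slabs_partial;	/* slabs with some objects in use */
	struct toy_slab *slabs_free;	/* completely free slabs */
	int free_touched;
};

static struct toy_slab *toy_get_first_slab(struct toy_node *n)
{
	struct toy_slab *slab = n->slabs_partial;

	if (!slab) {
		/* No partial slab: note that we dipped into the free list. */
		n->free_touched = 1;
		slab = n->slabs_free;
	}
	/* NULL means both lists are empty and the caller must grow the cache. */
	return slab;
}

int main(void)
{
	struct toy_slab free_slab = { .next = NULL, .id = 42 };
	struct toy_node node = { .slabs_partial = NULL, .slabs_free = &free_slab };
	struct toy_slab *slab = toy_get_first_slab(&node);

	if (slab)
		printf("got slab %d, free_touched=%d\n", slab->id, node.free_touched);
	else
		printf("both lists empty, would goto must_grow\n");
	return 0;
}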
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 5d5aa3bbdc3f..6ecc697a8bc4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2756,6 +2756,21 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
#define cache_free_debugcheck(x,objp,z) (objp)
#endif
+static struct page *get_first_slab(struct kmem_cache_node *n)
+{
+ struct page *page;
+
+ page = list_first_entry_or_null(&n->slabs_partial,
+ struct page, lru);
+ if (!page) {
+ n->free_touched = 1;
+ page = list_first_entry_or_null(&n->slabs_free,
+ struct page, lru);
+ }
+
+ return page;
+}
+
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
bool force_refill)
{
@@ -2793,15 +2808,9 @@ retry:
while (batchcount > 0) {
struct page *page;
/* Get slab alloc is to come from. */
- page = list_first_entry_or_null(&n->slabs_partial,
- struct page, lru);
- if (!page) {
- n->free_touched = 1;
- page = list_first_entry_or_null(&n->slabs_free,
- struct page, lru);
- if (!page)
- goto must_grow;
- }
+ page = get_first_slab(n);
+ if (!page)
+ goto must_grow;
check_spinlock_acquired(cachep);
@@ -3097,15 +3106,9 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
retry:
check_irq_off();
spin_lock(&n->list_lock);
- page = list_first_entry_or_null(&n->slabs_partial,
- struct page, lru);
- if (!page) {
- n->free_touched = 1;
- page = list_first_entry_or_null(&n->slabs_free,
- struct page, lru);
- if (!page)
- goto must_grow;
- }
+ page = get_first_slab(n);
+ if (!page)
+ goto must_grow;
check_spinlock_acquired_node(cachep, nodeid);
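After this patch both call sites reduce to the same short pattern; the list lookup still runs under n->list_lock, which the ____cache_alloc_node hunk takes explicitly just before the call. A paraphrase of the resulting caller code, taken from the hunks above rather than any new interface:

	spin_lock(&n->list_lock);
	page = get_first_slab(n);
	if (!page)
		goto must_grow;	/* both slab lists empty: fall through and grow the cache */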