author    David Miller <davem@davemloft.net>    2011-07-25 00:01:22 +0000
committer David S. Miller <davem@davemloft.net> 2011-11-30 18:46:43 -0500
commit    5b8b0060cbd6332ae5d1fa0bec0e8e211248d0e7 (patch)
tree      40ba4f43e875c830aefc3aef42fc05510b624922 /net/core
parent    1026fec8739663621d64216ba939c23bc1d089b7 (diff)
neigh: Get rid of neigh_table->kmem_cachep
We are going to alloc device specific private areas for neighbour entries, and in order to do that we have to move away from the fixed allocation size enforced by using neigh_table->kmem_cachep.

As a nice side effect we can now use kfree_rcu().

Signed-off-by: David S. Miller <davem@davemloft.net>
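For context, the change amounts to swapping a dedicated slab cache for plain kzalloc()/kfree_rcu(). A minimal sketch of that pattern, using a hypothetical struct example_entry that is not part of this patch:

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical entry type; stands in for struct neighbour. */
struct example_entry {
	struct rcu_head rcu;
	unsigned long data;
};

static struct example_entry *example_alloc(size_t entry_size)
{
	/* kzalloc() accepts an arbitrary size, so callers may request
	 * entry_size plus a device specific private area. */
	return kzalloc(entry_size, GFP_ATOMIC);
}

static void example_release(struct example_entry *e)
{
	/* Because the object came from kzalloc(), it can be freed after a
	 * grace period with kfree_rcu(), with no open-coded call_rcu()
	 * callback such as neigh_destroy_rcu() below. */
	kfree_rcu(e, rcu);
}

kfree_rcu() ultimately hands the object to kfree(), which is why it could not be used while entries came from a dedicated kmem_cache and had to be returned with kmem_cache_free().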
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/neighbour.c | 18 ++----------------
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 27d3fefeaa1..661ad12e0cc 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -288,7 +288,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 			goto out_entries;
 	}
-	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+	n = kzalloc(tbl->entry_size, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;
@@ -678,12 +678,6 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
 		neigh_parms_destroy(parms);
 }
-static void neigh_destroy_rcu(struct rcu_head *head)
-{
-	struct neighbour *neigh = container_of(head, struct neighbour, rcu);
-
-	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
-}
 /*
  *	neighbour must already be out of the table;
  *
@@ -711,7 +705,7 @@ void neigh_destroy(struct neighbour *neigh)
 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 	atomic_dec(&neigh->tbl->entries);
-	call_rcu(&neigh->rcu, neigh_destroy_rcu);
+	kfree_rcu(neigh, rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
@@ -1486,11 +1480,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
 	tbl->parms.reachable_time =
 			neigh_rand_reach_time(tbl->parms.base_reachable_time);
-	if (!tbl->kmem_cachep)
-		tbl->kmem_cachep =
-			kmem_cache_create(tbl->id, tbl->entry_size, 0,
-					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-					  NULL);
 	tbl->stats = alloc_percpu(struct neigh_statistics);
 	if (!tbl->stats)
 		panic("cannot create neighbour cache statistics");
@@ -1575,9 +1564,6 @@ int neigh_table_clear(struct neigh_table *tbl)
 	free_percpu(tbl->stats);
 	tbl->stats = NULL;
-	kmem_cache_destroy(tbl->kmem_cachep);
-	tbl->kmem_cachep = NULL;
-
 	return 0;
 }
 EXPORT_SYMBOL(neigh_table_clear);