author | Andreas Gruenbacher <agruen@suse.de> | 2010-08-16 19:05:23 +0200
---|---|---
committer | Al Viro <viro@zeniv.linux.org.uk> | 2010-08-18 06:24:41 -0400
commit | 3a48ee8a4ad26c3a538b6fc11a86a8f80c3dce18 (patch) |
tree | 3e89deaa14c8d72c7484f61c2babc5755a9e991e |
parent | 3b6036d148bad5bb7928b021a49bb9e395361084 (diff) |
mbcache: Limit the maximum number of cache entries
Limit the maximum number of mb_cache entries depending on the number of
hash buckets: if the only limit on the number of cache entries is the
available memory, the hash chains can grow very long and take a long
time to search.
At least partially solves https://bugzilla.lustre.org/show_bug.cgi?id=22771.
Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r-- | fs/mbcache.c | 30
1 file changed, 24 insertions(+), 6 deletions(-)
```diff
diff --git a/fs/mbcache.c b/fs/mbcache.c
index cf4e6cdfd15..93444747237 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -80,6 +80,7 @@ struct mb_cache {
 	struct list_head		c_cache_list;
 	const char			*c_name;
 	atomic_t			c_entry_count;
+	int				c_max_entries;
 	int				c_bucket_bits;
 	struct kmem_cache		*c_entry_cache;
 	struct list_head		*c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
 	if (!cache->c_entry_cache)
 		goto fail2;
 
+	/*
+	 * Set an upper limit on the number of cache entries so that the hash
+	 * chains won't grow too long.
+	 */
+	cache->c_max_entries = bucket_count << 4;
+
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
 	kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-	struct mb_cache_entry *ce;
-
-	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-	if (ce) {
+	struct mb_cache_entry *ce = NULL;
+
+	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+		spin_lock(&mb_cache_spinlock);
+		if (!list_empty(&mb_cache_lru_list)) {
+			ce = list_entry(mb_cache_lru_list.next,
+					struct mb_cache_entry, e_lru_list);
+			list_del_init(&ce->e_lru_list);
+			__mb_cache_entry_unhash(ce);
+		}
+		spin_unlock(&mb_cache_spinlock);
+	}
+	if (!ce) {
+		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+		if (!ce)
+			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
-		ce->e_used = 1 + MB_CACHE_WRITER;
 		ce->e_queued = 0;
 	}
+	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
```
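The sizing rule is worth spelling out: with `c_bucket_bits = b` the cache has `2^b` hash buckets, and `c_max_entries = bucket_count << 4` caps the cache at `2^(b+4)` entries, so the average chain length stays at or below 16 no matter how much memory is available. Below is a minimal userspace sketch of the patch's allocate-or-reuse pattern under such a cap; the names (`cap_cache`, `cap_entry_alloc`, `cap_entry_release`) and the singly linked LRU list are illustrative assumptions, not the kernel's mbcache API:

```c
/*
 * Minimal userspace sketch of the patch's allocate-or-reuse pattern.
 * All names here (cap_cache, cap_entry, cap_entry_alloc) are made up
 * for illustration; this is not the kernel mbcache API.
 */
#include <stdio.h>
#include <stdlib.h>

struct cap_entry {
	struct cap_entry *lru_next;	/* singly linked LRU list, oldest first */
	int key;
};

struct cap_cache {
	struct cap_entry *lru_head;	/* oldest reusable entry */
	struct cap_entry **lru_tail;	/* where the next entry is appended */
	int entry_count;
	int max_entries;		/* bucket_count << 4 in the real patch */
};

static void cap_cache_init(struct cap_cache *cache, int bucket_count)
{
	cache->lru_head = NULL;
	cache->lru_tail = &cache->lru_head;
	cache->entry_count = 0;
	/* Same sizing rule as the patch: at most 16 entries per bucket. */
	cache->max_entries = bucket_count << 4;
}

/*
 * Mirror of mb_cache_entry_alloc() after the patch: once the cache is
 * full, recycle the least-recently-used entry instead of allocating,
 * so entry_count never exceeds max_entries.
 */
static struct cap_entry *cap_entry_alloc(struct cap_cache *cache)
{
	struct cap_entry *ce = NULL;

	if (cache->entry_count >= cache->max_entries && cache->lru_head) {
		ce = cache->lru_head;		/* take the oldest entry */
		cache->lru_head = ce->lru_next;
		if (!cache->lru_head)
			cache->lru_tail = &cache->lru_head;
	}
	if (!ce) {
		ce = malloc(sizeof(*ce));
		if (!ce)
			return NULL;
		cache->entry_count++;
	}
	ce->lru_next = NULL;
	return ce;
}

/* Release an entry back onto the LRU list so it can be reused. */
static void cap_entry_release(struct cap_cache *cache, struct cap_entry *ce)
{
	ce->lru_next = NULL;
	*cache->lru_tail = ce;
	cache->lru_tail = &ce->lru_next;
}

int main(void)
{
	struct cap_cache cache;
	int i;

	cap_cache_init(&cache, 1);	/* one bucket, so the cap is 16 entries */

	for (i = 0; i < 100; i++) {
		struct cap_entry *ce = cap_entry_alloc(&cache);

		if (!ce)
			return 1;
		ce->key = i;
		cap_entry_release(&cache, ce);
	}
	/* Despite 100 allocations, the cache never grew past its cap. */
	printf("entries allocated: %d (cap %d)\n",
	       cache.entry_count, cache.max_entries);
	return 0;
}
```

Note the design choice the patch makes on the reuse path: when the cache is full, the allocator recycles the least-recently-used entry instead of failing, so callers still get an entry whenever memory permits while the hash chains stop growing.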