mbcache: Limit the maximum number of cache entries
Limit the maximum number of mb_cache entries depending on the number of hash buckets: if the only limit on the number of cache entries is the available memory, the hash chains can grow very long and take a long time to search.

At least partially solves https://bugzilla.lustre.org/show_bug.cgi?id=22771.

Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 3b6036d148
commit 3a48ee8a4a

1 changed file with 23 additions and 5 deletions

fs/mbcache.c | 28 (+23, -5)
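As context for the diff below, here is a minimal userspace sketch (not kernel code) of the policy being introduced: the cache is capped at bucket_count << 4 entries, and once that cap is reached a new allocation recycles the oldest entry on the LRU list instead of growing the cache further. The names (struct cache, entry_alloc, and so on) are illustrative stand-ins, not the mbcache API, and locking is omitted.

/*
 * Minimal userspace model of "cap the cache, then recycle the oldest
 * LRU entry".  Illustrative only; not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *lru_prev, *lru_next;	/* doubly linked LRU list */
	int key;				/* stand-in for the hashed block key */
};

struct cache {
	int entry_count;
	int max_entries;	/* bucket_count << 4, as in the patch */
	struct entry lru;	/* list head; lru.lru_next is the oldest entry */
};

static void cache_init(struct cache *c, int bucket_bits)
{
	c->entry_count = 0;
	c->max_entries = (1 << bucket_bits) << 4;
	c->lru.lru_prev = c->lru.lru_next = &c->lru;
}

static void lru_add_tail(struct cache *c, struct entry *e)
{
	e->lru_prev = c->lru.lru_prev;
	e->lru_next = &c->lru;
	c->lru.lru_prev->lru_next = e;
	c->lru.lru_prev = e;
}

/* Allocate a new entry, or recycle the oldest one once the cap is hit. */
static struct entry *entry_alloc(struct cache *c)
{
	struct entry *e = NULL;

	if (c->entry_count >= c->max_entries && c->lru.lru_next != &c->lru) {
		e = c->lru.lru_next;			/* oldest entry */
		e->lru_prev->lru_next = e->lru_next;	/* unlink from LRU */
		e->lru_next->lru_prev = e->lru_prev;
	}
	if (!e) {
		e = malloc(sizeof(*e));
		if (!e)
			return NULL;
		c->entry_count++;
	}
	return e;
}

int main(void)
{
	struct cache c;

	cache_init(&c, 2);	/* 4 buckets -> at most 64 entries */
	for (int i = 0; i < 100; i++) {
		struct entry *e = entry_alloc(&c);
		if (!e)
			return 1;
		e->key = i;
		lru_add_tail(&c, e);	/* recently used entries go to the tail */
	}
	/* entries are deliberately not freed; this is only a demonstration */
	printf("entries allocated: %d (cap %d)\n", c.entry_count, c.max_entries);
	return 0;
}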
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -80,6 +80,7 @@ struct mb_cache {
 	struct list_head		c_cache_list;
 	const char			*c_name;
 	atomic_t			c_entry_count;
+	int				c_max_entries;
 	int				c_bucket_bits;
 	struct kmem_cache		*c_entry_cache;
 	struct list_head		*c_block_hash;
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits)
 	if (!cache->c_entry_cache)
 		goto fail2;
 
+	/*
+	 * Set an upper limit on the number of cache entries so that the hash
+	 * chains won't grow too long.
+	 */
+	cache->c_max_entries = bucket_count << 4;
+
 	spin_lock(&mb_cache_spinlock);
 	list_add(&cache->c_cache_list, &mb_cache_list);
 	spin_unlock(&mb_cache_spinlock);
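The cap scales with the hash table: since mb_cache_create() sizes the table as bucket_count = 1 << bucket_bits, c_max_entries = bucket_count << 4 bounds the average hash chain at 16 entries even when the cache is full. A small standalone calculation, using hypothetical bucket_bits values not taken from the patch:

#include <stdio.h>

int main(void)
{
	/*
	 * mb_cache_create(name, bucket_bits) uses bucket_count = 1 << bucket_bits;
	 * the patch caps entries at bucket_count << 4, i.e. an average of
	 * 16 entries per bucket when the cache is full.
	 */
	for (int bucket_bits = 6; bucket_bits <= 12; bucket_bits += 3) {
		int bucket_count = 1 << bucket_bits;
		int max_entries = bucket_count << 4;

		printf("bucket_bits=%2d buckets=%5d max_entries=%6d avg_chain=%d\n",
		       bucket_bits, bucket_count, max_entries,
		       max_entries / bucket_count);
	}
	return 0;
}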
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache)
 	kfree(cache);
 }
 
-
 /*
  * mb_cache_entry_alloc()
  *
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache)
 struct mb_cache_entry *
 mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 {
-	struct mb_cache_entry *ce;
+	struct mb_cache_entry *ce = NULL;
 
-	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
-	if (ce) {
+	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+		spin_lock(&mb_cache_spinlock);
+		if (!list_empty(&mb_cache_lru_list)) {
+			ce = list_entry(mb_cache_lru_list.next,
+					struct mb_cache_entry, e_lru_list);
+			list_del_init(&ce->e_lru_list);
+			__mb_cache_entry_unhash(ce);
+		}
+		spin_unlock(&mb_cache_spinlock);
+	}
+	if (!ce) {
+		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+		if (!ce)
+			return NULL;
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
-		ce->e_used = 1 + MB_CACHE_WRITER;
 		ce->e_queued = 0;
 	}
+	ce->e_used = 1 + MB_CACHE_WRITER;
 	return ce;
 }
 
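A note on the new mb_cache_entry_alloc(): the c_entry_count comparison is done with atomic_read() before mb_cache_spinlock is taken, so the limit acts as a soft cap; concurrent allocators may briefly overshoot it, which is harmless because the goal is only to keep the average chain length bounded. Below is a rough userspace analogue of that check-without-lock pattern, using C11 atomics and purely illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Soft cap: the counter is checked without holding any lock, so under
 * concurrency the count may briefly exceed 'max'.  That is acceptable when
 * the limit only needs to hold approximately, as in mbcache.
 */
static atomic_int entry_count;

static bool over_soft_cap(int max)
{
	return atomic_load_explicit(&entry_count, memory_order_relaxed) >= max;
}

int main(void)
{
	int max = 4;	/* hypothetical cap */

	for (int i = 0; i < 6; i++) {
		if (!over_soft_cap(max))
			atomic_fetch_add_explicit(&entry_count, 1,
						  memory_order_relaxed);
	}
	printf("entries: %d (soft cap %d)\n", atomic_load(&entry_count), max);
	return 0;
}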