mm/zswap: refactor the get/put routines
The refcount routines did not fit the kernel's get/put semantics exactly: there were too many refcount checks at the call sites, and the refcount could go negative. This patch does the following:

- move the refcount check into zswap_entry_put() so that the resource-freeing function is hidden from callers.
- add a new function, zswap_entry_find_get(), so that callers can easily use the following pattern:

	zswap_entry_find_get()
	... /* do something */
	zswap_entry_put()

- move some function definitions earlier in the file to eliminate compile errors.

This patch is based on Minchan Kim <minchan@kernel.org>'s idea and suggestion.

Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Cc: Seth Jennings <sjennings@variantweb.net>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
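The resulting caller pattern looks roughly like the sketch below. This is only an illustration of the intended usage under the tree lock, not part of the patch; the surrounding "tree", "offset" and the work done on the entry are assumed.

	/* illustrative sketch of the new get/put caller pattern */
	struct zswap_entry *entry;

	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);	/* search + refcount++ */
	spin_unlock(&tree->lock);
	if (!entry)
		return -1;

	/* ... do something with the entry ... */

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);	/* refcount--; frees the entry when it drops to 0 */
	spin_unlock(&tree->lock);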
parent 67d13fe846
commit 0ab0abcf51

1 changed file with 88 additions and 94 deletions

mm/zswap.c (+88, −94)
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -217,6 +217,7 @@ static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 	if (!entry)
 		return NULL;
 	entry->refcount = 1;
+	RB_CLEAR_NODE(&entry->rbnode);
 	return entry;
 }
 
@@ -225,19 +226,6 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
 	kmem_cache_free(zswap_entry_cache, entry);
 }
 
-/* caller must hold the tree lock */
-static void zswap_entry_get(struct zswap_entry *entry)
-{
-	entry->refcount++;
-}
-
-/* caller must hold the tree lock */
-static int zswap_entry_put(struct zswap_entry *entry)
-{
-	entry->refcount--;
-	return entry->refcount;
-}
-
 /*********************************
 * rbtree functions
 **********************************/
@@ -285,6 +273,61 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
 	return 0;
 }
 
+static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
+{
+	if (!RB_EMPTY_NODE(&entry->rbnode)) {
+		rb_erase(&entry->rbnode, root);
+		RB_CLEAR_NODE(&entry->rbnode);
+	}
+}
+
+/*
+ * Carries out the common pattern of freeing and entry's zsmalloc allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_tree *tree,
+			struct zswap_entry *entry)
+{
+	zbud_free(tree->pool, entry->handle);
+	zswap_entry_cache_free(entry);
+	atomic_dec(&zswap_stored_pages);
+	zswap_pool_pages = zbud_get_pool_size(tree->pool);
+}
+
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+	entry->refcount++;
+}
+
+/* caller must hold the tree lock
+* remove from the tree and free it, if nobody reference the entry
+*/
+static void zswap_entry_put(struct zswap_tree *tree,
+			struct zswap_entry *entry)
+{
+	int refcount = --entry->refcount;
+
+	BUG_ON(refcount < 0);
+	if (refcount == 0) {
+		zswap_rb_erase(&tree->rbroot, entry);
+		zswap_free_entry(tree, entry);
+	}
+}
+
+/* caller must hold the tree lock */
+static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
+				pgoff_t offset)
+{
+	struct zswap_entry *entry = NULL;
+
+	entry = zswap_rb_search(root, offset);
+	if (entry)
+		zswap_entry_get(entry);
+
+	return entry;
+}
+
 /*********************************
 * per-cpu code
 **********************************/
@@ -368,18 +411,6 @@ static bool zswap_is_full(void)
 			zswap_pool_pages);
 }
 
-/*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
- * freeing the entry itself, and decrementing the number of stored pages.
- */
-static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
-{
-	zbud_free(tree->pool, entry->handle);
-	zswap_entry_cache_free(entry);
-	atomic_dec(&zswap_stored_pages);
-	zswap_pool_pages = zbud_get_pool_size(tree->pool);
-}
-
 /*********************************
 * writeback code
 **********************************/
@@ -503,7 +534,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 	struct page *page;
 	u8 *src, *dst;
 	unsigned int dlen;
-	int ret, refcount;
+	int ret;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
@@ -518,13 +549,12 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 
 	/* find and ref zswap entry */
 	spin_lock(&tree->lock);
-	entry = zswap_rb_search(&tree->rbroot, offset);
+	entry = zswap_entry_find_get(&tree->rbroot, offset);
 	if (!entry) {
 		/* entry was invalidated */
 		spin_unlock(&tree->lock);
 		return 0;
 	}
-	zswap_entry_get(entry);
 	spin_unlock(&tree->lock);
 	BUG_ON(offset != entry->offset);
 
@@ -566,42 +596,35 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
 	zswap_written_back_pages++;
 
 	spin_lock(&tree->lock);
-
 	/* drop local reference */
-	zswap_entry_put(entry);
-	/* drop the initial reference from entry creation */
-	refcount = zswap_entry_put(entry);
+	zswap_entry_put(tree, entry);
 
 	/*
-	 * There are three possible values for refcount here:
-	 * (1) refcount is 1, load is in progress, unlink from rbtree,
-	 *     load will free
-	 * (2) refcount is 0, (normal case) entry is valid,
-	 *     remove from rbtree and free entry
-	 * (3) refcount is -1, invalidate happened during writeback,
-	 *     free entry
-	 */
-	if (refcount >= 0) {
-		/* no invalidate yet, remove from rbtree */
-		rb_erase(&entry->rbnode, &tree->rbroot);
-	}
+	* There are two possible situations for entry here:
+	* (1) refcount is 1(normal case),  entry is valid and on the tree
+	* (2) refcount is 0, entry is freed and not on the tree
+	*     because invalidate happened during writeback
+	*  search the tree and free the entry if find entry
+	*/
+	if (entry == zswap_rb_search(&tree->rbroot, offset))
+		zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
-	if (refcount <= 0) {
-		/* free the entry */
-		zswap_free_entry(tree, entry);
-		return 0;
-	}
-	return -EAGAIN;
 
+	goto end;
+
+	/*
+	* if we get here due to ZSWAP_SWAPCACHE_EXIST
+	* a load may happening concurrently
+	* it is safe and okay to not free the entry
+	* if we free the entry in the following put
+	* it it either okay to return !0
+	*/
 fail:
 	spin_lock(&tree->lock);
-	refcount = zswap_entry_put(entry);
-	if (refcount <= 0) {
-		/* invalidate happened, consider writeback as success */
-		zswap_free_entry(tree, entry);
-		ret = 0;
-	}
+	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
+
+end:
 	return ret;
 }
 
@@ -685,11 +708,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
 		if (ret == -EEXIST) {
 			zswap_duplicate_entry++;
 			/* remove from rbtree */
-			rb_erase(&dupentry->rbnode, &tree->rbroot);
-			if (!zswap_entry_put(dupentry)) {
-				/* free */
-				zswap_free_entry(tree, dupentry);
-			}
+			zswap_rb_erase(&tree->rbroot, dupentry);
+			zswap_entry_put(tree, dupentry);
 		}
 	} while (ret == -EEXIST);
 	spin_unlock(&tree->lock);
@@ -718,17 +738,16 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	struct zswap_entry *entry;
 	u8 *src, *dst;
 	unsigned int dlen;
-	int refcount, ret;
+	int ret;
 
 	/* find */
 	spin_lock(&tree->lock);
-	entry = zswap_rb_search(&tree->rbroot, offset);
+	entry = zswap_entry_find_get(&tree->rbroot, offset);
 	if (!entry) {
 		/* entry was written back */
 		spin_unlock(&tree->lock);
 		return -1;
 	}
-	zswap_entry_get(entry);
 	spin_unlock(&tree->lock);
 
 	/* decompress */
@@ -743,22 +762,9 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	BUG_ON(ret);
 
 	spin_lock(&tree->lock);
-	refcount = zswap_entry_put(entry);
-	if (likely(refcount)) {
-		spin_unlock(&tree->lock);
-		return 0;
-	}
+	zswap_entry_put(tree, entry);
 	spin_unlock(&tree->lock);
 
-	/*
-	 * We don't have to unlink from the rbtree because
-	 * zswap_writeback_entry() or zswap_frontswap_invalidate page()
-	 * has already done this for us if we are the last reference.
-	 */
-	/* free */
-
-	zswap_free_entry(tree, entry);
-
 	return 0;
 }
 
@@ -767,7 +773,6 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 {
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
-	int refcount;
 
 	/* find */
 	spin_lock(&tree->lock);
@@ -779,20 +784,12 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 	}
 
 	/* remove from rbtree */
-	rb_erase(&entry->rbnode, &tree->rbroot);
+	zswap_rb_erase(&tree->rbroot, entry);
 
 	/* drop the initial reference from entry creation */
-	refcount = zswap_entry_put(entry);
+	zswap_entry_put(tree, entry);
 
 	spin_unlock(&tree->lock);
-
-	if (refcount) {
-		/* writeback in progress, writeback will free */
-		return;
-	}
-
-	/* free */
-	zswap_free_entry(tree, entry);
 }
 
 /* frees all zswap entries for the given swap type */
@@ -806,11 +803,8 @@ static void zswap_frontswap_invalidate_area(unsigned type)
 
 	/* walk the tree and free everything */
 	spin_lock(&tree->lock);
-	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
-		zbud_free(tree->pool, entry->handle);
-		zswap_entry_cache_free(entry);
-		atomic_dec(&zswap_stored_pages);
-	}
+	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
+		zswap_free_entry(tree, entry);
 	tree->rbroot = RB_ROOT;
 	spin_unlock(&tree->lock);