mm/zswap: use postorder iteration when destroying rbtree
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Reviewed-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: David Woodhouse <David.Woodhouse@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c993e11aa
commit 0bd42136f7
1 changed file with 2 additions and 14 deletions
mm/zswap.c
@@ -790,26 +790,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 static void zswap_frontswap_invalidate_area(unsigned type)
 {
 	struct zswap_tree *tree = zswap_trees[type];
-	struct rb_node *node;
-	struct zswap_entry *entry;
+	struct zswap_entry *entry, *n;
 
 	if (!tree)
 		return;
 
 	/* walk the tree and free everything */
 	spin_lock(&tree->lock);
-	/*
-	 * TODO: Even though this code should not be executed because
-	 * the try_to_unuse() in swapoff should have emptied the tree,
-	 * it is very wasteful to rebalance the tree after every
-	 * removal when we are freeing the whole tree.
-	 *
-	 * If post-order traversal code is ever added to the rbtree
-	 * implementation, it should be used here.
-	 */
-	while ((node = rb_first(&tree->rbroot))) {
-		entry = rb_entry(node, struct zswap_entry, rbnode);
-		rb_erase(&entry->rbnode, &tree->rbroot);
+	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
 		zbud_free(tree->pool, entry->handle);
 		zswap_entry_cache_free(entry);
 		atomic_dec(&zswap_stored_pages);
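For context: rbtree_postorder_for_each_entry_safe() works because a postorder walk visits both children of a node before the node itself, and the "safe" variant fetches the next node before running the loop body, so the body may free the current entry. The old loop instead paid for a full rb_erase() rebalance on every node even though the whole tree was being discarded. Below is a minimal userspace sketch of the same idea, written against a plain binary tree with a recursive walk; it is illustrative only and is not the kernel's macro, which iterates without recursion.

/*
 * Illustrative userspace sketch (not kernel code): freeing a whole
 * tree in postorder.  Both children are destroyed before their
 * parent, so once a node is freed the walk never needs to touch it
 * again -- no per-node unlinking or rebalancing required.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

static struct node *node_new(int key, struct node *l, struct node *r)
{
	struct node *n = malloc(sizeof(*n));
	n->key = key;
	n->left = l;
	n->right = r;
	return n;
}

/* Postorder destroy: children first, then the node itself. */
static void tree_destroy(struct node *n)
{
	if (!n)
		return;
	tree_destroy(n->left);
	tree_destroy(n->right);
	printf("freeing %d\n", n->key);
	free(n);
}

int main(void)
{
	/* Small fixed tree; the shape does not matter for the argument. */
	struct node *root = node_new(2,
				     node_new(1, NULL, NULL),
				     node_new(3, NULL, NULL));
	tree_destroy(root);	/* prints 1, 3, 2: leaves before root */
	return 0;
}

The kernel achieves the same visit order iteratively, via the rb_first_postorder()/rb_next_postorder() helpers that the rbtree postorder series added, which is why the zswap loop above can free each entry in place.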