frontswap: support exclusive gets if tmem backend is capable
Tmem, as originally specified, assumes that "get" operations performed on persistent pools never flush the page of data out of tmem on a successful get, waiting instead for a flush operation. This is intended to mimic the model of a swap disk, where a disk read is non-destructive. Unlike a disk, however, freeing up the RAM can be valuable. Over the years that frontswap was in the review process, several reviewers (and notably Hugh Dickins in 2010) pointed out that this would result, at least temporarily, in two copies of the data in RAM: one (compressed for zcache) copy in tmem, and one copy in the swap cache. We wondered if this could be done differently, at least optionally.

This patch allows tmem backends to instruct the frontswap code that this backend performs exclusive gets. Zcache2 already contains hooks to support this feature. Other backends are completely unaffected unless/until they are updated to support this feature.

While it is not clear that exclusive gets are a performance win on all workloads at all times, this small patch allows for experimentation by backends.

P.S. Let's not quibble about the naming of "get" vs "read" vs "load" etc. The naming is currently horribly inconsistent between cleancache and frontswap and existing tmem backends, so it will need to be straightened out as a separate patch. "Get" is used by the tmem architecture spec, existing backends, and all documentation and presentation material, so I am using it in this patch.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent a00bb1e9fc
commit e3483a5f3a

2 changed files with 24 additions and 1 deletion
include/linux/frontswap.h
@@ -19,6 +19,8 @@ extern struct frontswap_ops
 extern void frontswap_shrink(unsigned long);
 extern unsigned long frontswap_curr_pages(void);
 extern void frontswap_writethrough(bool);
+#define FRONTSWAP_HAS_EXCLUSIVE_GETS
+extern void frontswap_tmem_exclusive_gets(bool);
 
 extern void __frontswap_init(unsigned type);
 extern int __frontswap_store(struct page *page);

mm/frontswap.c
@@ -44,6 +44,13 @@ EXPORT_SYMBOL(frontswap_enabled);
  */
 static bool frontswap_writethrough_enabled __read_mostly;
 
+/*
+ * If enabled, the underlying tmem implementation is capable of doing
+ * exclusive gets, so frontswap_load, on a successful tmem_get must
+ * mark the page as no longer in frontswap AND mark it dirty.
+ */
+static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_FS
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -96,6 +103,15 @@ void frontswap_writethrough(bool enable)
 }
 EXPORT_SYMBOL(frontswap_writethrough);
 
+/*
+ * Enable/disable frontswap exclusive gets (see above).
+ */
+void frontswap_tmem_exclusive_gets(bool enable)
+{
+	frontswap_tmem_exclusive_gets_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
+
 /*
  * Called when a swap device is swapon'd.
  */
@@ -174,8 +190,13 @@ int __frontswap_load(struct page *page)
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
 		ret = frontswap_ops.load(type, offset, page);
-	if (ret == 0)
+	if (ret == 0) {
 		inc_frontswap_loads();
+		if (frontswap_tmem_exclusive_gets_enabled) {
+			SetPageDirty(page);
+			frontswap_clear(sis, offset);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(__frontswap_load);
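For reference, a backend that implements exclusive gets would opt in once at initialization time by calling the new frontswap_tmem_exclusive_gets(true); the FRONTSWAP_HAS_EXCLUSIVE_GETS define added to the header lets backend code compile against kernels with or without this patch. The sketch below is illustrative only: my_backend_init() is a hypothetical backend init function (zcache2 carries its own hooks, per the commit message); the frontswap call and the #define are the ones introduced here.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/frontswap.h>

/* Hypothetical backend init, shown only to illustrate the opt-in. */
static int __init my_backend_init(void)
{
	/* ... normal backend setup and frontswap ops registration elided ... */

#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
	/*
	 * Tell frontswap that a successful get consumes the tmem copy;
	 * __frontswap_load() will then SetPageDirty() the page and clear
	 * its frontswap map bit instead of leaving a duplicate in tmem.
	 */
	frontswap_tmem_exclusive_gets(true);
#endif
	return 0;
}
module_init(my_backend_init);

On a kernel whose frontswap lacks the feature, the guarded block compiles away, so the same backend source continues to build and behave as before.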