2006-03-22 01:09:12 -07:00
|
|
|
#ifndef _LINUX_MIGRATE_H
|
|
|
|
#define _LINUX_MIGRATE_H
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
2007-05-06 15:50:20 -06:00
|
|
|
#include <linux/mempolicy.h>
|
2012-01-20 15:33:53 -07:00
|
|
|
#include <linux/migrate_mode.h>
|
2006-03-22 01:09:12 -07:00
|
|
|
|
2006-06-23 03:03:55 -06:00
|
|
|
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
|
2006-06-23 03:03:53 -06:00
|
|
|
|
2012-12-11 17:02:31 -07:00
|
|
|
/*
|
|
|
|
 * Return values from address_space_operations.migratepage():
|
|
|
|
* - negative errno on page migration failure;
|
|
|
|
* - zero on page migration success;
|
2012-12-11 17:02:38 -07:00
|
|
|
*
|
|
|
|
* The balloon page migration introduces this special case where a 'distinct'
|
|
|
|
* return code is used to flag a successful page migration to unmap_and_move().
|
|
|
|
* This approach is necessary because page migration can race against balloon
|
|
|
|
* deflation procedure, and for such case we could introduce a nasty page leak
|
|
|
|
* if a successfully migrated balloon page gets released concurrently with
|
|
|
|
* migration's unmap_and_move() wrap-up steps.
|
2012-12-11 17:02:31 -07:00
|
|
|
*/
|
|
|
|
#define MIGRATEPAGE_SUCCESS 0
|
2012-12-11 17:02:38 -07:00
|
|
|
#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page
|
|
|
|
 * successful migration case.
|
|
|
|
*/
|
2012-10-19 07:07:31 -06:00
|
|
|
/*
 * Reason codes identifying why a set of pages is being migrated; passed
 * as the 'reason' argument to migrate_pages().
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};
|
2012-12-11 17:02:31 -07:00
|
|
|
|
2007-05-06 15:50:20 -06:00
|
|
|
#ifdef CONFIG_MIGRATION
|
2009-01-06 15:39:16 -07:00
|
|
|
|
2012-12-11 17:02:47 -07:00
|
|
|
extern void putback_movable_pages(struct list_head *l);
|
2006-06-23 03:03:33 -06:00
|
|
|
extern int migrate_page(struct address_space *,
|
2012-01-12 18:19:43 -07:00
|
|
|
struct page *, struct page *, enum migrate_mode);
|
ksm: memory hotremove migration only
The previous patch enables page migration of ksm pages, but that soon gets
into trouble: not surprising, since we're using the ksm page lock to lock
operations on its stable_node, but page migration switches the page whose
lock is to be used for that. Another layer of locking would fix it, but
do we need that yet?
Do we actually need page migration of ksm pages? Yes, memory hotremove
needs to offline sections of memory: and since we stopped allocating ksm
pages with GFP_HIGHUSER, they will tend to be GFP_HIGHUSER_MOVABLE
candidates for migration.
But KSM is currently unconscious of NUMA issues, happily merging pages
from different NUMA nodes: at present the rule must be, not to use
MADV_MERGEABLE where you care about NUMA. So no, NUMA page migration of
ksm pages does not make sense yet.
So, to complete support for ksm swapping we need to make hotremove safe.
ksm_memory_callback() takes ksm_thread_mutex when MEM_GOING_OFFLINE and
release it when MEM_OFFLINE or MEM_CANCEL_OFFLINE. But if mapped pages
are freed before migration reaches them, stable_nodes may be left still
pointing to struct pages which have been removed from the system: the
stable_node needs to identify a page by pfn rather than page pointer, then
it can safely prune them when MEM_OFFLINE.
And make NUMA migration skip PageKsm pages where it skips PageReserved.
But it's only when we reach unmap_and_move() that the page lock is taken
and we can be sure that raised pagecount has prevented a PageAnon from
being upgraded: so add offlining arg to migrate_pages(), to migrate ksm
page when offlining (has sufficient locking) but reject it otherwise.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-12-14 18:59:33 -07:00
|
|
|
extern int migrate_pages(struct list_head *l, new_page_t x,
|
2013-02-22 17:35:14 -07:00
|
|
|
unsigned long private, enum migrate_mode mode, int reason);
|
2006-06-23 03:03:53 -06:00
|
|
|
|
2006-03-22 01:09:12 -07:00
|
|
|
extern int migrate_prep(void);
|
2010-05-24 15:32:27 -06:00
|
|
|
extern int migrate_prep_local(void);
|
2006-06-25 06:46:48 -06:00
|
|
|
extern int migrate_vmas(struct mm_struct *mm,
|
|
|
|
const nodemask_t *from, const nodemask_t *to,
|
|
|
|
unsigned long flags);
|
2010-09-07 19:19:35 -06:00
|
|
|
extern void migrate_page_copy(struct page *newpage, struct page *page);
|
|
|
|
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
|
|
|
|
struct page *newpage, struct page *page);
|
2013-07-16 03:56:16 -06:00
|
|
|
extern int migrate_page_move_mapping(struct address_space *mapping,
|
|
|
|
struct page *newpage, struct page *page,
|
2013-12-21 15:56:08 -07:00
|
|
|
struct buffer_head *head, enum migrate_mode mode,
|
|
|
|
int extra_count);
|
2006-03-22 01:09:12 -07:00
|
|
|
#else
|
2009-01-06 15:39:16 -07:00
|
|
|
|
2012-12-11 17:02:47 -07:00
|
|
|
static inline void putback_movable_pages(struct list_head *l) {}
|
2006-06-23 03:03:53 -06:00
|
|
|
/*
 * CONFIG_MIGRATION=n stub: migration is unavailable, so report -ENOSYS
 * without touching the page list.
 */
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }
|
2006-03-31 03:29:56 -07:00
|
|
|
|
2006-03-22 01:09:12 -07:00
|
|
|
static inline int migrate_prep(void) { return -ENOSYS; }
|
2010-05-24 15:32:27 -06:00
|
|
|
static inline int migrate_prep_local(void) { return -ENOSYS; }
|
2006-03-22 01:09:12 -07:00
|
|
|
|
2006-06-25 06:46:48 -06:00
|
|
|
/* CONFIG_MIGRATION=n stub: vma migration is unsupported, report -ENOSYS. */
static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}
|
|
|
|
|
2010-09-07 19:19:35 -06:00
|
|
|
/* CONFIG_MIGRATION=n stub: no migration, so nothing to copy — no-op. */
static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}
|
|
|
|
|
2010-09-29 20:54:51 -06:00
|
|
|
/* CONFIG_MIGRATION=n stub: hugepage mapping moves are unsupported. */
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}
|
|
|
|
|
2006-03-22 01:09:12 -07:00
|
|
|
/* Possible settings for the migrate_page() method in address_space_operations */
|
|
|
|
#define migrate_page NULL
|
|
|
|
|
|
|
|
#endif /* CONFIG_MIGRATION */
|
2012-10-25 06:16:34 -06:00
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA_BALANCING
|
2013-12-18 18:08:42 -07:00
|
|
|
extern bool pmd_trans_migrating(pmd_t pmd);
|
|
|
|
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
|
2013-10-07 04:29:05 -06:00
|
|
|
extern int migrate_misplaced_page(struct page *page,
|
|
|
|
struct vm_area_struct *vma, int node);
|
2012-11-19 03:59:15 -07:00
|
|
|
extern bool migrate_ratelimited(int node);
|
2012-10-25 06:16:34 -06:00
|
|
|
#else
|
2013-12-18 18:08:42 -07:00
|
|
|
/*
 * !CONFIG_NUMA_BALANCING stub: no NUMA hinting migration happens, so a
 * pmd can never be in the middle of a migration.
 */
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
|
|
|
|
/* !CONFIG_NUMA_BALANCING stub: no huge page migration to wait for — no-op. */
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
|
2013-10-07 04:29:05 -06:00
|
|
|
/* !CONFIG_NUMA_BALANCING stub: misplaced-page migration is unavailable. */
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
|
2012-11-19 03:59:15 -07:00
|
|
|
/*
 * !CONFIG_NUMA_BALANCING stub: there is no NUMA migration traffic, hence
 * nothing is ever rate-limited.
 */
static inline bool migrate_ratelimited(int node) { return false; }
|
2012-12-05 02:32:56 -07:00
|
|
|
#endif /* CONFIG_NUMA_BALANCING */
|
2012-11-19 05:35:47 -07:00
|
|
|
|
2012-12-05 02:32:56 -07:00
|
|
|
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
|
|
|
|
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
|
|
|
|
struct vm_area_struct *vma,
|
|
|
|
pmd_t *pmd, pmd_t entry,
|
|
|
|
unsigned long address,
|
|
|
|
struct page *page, int node);
|
|
|
|
#else
|
2012-11-19 05:35:47 -07:00
|
|
|
/*
 * Stub when NUMA balancing and/or transparent hugepages are compiled out:
 * transhuge migration cannot proceed.
 */
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
|
2012-12-05 02:32:56 -07:00
|
|
|
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
|
2012-10-25 06:16:34 -06:00
|
|
|
|
2006-03-22 01:09:12 -07:00
|
|
|
#endif /* _LINUX_MIGRATE_H */
|