#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);
/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in that case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
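
/*
 * Illustrative sketch only, not part of this header: how a balloon driver's
 * address_space_operations.migratepage() callback might report the special
 * case described above.  The function name and the elided body are
 * hypothetical; only the return-code semantics come from this file.
 */
#if 0	/* example, never built */
static int example_balloon_migratepage(struct address_space *mapping,
				       struct page *newpage, struct page *page,
				       enum migrate_mode mode)
{
	/* ... transfer the balloon page's contents/state to newpage ... */

	/*
	 * Returning MIGRATEPAGE_BALLOON_SUCCESS rather than
	 * MIGRATEPAGE_SUCCESS lets unmap_and_move() handle the balloon
	 * page specially in its wrap-up steps, avoiding the page-leak
	 * race noted above.
	 */
	return MIGRATEPAGE_BALLOON_SUCCESS;
}
#endif
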
	MR_SYSCALL,		/* also applies to cpusets */
#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);
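
/*
 * Illustrative sketch only, not part of this header: a minimal new_page_t
 * allocation callback plus a migrate_pages() call, assuming the caller has
 * already isolated its pages onto 'pagelist'.  The example_* names and the
 * target-node policy are hypothetical; GFP_HIGHUSER_MOVABLE and
 * alloc_pages_node() come from other mm headers.
 */
#if 0	/* example, never built */
static struct page *example_new_page(struct page *page, unsigned long private,
				     int **result)
{
	/* Allocate the destination page on the node passed via 'private'. */
	return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static int example_migrate_list(struct list_head *pagelist, int target_nid)
{
	int err;

	err = migrate_pages(pagelist, example_new_page,
			    (unsigned long)target_nid,
			    MIGRATE_SYNC, MR_SYSCALL);
	if (err)	/* some pages were not migrated, or an error occurred */
		putback_movable_pages(pagelist);
	return err;
}
#endif
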
extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
static inline int migrate_vmas(struct mm_struct *mm,
			const nodemask_t *from, const nodemask_t *to,
			unsigned long flags)
{
	return -ENOSYS;
}
static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}
/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
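
/*
 * Illustrative sketch only, not part of this header: how a huge-PMD NUMA
 * fault path might use the two helpers above before touching the page
 * (their real caller lives in mm/huge_memory.c).  The function name is
 * hypothetical.
 */
#if 0	/* example, never built */
static void example_wait_for_thp_migration(struct vm_area_struct *vma,
					   pmd_t *pmdp, pmd_t entry)
{
	/*
	 * If another CPU is already migrating the huge page mapped by
	 * 'entry', wait for that migration to finish instead of racing
	 * with it.
	 */
	if (pmd_trans_migrating(entry))
		wait_migrate_huge_page(vma->anon_vma, pmdp);
}
#endif
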
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */