mm: migrate: support non-lru movable page migration
include/linux/compaction.h
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
        /* For more detailed tracepoint output - internal to compaction */
        COMPACT_NOT_SUITABLE_ZONE,
        /*
         * compaction didn't start as it was not possible or direct reclaim
         * was more suitable
         */
        COMPACT_SKIPPED,
        /* compaction didn't start as it was deferred due to past failures */
        COMPACT_DEFERRED,

        /* compaction not active last round */
        COMPACT_INACTIVE = COMPACT_DEFERRED,

        /* For more detailed tracepoint output - internal to compaction */
        COMPACT_NO_SUITABLE_PAGE,
        /* compaction should continue to another pageblock */
        COMPACT_CONTINUE,

        /*
         * The full zone was scanned by compaction but it was not successful
         * in compacting suitable pages.
         */
        COMPACT_COMPLETE,
        /*
         * Direct compaction has scanned part of the zone but was not
         * successful in compacting suitable pages.
         */
        COMPACT_PARTIAL_SKIPPED,

        /* compaction terminated prematurely due to lock contention */
        COMPACT_CONTENDED,

        /*
         * direct compaction partially compacted a zone and there might be
         * suitable pages
         */
        COMPACT_PARTIAL,
};
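
/*
 * Illustrative sketch only, not part of this header: a hypothetical helper
 * that maps the externally meaningful compact_result states to short labels,
 * e.g. for debug printing. The tracepoint-internal states fall through to the
 * default case, and COMPACT_INACTIVE is covered by the COMPACT_DEFERRED case
 * since the two share a value. Kept under #if 0 so it is never compiled.
 */
#if 0
static inline const char *example_compact_result_name(enum compact_result result)
{
        switch (result) {
        case COMPACT_SKIPPED:           return "skipped";
        case COMPACT_DEFERRED:          return "deferred";      /* == COMPACT_INACTIVE */
        case COMPACT_CONTINUE:          return "continue";
        case COMPACT_COMPLETE:          return "complete";
        case COMPACT_PARTIAL_SKIPPED:   return "partial_skipped";
        case COMPACT_CONTENDED:         return "contended";
        case COMPACT_PARTIAL:           return "partial";
        default:                        return "internal";
        }
}
#endif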

/* Used to signal whether compaction detected need_resched() or lock contention */
/* No contention detected */
#define COMPACT_CONTENDED_NONE  0
/* Either need_resched() was true or a fatal signal was pending */
#define COMPACT_CONTENDED_SCHED 1
/* Zone lock or lru_lock was contended in async compaction */
#define COMPACT_CONTENDED_LOCK  2
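
/*
 * Illustrative sketch only, not part of this header: one way a direct
 * compaction caller might consume the "contended" out-parameter of
 * try_to_compact_pages() (declared below). The local variables gfp_mask,
 * order, alloc_flags and ac are hypothetical stand-ins for the caller's
 * allocation context. Kept under #if 0 so it is never compiled.
 */
#if 0
        int contended_compaction = COMPACT_CONTENDED_NONE;
        enum compact_result result;

        result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
                                      MIGRATE_ASYNC, &contended_compaction);
        if (contended_compaction == COMPACT_CONTENDED_SCHED) {
                /* need_resched() or a fatal signal was pending: back off */
        } else if (contended_compaction == COMPACT_CONTENDED_LOCK) {
                /* zone->lock or lru_lock was contended in async compaction */
        }
#endif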

struct alloc_context; /* in mm/internal.h */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_compact_unevictable_allowed;

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
                        unsigned int order,
                        unsigned int alloc_flags, const struct alloc_context *ac,
                        enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
                        unsigned int alloc_flags, int classzone_idx);

extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
                        bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
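
/*
 * Illustrative sketch only, not part of this header: the intended lifecycle
 * of the deferral helpers above, as seen by a caller that attempts compaction
 * and then an allocation. Everything here is hypothetical glue code
 * (allocation_succeeded is a made-up flag) and is kept under #if 0 so it is
 * never compiled.
 */
#if 0
        /* before compacting: honour the back-off from earlier failures */
        if (compaction_deferred(zone, order))
                return COMPACT_DEFERRED;

        /* ... run compaction and retry the allocation ... */

        if (allocation_succeeded)
                /* success clears the back-off state for this order */
                compaction_defer_reset(zone, order, true);
        else
                /* another failure makes the next back-off period longer */
                defer_compaction(zone, order);
#endif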

/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
{
        /*
         * Even though this might sound confusing, this in fact tells us
         * that compaction successfully isolated and migrated some
         * pageblocks.
         */
        if (result == COMPACT_PARTIAL)
                return true;

        return false;
}

/* Compaction has failed and it doesn't make much sense to keep retrying. */
static inline bool compaction_failed(enum compact_result result)
{
        /* All zones were scanned completely and still no result. */
        if (result == COMPACT_COMPLETE)
                return true;

        return false;
}

/*
 * Compaction has backed off for some reason. It might be throttling or
 * lock contention. Retrying is still worthwhile.
 */
static inline bool compaction_withdrawn(enum compact_result result)
{
        /*
         * Compaction backed off due to watermark checks for order-0,
         * so regular reclaim has to try harder and reclaim something.
         */
        if (result == COMPACT_SKIPPED)
                return true;

        /*
         * If compaction is deferred for high-order allocations, it is
         * because sync compaction recently failed. If this is the case
         * and the caller requested a THP allocation, we do not want
         * to heavily disrupt the system, so we fail the allocation
         * instead of entering direct reclaim.
         */
        if (result == COMPACT_DEFERRED)
                return true;

        /*
         * If compaction in async mode encounters contention or blocks a
         * higher-priority task, we back off early rather than cause stalls.
         */
        if (result == COMPACT_CONTENDED)
                return true;

        /*
         * The page scanners have met but we have not scanned full zones,
         * so this is in fact a back off.
         */
        if (result == COMPACT_PARTIAL_SKIPPED)
                return true;

        return false;
}
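
/*
 * Illustrative sketch only, not part of this header: how an allocation retry
 * path might combine the three predicates above. The function and parameter
 * names are hypothetical; kept under #if 0 so it is never compiled.
 */
#if 0
static bool example_should_retry_compaction(enum compact_result result,
                                            int *retries, int max_retries)
{
        /* some pageblocks were isolated and migrated: retrying makes sense */
        if (compaction_made_progress(result))
                return ++*retries <= max_retries;

        /* every zone was fully scanned without result: give up */
        if (compaction_failed(result))
                return false;

        /*
         * compaction backed off (watermarks, deferral, contention or a
         * partial scan): let reclaim make room, then retry a few times
         */
        if (compaction_withdrawn(result))
                return ++*retries <= max_retries;

        return false;
}
#endif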

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
                        int alloc_flags);

extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
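
/*
 * Illustrative sketch only, not part of this header: kcompactd is a per-node
 * worker, so the expected call sites are node online/offline handling
 * (kcompactd_run()/kcompactd_stop()) and the reclaim path, which can poke it
 * with wakeup_kcompactd() instead of compacting directly. Hypothetical glue
 * code, kept under #if 0 so it is never compiled.
 */
#if 0
        /* memory hotplug: bring the worker up or down with the node */
        if (node_online)
                kcompactd_run(nid);
        else
                kcompactd_stop(nid);

        /* reclaim: ask the background worker to compact for this order */
        wakeup_kcompactd(pgdat, order, classzone_idx);
#endif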

#else
static inline int PageMovable(struct page *page)
{
        return 0;
}
static inline void __SetPageMovable(struct page *page,
                        struct address_space *mapping)
{
}

static inline void __ClearPageMovable(struct page *page)
{
}

static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
                        unsigned int order, int alloc_flags,
                        const struct alloc_context *ac,
                        enum migrate_mode mode, int *contended)
{
        return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline enum compact_result compaction_suitable(struct zone *zone, int order,
                        int alloc_flags, int classzone_idx)
{
        return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
        return true;
}

static inline bool compaction_made_progress(enum compact_result result)
{
        return false;
}

static inline bool compaction_failed(enum compact_result result)
{
        return false;
}

static inline bool compaction_withdrawn(enum compact_result result)
{
        return true;
}

static inline int kcompactd_run(int nid)
{
        return 0;
}
static inline void kcompactd_stop(int nid)
{
}

static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
struct node;
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
        return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */
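
The PageMovable()/__SetPageMovable()/__ClearPageMovable() declarations above are the header-side surface of the non-lru movable page support named in this commit. As a hedged illustration only (not code taken from the patch), the sketch below shows roughly how a driver that owns non-lru pages is expected to hook in: it supplies isolate_page/migratepage/putback_page callbacks through an address_space_operations and marks its pages with __SetPageMovable() under the page lock. All names prefixed with "example_" are hypothetical, and the callback bodies omit the real driver work.

#include <linux/compaction.h>
#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

/* Detach @page from driver-private lists so it can be migrated (omitted). */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
        return true;
}

/* Copy contents and driver metadata from @page to @newpage (omitted). */
static int example_migratepage(struct address_space *mapping,
                               struct page *newpage, struct page *page,
                               enum migrate_mode mode)
{
        return MIGRATEPAGE_SUCCESS;
}

/* Migration failed or was aborted: put @page back on driver lists (omitted). */
static void example_putback_page(struct page *page)
{
}

static const struct address_space_operations example_movable_aops = {
        .isolate_page   = example_isolate_page,
        .migratepage    = example_migratepage,
        .putback_page   = example_putback_page,
};

/*
 * Mark a driver-owned page as movable; @mapping is a driver-provided
 * address_space whose a_ops point at example_movable_aops above.
 */
static void example_mark_page_movable(struct page *page,
                                      struct address_space *mapping)
{
        lock_page(page);
        __SetPageMovable(page, mapping);
        unlock_page(page);
}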