/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation memcg does
 * not allocate memory itself but reclaims from all available zones, so the
 * "where do I want memory from" bits of gfp_mask carry no meaning. Any
 * value of those bits would work, but a fixed rule avoids ambiguity: a
 * charge function's gfp_mask should be either GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, should memcg ever allocate memory itself, GFP_KERNEL remains
 * the sane choice.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
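
/*
 * try/commit/cancel form a three-step protocol: the charge is reserved
 * before the swapped-in page is mapped, and exactly one of commit (on
 * success) or cancel (on failure) must follow. A minimal sketch of a
 * hypothetical swap-in fault path (do_map_page() is a placeholder, error
 * handling elided):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return VM_FAULT_OOM;
 *	if (do_map_page(mm, page) == 0)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */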

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
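
/*
 * Illustrative sketch of the gfp_mask rule above (not an actual call
 * site): a caller holding an arbitrary gfp_mask masks it down before
 * charging, so only the reclaim-behaviour bits are passed through:
 *
 *	if (mem_cgroup_cache_charge(page, current->mm,
 *				    gfp_mask & GFP_RECLAIM_MASK))
 *		goto charge_failed;
 */
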
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

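/*
 * Illustrative sketch: when tearing down many pages at once, bracketing
 * the loop with uncharge_start/end lets memcg coalesce the counter
 * updates instead of paying for them one page at a time (pages_to_free
 * is a hypothetical list):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */
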
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
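
/*
 * prepare_migration and end_migration must pair around a page migration;
 * migration_ok tells memcg whether the new page ended up holding the
 * charge. A minimal sketch, with copy_and_remap() standing in for the
 * actual migration step (a placeholder, not a real helper):
 *
 *	struct mem_cgroup *mem = NULL;
 *	bool ok;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL))
 *		return -ENOMEM;
 *	ok = copy_and_remap(page, newpage);
 *	mem_cgroup_end_migration(mem, page, newpage, ok);
 */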

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
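
/*
 * Hot paths typically bail out early when the controller is compiled in
 * but turned off at boot (e.g. via the cgroup_disable=memory command
 * line), for example:
 *
 *	if (mem_cgroup_disabled())
 *		return;
 */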

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
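
/*
 * Illustrative use (a sketch, not the exact call sites): the file rmap
 * code can keep MEMCG_NR_FILE_MAPPED in step with a page's mapped state:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */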

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
		struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */