memcg: memory cgroup hierarchy documentation
mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES 5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
50 #else
51 #define do_swap_account (0)
52 #endif
53
54
55 /*
56 * Statistics for memory cgroup.
57 */
58 enum mem_cgroup_stat_index {
59 /*
60 * For the memory controller, usage = pagecache + rss.
61 */
62 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
63 MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
64 MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
65 MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
66
67 MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71 s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75 struct mem_cgroup_stat_cpu cpustat[0];
76 };
77
78 /*
79 * For accounting with irqs disabled; no need to increment the preempt count.
80 */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82 enum mem_cgroup_stat_index idx, int val)
83 {
84 stat->count[idx] += val;
85 }
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88 enum mem_cgroup_stat_index idx)
89 {
90 int cpu;
91 s64 ret = 0;
92 for_each_possible_cpu(cpu)
93 ret += stat->cpustat[cpu].count[idx];
94 return ret;
95 }
96
97 /*
98 * per-zone information in memory controller.
99 */
100 struct mem_cgroup_per_zone {
101 /*
102 * per-cgroup LRU lists; protected by the zone's lru_lock
103 */
104 struct list_head lists[NR_LRU_LISTS];
105 unsigned long count[NR_LRU_LISTS];
106 };
107 /* Macro for accessing counter */
108 #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
109
110 struct mem_cgroup_per_node {
111 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
112 };
113
114 struct mem_cgroup_lru_info {
115 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
116 };
117
118 /*
119 * The memory controller data structure. The memory controller controls both
120 * page cache and RSS per cgroup. We would eventually like to provide
121 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
122 * to help the administrator determine what knobs to tune.
123 *
124 * TODO: Add a water mark for the memory controller. Reclaim will begin when
125 * we hit the water mark. Maybe even add a low water mark, such that
126 * no reclaim occurs from a cgroup at its low water mark; this is
127 * a feature that will be implemented much later in the future.
128 */
129 struct mem_cgroup {
130 struct cgroup_subsys_state css;
131 /*
132 * the counter to account for memory usage
133 */
134 struct res_counter res;
135 /*
136 * the counter to account for mem+swap usage.
137 */
138 struct res_counter memsw;
139 /*
140 * Per cgroup active and inactive list, similar to the
141 * per zone LRU lists.
142 */
143 struct mem_cgroup_lru_info info;
144
145 int prev_priority; /* for recording reclaim priority */
146 int obsolete;
147 atomic_t refcnt;
148 /*
149 * statistics. This must be placed at the end of memcg (struct mem_cgroup_stat ends with a zero-length per-cpu array).
150 */
151 struct mem_cgroup_stat stat;
152 };
153
154 enum charge_type {
155 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
156 MEM_CGROUP_CHARGE_TYPE_MAPPED,
157 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
158 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
159 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
160 NR_CHARGE_TYPE,
161 };
162
163 /* used only in this file (for readability) */
164 #define PCGF_CACHE (1UL << PCG_CACHE)
165 #define PCGF_USED (1UL << PCG_USED)
166 #define PCGF_LOCK (1UL << PCG_LOCK)
167 static const unsigned long
168 pcg_default_flags[NR_CHARGE_TYPE] = {
169 PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
170 PCGF_USED | PCGF_LOCK, /* Anon */
171 PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
172 0, /* FORCE */
173 };
174
175
176 /* for encoding cft->private value on file */
177 #define _MEM (0)
178 #define _MEMSWAP (1)
179 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
180 #define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
181 #define MEMFILE_ATTR(val) ((val) & 0xffff)
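/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type
 * (_MEM or _MEMSWAP) into the upper 16 bits of cft->private and the RES_*
 * attribute into the lower 16 bits; MEMFILE_TYPE()/MEMFILE_ATTR() unpack them.
 */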
182
183 static void mem_cgroup_get(struct mem_cgroup *mem);
184 static void mem_cgroup_put(struct mem_cgroup *mem);
185
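/*
 * Update the per-cpu statistics for @pc: bump CACHE or RSS depending on
 * whether the page_cgroup is marked as cache, and count the event as a
 * pgpgin (charge == true) or pgpgout (charge == false).
 */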
186 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
187 struct page_cgroup *pc,
188 bool charge)
189 {
190 int val = (charge)? 1 : -1;
191 struct mem_cgroup_stat *stat = &mem->stat;
192 struct mem_cgroup_stat_cpu *cpustat;
193 int cpu = get_cpu();
194
195 cpustat = &stat->cpustat[cpu];
196 if (PageCgroupCache(pc))
197 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
198 else
199 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
200
201 if (charge)
202 __mem_cgroup_stat_add_safe(cpustat,
203 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
204 else
205 __mem_cgroup_stat_add_safe(cpustat,
206 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
207 put_cpu();
208 }
209
210 static struct mem_cgroup_per_zone *
211 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
212 {
213 return &mem->info.nodeinfo[nid]->zoneinfo[zid];
214 }
215
216 static struct mem_cgroup_per_zone *
217 page_cgroup_zoneinfo(struct page_cgroup *pc)
218 {
219 struct mem_cgroup *mem = pc->mem_cgroup;
220 int nid = page_cgroup_nid(pc);
221 int zid = page_cgroup_zid(pc);
222
223 return mem_cgroup_zoneinfo(mem, nid, zid);
224 }
225
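/*
 * Sum the LRU counter @idx over all zones of all online nodes for @mem.
 */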
226 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
227 enum lru_list idx)
228 {
229 int nid, zid;
230 struct mem_cgroup_per_zone *mz;
231 u64 total = 0;
232
233 for_each_online_node(nid)
234 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
235 mz = mem_cgroup_zoneinfo(mem, nid, zid);
236 total += MEM_CGROUP_ZSTAT(mz, idx);
237 }
238 return total;
239 }
240
241 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
242 {
243 return container_of(cgroup_subsys_state(cont,
244 mem_cgroup_subsys_id), struct mem_cgroup,
245 css);
246 }
247
248 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
249 {
250 /*
251 * mm_update_next_owner() may clear mm->owner to NULL
252 * if it races with swapoff, page migration, etc.
253 * So this can be called with p == NULL.
254 */
255 if (unlikely(!p))
256 return NULL;
257
258 return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
259 struct mem_cgroup, css);
260 }
261
262 /*
263 * The following LRU functions may be used without holding PCG_LOCK.
264 * They are called by the global LRU code independently of memcg.
265 * What we have to take care of here is the validity of pc->mem_cgroup.
266 *
267 * pc->mem_cgroup changes when
268 * 1. charging
269 * 2. moving an account
270 * In the typical case, "charge" is done before add-to-lru. The exception is
271 * SwapCache, which is added to the LRU before being charged.
272 * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
273 * When an account is being moved, the page is not on any LRU; it is isolated.
274 */
275
276 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
277 {
278 struct page_cgroup *pc;
279 struct mem_cgroup *mem;
280 struct mem_cgroup_per_zone *mz;
281
282 if (mem_cgroup_disabled())
283 return;
284 pc = lookup_page_cgroup(page);
285 /* can happen while we handle swapcache. */
286 if (list_empty(&pc->lru))
287 return;
288 mz = page_cgroup_zoneinfo(pc);
289 mem = pc->mem_cgroup;
290 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
291 list_del_init(&pc->lru);
292 return;
293 }
294
295 void mem_cgroup_del_lru(struct page *page)
296 {
297 mem_cgroup_del_lru_list(page, page_lru(page));
298 }
299
300 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
301 {
302 struct mem_cgroup_per_zone *mz;
303 struct page_cgroup *pc;
304
305 if (mem_cgroup_disabled())
306 return;
307
308 pc = lookup_page_cgroup(page);
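/* barrier to sync with "charge" (see __mem_cgroup_commit_charge()) */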
309 smp_rmb();
310 /* unused page is not rotated. */
311 if (!PageCgroupUsed(pc))
312 return;
313 mz = page_cgroup_zoneinfo(pc);
314 list_move(&pc->lru, &mz->lists[lru]);
315 }
316
317 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
318 {
319 struct page_cgroup *pc;
320 struct mem_cgroup_per_zone *mz;
321
322 if (mem_cgroup_disabled())
323 return;
324 pc = lookup_page_cgroup(page);
325 /* barrier to sync with "charge" */
326 smp_rmb();
327 if (!PageCgroupUsed(pc))
328 return;
329
330 mz = page_cgroup_zoneinfo(pc);
331 MEM_CGROUP_ZSTAT(mz, lru) += 1;
332 list_add(&pc->lru, &mz->lists[lru]);
333 }
334 /*
335 * To add swapcache to the LRU. Be careful when calling this function:
336 * zone->lru_lock must not be held and irqs must not be disabled.
337 */
338 static void mem_cgroup_lru_fixup(struct page *page)
339 {
340 if (!isolate_lru_page(page))
341 putback_lru_page(page);
342 }
343
344 void mem_cgroup_move_lists(struct page *page,
345 enum lru_list from, enum lru_list to)
346 {
347 if (mem_cgroup_disabled())
348 return;
349 mem_cgroup_del_lru_list(page, from);
350 mem_cgroup_add_lru_list(page, to);
351 }
352
353 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
354 {
355 int ret;
356
357 task_lock(task);
358 ret = task->mm && mm_match_cgroup(task->mm, mem);
359 task_unlock(task);
360 return ret;
361 }
362
363 /*
364 * Calculate mapped_ratio under the memory controller. This will be used in
365 * vmscan.c for determining whether we have to reclaim mapped pages.
366 */
367 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
368 {
369 long total, rss;
370
371 /*
372 * usage is recorded in bytes. But, here, we assume the number of
373 * physical pages can be represented by "long" on any arch.
374 */
375 total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
376 rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
377 return (int)((rss * 100L) / total);
378 }
379
380 /*
381 * prev_priority control...this will be used in memory reclaim path.
382 */
383 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
384 {
385 return mem->prev_priority;
386 }
387
388 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
389 {
390 if (priority < mem->prev_priority)
391 mem->prev_priority = priority;
392 }
393
394 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
395 {
396 mem->prev_priority = priority;
397 }
398
399 /*
400 * Calculate # of pages to be scanned in this priority/zone.
401 * See also vmscan.c
402 *
403 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
404 * (see include/linux/mmzone.h)
405 */
406
407 long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
408 int priority, enum lru_list lru)
409 {
410 long nr_pages;
411 int nid = zone->zone_pgdat->node_id;
412 int zid = zone_idx(zone);
413 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
414
415 nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
416
417 return (nr_pages >> priority);
418 }
419
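/*
 * Scan the per-memcg, per-zone LRU list selected by @active and @file, and
 * isolate up to @nr_to_scan pages onto @dst. page_cgroups that are no longer
 * used, or whose pages are not on the LRU, are skipped. Used by memcg
 * reclaim in vmscan.c.
 */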
420 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
421 struct list_head *dst,
422 unsigned long *scanned, int order,
423 int mode, struct zone *z,
424 struct mem_cgroup *mem_cont,
425 int active, int file)
426 {
427 unsigned long nr_taken = 0;
428 struct page *page;
429 unsigned long scan;
430 LIST_HEAD(pc_list);
431 struct list_head *src;
432 struct page_cgroup *pc, *tmp;
433 int nid = z->zone_pgdat->node_id;
434 int zid = zone_idx(z);
435 struct mem_cgroup_per_zone *mz;
436 int lru = LRU_FILE * !!file + !!active;
437
438 BUG_ON(!mem_cont);
439 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
440 src = &mz->lists[lru];
441
442 scan = 0;
443 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
444 if (scan >= nr_to_scan)
445 break;
446
447 page = pc->page;
448 if (unlikely(!PageCgroupUsed(pc)))
449 continue;
450 if (unlikely(!PageLRU(page)))
451 continue;
452
453 scan++;
454 if (__isolate_lru_page(page, mode, file) == 0) {
455 list_move(&page->lru, dst);
456 nr_taken++;
457 }
458 }
459
460 *scanned = scan;
461 return nr_taken;
462 }
463
464 /*
465 * Unlike the exported interface, an "oom" parameter is added. If oom == true,
466 * the OOM killer can be invoked.
467 */
468 static int __mem_cgroup_try_charge(struct mm_struct *mm,
469 gfp_t gfp_mask, struct mem_cgroup **memcg,
470 bool oom)
471 {
472 struct mem_cgroup *mem;
473 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
474 /*
475 * We always charge the cgroup the mm_struct belongs to.
476 * The mm_struct's mem_cgroup changes on task migration if the
477 * thread group leader migrates. It's possible that mm is not
478 * set, if so charge the init_mm (happens for pagecache usage).
479 */
480 if (likely(!*memcg)) {
481 rcu_read_lock();
482 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
483 if (unlikely(!mem)) {
484 rcu_read_unlock();
485 return 0;
486 }
487 /*
488 * For every charge from the cgroup, increment reference count
489 */
490 css_get(&mem->css);
491 *memcg = mem;
492 rcu_read_unlock();
493 } else {
494 mem = *memcg;
495 css_get(&mem->css);
496 }
497
498 while (1) {
499 int ret;
500 bool noswap = false;
501
502 ret = res_counter_charge(&mem->res, PAGE_SIZE);
503 if (likely(!ret)) {
504 if (!do_swap_account)
505 break;
506 ret = res_counter_charge(&mem->memsw, PAGE_SIZE);
507 if (likely(!ret))
508 break;
509 /* mem+swap counter fails */
510 res_counter_uncharge(&mem->res, PAGE_SIZE);
511 noswap = true;
512 }
513 if (!(gfp_mask & __GFP_WAIT))
514 goto nomem;
515
516 if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap))
517 continue;
518
519 /*
520 * try_to_free_mem_cgroup_pages() might not give us a full
521 * picture of reclaim. Some pages are reclaimed and might be
522 * moved to swap cache or just unmapped from the cgroup.
523 * Check the limit again to see if the reclaim reduced the
524 * current usage of the cgroup before giving up
525 *
526 */
527 if (!do_swap_account &&
528 res_counter_check_under_limit(&mem->res))
529 continue;
530 if (do_swap_account &&
531 res_counter_check_under_limit(&mem->memsw))
532 continue;
533
534 if (!nr_retries--) {
535 if (oom)
536 mem_cgroup_out_of_memory(mem, gfp_mask);
537 goto nomem;
538 }
539 }
540 return 0;
541 nomem:
542 css_put(&mem->css);
543 return -ENOMEM;
544 }
545
546 /**
547 * mem_cgroup_try_charge - charge PAGE_SIZE to a memory cgroup.
548 * @mm: the mm_struct that is charged against (used when *memcg is NULL)
549 * @gfp_mask: gfp_mask for reclaim.
550 * @memcg: a pointer to the memory cgroup that is charged against.
551 *
552 * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
553 * the memory cgroup is looked up from @mm and stored in *memcg.
554 *
555 * Returns 0 on success and -ENOMEM on failure.
556 * This call can invoke the OOM killer.
557 */
558
559 int mem_cgroup_try_charge(struct mm_struct *mm,
560 gfp_t mask, struct mem_cgroup **memcg)
561 {
562 return __mem_cgroup_try_charge(mm, mask, memcg, true);
563 }
564
565 /*
566 * Commit a charge obtained by mem_cgroup_try_charge() and mark the page_cgroup
567 * as USED. If it is already USED, uncharge and return.
568 */
569
570 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
571 struct page_cgroup *pc,
572 enum charge_type ctype)
573 {
574 /* try_charge() can return with *memcg == NULL; handle that here. */
575 if (!mem)
576 return;
577
578 lock_page_cgroup(pc);
579 if (unlikely(PageCgroupUsed(pc))) {
580 unlock_page_cgroup(pc);
581 res_counter_uncharge(&mem->res, PAGE_SIZE);
582 if (do_swap_account)
583 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
584 css_put(&mem->css);
585 return;
586 }
587 pc->mem_cgroup = mem;
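/*
 * Make pc->mem_cgroup visible before PCG_USED is set in pc->flags;
 * paired with the smp_rmb() in the LRU functions above.
 */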
588 smp_wmb();
589 pc->flags = pcg_default_flags[ctype];
590
591 mem_cgroup_charge_statistics(mem, pc, true);
592
593 unlock_page_cgroup(pc);
594 }
595
596 /**
597 * mem_cgroup_move_account - move account of the page
598 * @pc: page_cgroup of the page.
599 * @from: mem_cgroup which the page is moved from.
600 * @to: mem_cgroup which the page is moved to. @from != @to.
601 *
602 * The caller must confirm the following:
603 * - the page is not on the LRU (isolate_lru_page() is useful.)
604 *
605 * Returns 0 on success,
606 * returns -EBUSY when the lock is busy or "pc" is unstable.
607 *
608 * This function does "uncharge" from the old cgroup but doesn't do "charge" to
609 * the new cgroup. That must be done by the caller.
610 */
611
612 static int mem_cgroup_move_account(struct page_cgroup *pc,
613 struct mem_cgroup *from, struct mem_cgroup *to)
614 {
615 struct mem_cgroup_per_zone *from_mz, *to_mz;
616 int nid, zid;
617 int ret = -EBUSY;
618
619 VM_BUG_ON(from == to);
620 VM_BUG_ON(PageLRU(pc->page));
621
622 nid = page_cgroup_nid(pc);
623 zid = page_cgroup_zid(pc);
624 from_mz = mem_cgroup_zoneinfo(from, nid, zid);
625 to_mz = mem_cgroup_zoneinfo(to, nid, zid);
626
627 if (!trylock_page_cgroup(pc))
628 return ret;
629
630 if (!PageCgroupUsed(pc))
631 goto out;
632
633 if (pc->mem_cgroup != from)
634 goto out;
635
636 css_put(&from->css);
637 res_counter_uncharge(&from->res, PAGE_SIZE);
638 mem_cgroup_charge_statistics(from, pc, false);
639 if (do_swap_account)
640 res_counter_uncharge(&from->memsw, PAGE_SIZE);
641 pc->mem_cgroup = to;
642 mem_cgroup_charge_statistics(to, pc, true);
643 css_get(&to->css);
644 ret = 0;
645 out:
646 unlock_page_cgroup(pc);
647 return ret;
648 }
649
650 /*
651 * move charges to its parent.
652 */
653
654 static int mem_cgroup_move_parent(struct page_cgroup *pc,
655 struct mem_cgroup *child,
656 gfp_t gfp_mask)
657 {
658 struct page *page = pc->page;
659 struct cgroup *cg = child->css.cgroup;
660 struct cgroup *pcg = cg->parent;
661 struct mem_cgroup *parent;
662 int ret;
663
664 /* Is ROOT ? */
665 if (!pcg)
666 return -EINVAL;
667
668
669 parent = mem_cgroup_from_cont(pcg);
670
671
672 ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
673 if (ret)
674 return ret;
675
676 if (!get_page_unless_zero(page))
677 return -EBUSY;
678
679 ret = isolate_lru_page(page);
680
681 if (ret)
682 goto cancel;
683
684 ret = mem_cgroup_move_account(pc, child, parent);
685
686 /* drop the extra refcount taken by try_charge() (move_account took its own) */
687 css_put(&parent->css);
688 putback_lru_page(page);
689 if (!ret) {
690 put_page(page);
691 return 0;
692 }
693 /* uncharge if move fails */
694 cancel:
695 res_counter_uncharge(&parent->res, PAGE_SIZE);
696 if (do_swap_account)
697 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
698 put_page(page);
699 return ret;
700 }
701
702 /*
703 * Charge the memory controller for page usage.
704 * Return
705 * 0 if the charge was successful
706 * < 0 if the cgroup is over its limit
707 */
708 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
709 gfp_t gfp_mask, enum charge_type ctype,
710 struct mem_cgroup *memcg)
711 {
712 struct mem_cgroup *mem;
713 struct page_cgroup *pc;
714 int ret;
715
716 pc = lookup_page_cgroup(page);
717 /* can happen at boot */
718 if (unlikely(!pc))
719 return 0;
720 prefetchw(pc);
721
722 mem = memcg;
723 ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
724 if (ret)
725 return ret;
726
727 __mem_cgroup_commit_charge(mem, pc, ctype);
728 return 0;
729 }
730
731 int mem_cgroup_newpage_charge(struct page *page,
732 struct mm_struct *mm, gfp_t gfp_mask)
733 {
734 if (mem_cgroup_disabled())
735 return 0;
736 if (PageCompound(page))
737 return 0;
738 /*
739 * If already mapped, we don't have to account.
740 * If it is page cache, page->mapping points to an address_space.
741 * But page->mapping may hold a stale anon_vma pointer;
742 * detect that with the PageAnon() check. A newly-mapped-anon page's
743 * page->mapping is NULL.
744 */
745 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
746 return 0;
747 if (unlikely(!mm))
748 mm = &init_mm;
749 return mem_cgroup_charge_common(page, mm, gfp_mask,
750 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
751 }
752
753 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
754 gfp_t gfp_mask)
755 {
756 if (mem_cgroup_disabled())
757 return 0;
758 if (PageCompound(page))
759 return 0;
760 /*
761 * Corner case handling. This is usually called from add_to_page_cache(),
762 * but some filesystems (shmem) precharge the page before calling it
763 * and then call add_to_page_cache() with GFP_NOWAIT.
764 *
765 * In the GFP_NOWAIT case, the page may already have been charged before
766 * add_to_page_cache() (see shmem.c); check for that here and avoid
767 * charging twice. (Charging twice would work, but at a higher cost.)
768 */
769 if (!(gfp_mask & __GFP_WAIT)) {
770 struct page_cgroup *pc;
771
772
773 pc = lookup_page_cgroup(page);
774 if (!pc)
775 return 0;
776 lock_page_cgroup(pc);
777 if (PageCgroupUsed(pc)) {
778 unlock_page_cgroup(pc);
779 return 0;
780 }
781 unlock_page_cgroup(pc);
782 }
783
784 if (unlikely(!mm))
785 mm = &init_mm;
786
787 if (page_is_file_cache(page))
788 return mem_cgroup_charge_common(page, mm, gfp_mask,
789 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
790 else
791 return mem_cgroup_charge_common(page, mm, gfp_mask,
792 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
793 }
794
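/*
 * Charge for a swapin fault. If swap accounting is enabled and the swap
 * entry still records an owning memcg, charge that memcg; otherwise fall
 * back to charging the faulting mm's cgroup.
 */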
795 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
796 struct page *page,
797 gfp_t mask, struct mem_cgroup **ptr)
798 {
799 struct mem_cgroup *mem;
800 swp_entry_t ent;
801
802 if (mem_cgroup_disabled())
803 return 0;
804
805 if (!do_swap_account)
806 goto charge_cur_mm;
807
808 /*
809 * A racing thread's fault, or swapoff, may have already updated
810 * the pte, and even removed page from swap cache: return success
811 * to go on to do_swap_page()'s pte_same() test, which should fail.
812 */
813 if (!PageSwapCache(page))
814 return 0;
815
816 ent.val = page_private(page);
817
818 mem = lookup_swap_cgroup(ent);
819 if (!mem || mem->obsolete)
820 goto charge_cur_mm;
821 *ptr = mem;
822 return __mem_cgroup_try_charge(NULL, mask, ptr, true);
823 charge_cur_mm:
824 if (unlikely(!mm))
825 mm = &init_mm;
826 return __mem_cgroup_try_charge(mm, mask, ptr, true);
827 }
828
829 #ifdef CONFIG_SWAP
830
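/*
 * Charge a swap cache page used as shmem page cache. If swap accounting
 * already recorded an owning memcg for the entry, charge that memcg and
 * drop the memsw charge recorded at swap-out to avoid double counting.
 */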
831 int mem_cgroup_cache_charge_swapin(struct page *page,
832 struct mm_struct *mm, gfp_t mask, bool locked)
833 {
834 int ret = 0;
835
836 if (mem_cgroup_disabled())
837 return 0;
838 if (unlikely(!mm))
839 mm = &init_mm;
840 if (!locked)
841 lock_page(page);
842 /*
843 * If not locked, the page can be dropped from the swap cache before
844 * we reach here.
845 */
846 if (PageSwapCache(page)) {
847 struct mem_cgroup *mem = NULL;
848 swp_entry_t ent;
849
850 ent.val = page_private(page);
851 if (do_swap_account) {
852 mem = lookup_swap_cgroup(ent);
853 if (mem && mem->obsolete)
854 mem = NULL;
855 if (mem)
856 mm = NULL;
857 }
858 ret = mem_cgroup_charge_common(page, mm, mask,
859 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
860
861 if (!ret && do_swap_account) {
862 /* avoid double counting */
863 mem = swap_cgroup_record(ent, NULL);
864 if (mem) {
865 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
866 mem_cgroup_put(mem);
867 }
868 }
869 }
870 if (!locked)
871 unlock_page(page);
872 /* add this page(page_cgroup) to the LRU we want. */
873 mem_cgroup_lru_fixup(page);
874
875 return ret;
876 }
877 #endif
878
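/*
 * Commit a charge prepared by mem_cgroup_try_charge_swapin() once the pte
 * has been installed, and fix up the memsw double count left by swap-out.
 */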
879 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
880 {
881 struct page_cgroup *pc;
882
883 if (mem_cgroup_disabled())
884 return;
885 if (!ptr)
886 return;
887 pc = lookup_page_cgroup(page);
888 __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
889 /*
890 * Now the swap entry is backed by an in-memory page. This means the page
891 * may be counted both as mem and as swap -- a double count.
892 * Fix it by uncharging memsw. This swap cache entry is stable
893 * because we're still under lock_page().
894 */
895 if (do_swap_account) {
896 swp_entry_t ent = {.val = page_private(page)};
897 struct mem_cgroup *memcg;
898 memcg = swap_cgroup_record(ent, NULL);
899 if (memcg) {
900 /* If memcg is obsolete, memcg can be != ptr */
901 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
902 mem_cgroup_put(memcg);
903 }
904
905 }
906 /* add this page(page_cgroup) to the LRU we want. */
907 mem_cgroup_lru_fixup(page);
908 }
909
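/*
 * Undo a charge taken by mem_cgroup_try_charge_swapin() when the swapin
 * fault is aborted (e.g. the pte_same() check failed).
 */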
910 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
911 {
912 if (mem_cgroup_disabled())
913 return;
914 if (!mem)
915 return;
916 res_counter_uncharge(&mem->res, PAGE_SIZE);
917 if (do_swap_account)
918 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
919 css_put(&mem->css);
920 }
921
922
923 /*
924 * uncharge if !page_mapped(page)
925 */
926 static struct mem_cgroup *
927 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
928 {
929 struct page_cgroup *pc;
930 struct mem_cgroup *mem = NULL;
931 struct mem_cgroup_per_zone *mz;
932
933 if (mem_cgroup_disabled())
934 return NULL;
935
936 if (PageSwapCache(page))
937 return NULL;
938
939 /*
940 * Check if our page_cgroup is valid
941 */
942 pc = lookup_page_cgroup(page);
943 if (unlikely(!pc || !PageCgroupUsed(pc)))
944 return NULL;
945
946 lock_page_cgroup(pc);
947
948 mem = pc->mem_cgroup;
949
950 if (!PageCgroupUsed(pc))
951 goto unlock_out;
952
953 switch (ctype) {
954 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
955 if (page_mapped(page))
956 goto unlock_out;
957 break;
958 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
959 if (!PageAnon(page)) { /* Shared memory */
960 if (page->mapping && !page_is_file_cache(page))
961 goto unlock_out;
962 } else if (page_mapped(page)) /* Anon */
963 goto unlock_out;
964 break;
965 default:
966 break;
967 }
968
969 res_counter_uncharge(&mem->res, PAGE_SIZE);
970 if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
971 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
972
973 mem_cgroup_charge_statistics(mem, pc, false);
974 ClearPageCgroupUsed(pc);
975
976 mz = page_cgroup_zoneinfo(pc);
977 unlock_page_cgroup(pc);
978
979 css_put(&mem->css);
980
981 return mem;
982
983 unlock_out:
984 unlock_page_cgroup(pc);
985 return NULL;
986 }
987
988 void mem_cgroup_uncharge_page(struct page *page)
989 {
990 /* early check. */
991 if (page_mapped(page))
992 return;
993 if (page->mapping && !PageAnon(page))
994 return;
995 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
996 }
997
998 void mem_cgroup_uncharge_cache_page(struct page *page)
999 {
1000 VM_BUG_ON(page_mapped(page));
1001 VM_BUG_ON(page->mapping);
1002 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1003 }
1004
1005 /*
1006 * Called from __delete_from_swap_cache(), and drops the "page" account.
1007 * The memcg information is recorded in the swap_cgroup of "ent".
1008 */
1009 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1010 {
1011 struct mem_cgroup *memcg;
1012
1013 memcg = __mem_cgroup_uncharge_common(page,
1014 MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1015 /* record memcg information */
1016 if (do_swap_account && memcg) {
1017 swap_cgroup_record(ent, memcg);
1018 mem_cgroup_get(memcg);
1019 }
1020 }
1021
1022 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1023 /*
1024 * Called from swap_entry_free(). Removes the record in swap_cgroup and
1025 * uncharges the "memsw" account.
1026 */
1027 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1028 {
1029 struct mem_cgroup *memcg;
1030
1031 if (!do_swap_account)
1032 return;
1033
1034 memcg = swap_cgroup_record(ent, NULL);
1035 if (memcg) {
1036 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1037 mem_cgroup_put(memcg);
1038 }
1039 }
1040 #endif
1041
1042 /*
1043 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
1044 * old page belongs to.
1045 */
1046 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1047 {
1048 struct page_cgroup *pc;
1049 struct mem_cgroup *mem = NULL;
1050 int ret = 0;
1051
1052 if (mem_cgroup_disabled())
1053 return 0;
1054
1055 pc = lookup_page_cgroup(page);
1056 lock_page_cgroup(pc);
1057 if (PageCgroupUsed(pc)) {
1058 mem = pc->mem_cgroup;
1059 css_get(&mem->css);
1060 }
1061 unlock_page_cgroup(pc);
1062
1063 if (mem) {
1064 ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
1065 css_put(&mem->css);
1066 }
1067 *ptr = mem;
1068 return ret;
1069 }
1070
1071 /* remove redundant charge if migration failed*/
1072 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1073 struct page *oldpage, struct page *newpage)
1074 {
1075 struct page *target, *unused;
1076 struct page_cgroup *pc;
1077 enum charge_type ctype;
1078
1079 if (!mem)
1080 return;
1081
1082 /* at migration success, oldpage->mapping is NULL. */
1083 if (oldpage->mapping) {
1084 target = oldpage;
1085 unused = NULL;
1086 } else {
1087 target = newpage;
1088 unused = oldpage;
1089 }
1090
1091 if (PageAnon(target))
1092 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1093 else if (page_is_file_cache(target))
1094 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1095 else
1096 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1097
1098 /* unused page is not on radix-tree now. */
1099 if (unused)
1100 __mem_cgroup_uncharge_common(unused, ctype);
1101
1102 pc = lookup_page_cgroup(target);
1103 /*
1104 * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
1105 * So, double-counting is effectively avoided.
1106 */
1107 __mem_cgroup_commit_charge(mem, pc, ctype);
1108
1109 /*
1110 * Both oldpage and newpage are still under lock_page(),
1111 * so we don't have to worry about races in the radix tree.
1112 * But we do have to check whether this page is mapped or not.
1113 *
1114 * There is a !page_mapped() case: at the start of
1115 * migration, oldpage was mapped, but by now it has been zapped.
1116 * Still, we know the *target* page is not freed/reused under us.
1117 * mem_cgroup_uncharge_page() does all the necessary checks.
1118 */
1119 if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1120 mem_cgroup_uncharge_page(target);
1121 }
1122
1123 /*
1124 * Try to shrink memory usage under the specified memory controller.
1125 * This is typically used to reclaim shmem pages, to reduce the side
1126 * effects of page allocation from shmem that is used by some mem_cgroup.
1127 */
1128 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1129 {
1130 struct mem_cgroup *mem;
1131 int progress = 0;
1132 int retry = MEM_CGROUP_RECLAIM_RETRIES;
1133
1134 if (mem_cgroup_disabled())
1135 return 0;
1136 if (!mm)
1137 return 0;
1138
1139 rcu_read_lock();
1140 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1141 if (unlikely(!mem)) {
1142 rcu_read_unlock();
1143 return 0;
1144 }
1145 css_get(&mem->css);
1146 rcu_read_unlock();
1147
1148 do {
1149 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
1150 progress += res_counter_check_under_limit(&mem->res);
1151 } while (!progress && --retry);
1152
1153 css_put(&mem->css);
1154 if (!retry)
1155 return -ENOMEM;
1156 return 0;
1157 }
1158
1159 static DEFINE_MUTEX(set_limit_mutex);
1160
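/*
 * Set a new "memory.limit_in_bytes". The new limit must not exceed the
 * current mem+swap limit; reclaim is retried a few times if usage is still
 * above the new value.
 */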
1161 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1162 unsigned long long val)
1163 {
1164
1165 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1166 int progress;
1167 u64 memswlimit;
1168 int ret = 0;
1169
1170 while (retry_count) {
1171 if (signal_pending(current)) {
1172 ret = -EINTR;
1173 break;
1174 }
1175 /*
1176 * Rather than hiding this in a helper function, it is open-coded
1177 * here so that you can see what it really does.
1178 * We have to guarantee mem->res.limit <= mem->memsw.limit.
1179 */
1180 mutex_lock(&set_limit_mutex);
1181 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1182 if (memswlimit < val) {
1183 ret = -EINVAL;
1184 mutex_unlock(&set_limit_mutex);
1185 break;
1186 }
1187 ret = res_counter_set_limit(&memcg->res, val);
1188 mutex_unlock(&set_limit_mutex);
1189
1190 if (!ret)
1191 break;
1192
1193 progress = try_to_free_mem_cgroup_pages(memcg,
1194 GFP_HIGHUSER_MOVABLE, false);
1195 if (!progress) retry_count--;
1196 }
1197 return ret;
1198 }
1199
1200 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1201 unsigned long long val)
1202 {
1203 int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1204 u64 memlimit, oldusage, curusage;
1205 int ret;
1206
1207 if (!do_swap_account)
1208 return -EINVAL;
1209
1210 while (retry_count) {
1211 if (signal_pending(current)) {
1212 ret = -EINTR;
1213 break;
1214 }
1215 /*
1216 * Rather than hiding this in a helper function, it is open-coded
1217 * here so that you can see what it really does.
1218 * We have to guarantee mem->res.limit <= mem->memsw.limit.
1219 */
1220 mutex_lock(&set_limit_mutex);
1221 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1222 if (memlimit > val) {
1223 ret = -EINVAL;
1224 mutex_unlock(&set_limit_mutex);
1225 break;
1226 }
1227 ret = res_counter_set_limit(&memcg->memsw, val);
1228 mutex_unlock(&set_limit_mutex);
1229
1230 if (!ret)
1231 break;
1232
1233 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1234 try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
1235 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1236 if (curusage >= oldusage)
1237 retry_count--;
1238 }
1239 return ret;
1240 }
1241
1242 /*
1243 * This routine traverses the page_cgroups on the given list and drops them all.
1244 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
1245 */
1246 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1247 int node, int zid, enum lru_list lru)
1248 {
1249 struct zone *zone;
1250 struct mem_cgroup_per_zone *mz;
1251 struct page_cgroup *pc, *busy;
1252 unsigned long flags, loop;
1253 struct list_head *list;
1254 int ret = 0;
1255
1256 zone = &NODE_DATA(node)->node_zones[zid];
1257 mz = mem_cgroup_zoneinfo(mem, node, zid);
1258 list = &mz->lists[lru];
1259
1260 loop = MEM_CGROUP_ZSTAT(mz, lru);
1261 /* give some margin against EBUSY etc...*/
1262 loop += 256;
1263 busy = NULL;
1264 while (loop--) {
1265 ret = 0;
1266 spin_lock_irqsave(&zone->lru_lock, flags);
1267 if (list_empty(list)) {
1268 spin_unlock_irqrestore(&zone->lru_lock, flags);
1269 break;
1270 }
1271 pc = list_entry(list->prev, struct page_cgroup, lru);
1272 if (busy == pc) {
1273 list_move(&pc->lru, list);
1274 busy = NULL;
1275 spin_unlock_irqrestore(&zone->lru_lock, flags);
1276 continue;
1277 }
1278 spin_unlock_irqrestore(&zone->lru_lock, flags);
1279
1280 ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
1281 if (ret == -ENOMEM)
1282 break;
1283
1284 if (ret == -EBUSY || ret == -EINVAL) {
1285 /* found lock contention or "pc" is obsolete. */
1286 busy = pc;
1287 cond_resched();
1288 } else
1289 busy = NULL;
1290 }
1291
1292 if (!ret && !list_empty(list))
1293 return -EBUSY;
1294 return ret;
1295 }
1296
1297 /*
1298 * Make the mem_cgroup's charge 0 if there is no task in it.
1299 * This enables deleting this mem_cgroup.
1300 */
1301 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1302 {
1303 int ret;
1304 int node, zid, shrink;
1305 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1306 struct cgroup *cgrp = mem->css.cgroup;
1307
1308 css_get(&mem->css);
1309
1310 shrink = 0;
1311 /* should free all ? */
1312 if (free_all)
1313 goto try_to_free;
1314 move_account:
1315 while (mem->res.usage > 0) {
1316 ret = -EBUSY;
1317 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1318 goto out;
1319 ret = -EINTR;
1320 if (signal_pending(current))
1321 goto out;
1322 /* This is to put all *used* pages onto an LRU. */
1323 lru_add_drain_all();
1324 ret = 0;
1325 for_each_node_state(node, N_POSSIBLE) {
1326 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1327 enum lru_list l;
1328 for_each_lru(l) {
1329 ret = mem_cgroup_force_empty_list(mem,
1330 node, zid, l);
1331 if (ret)
1332 break;
1333 }
1334 }
1335 if (ret)
1336 break;
1337 }
1338 /* the parent cgroup doesn't seem to have enough memory */
1339 if (ret == -ENOMEM)
1340 goto try_to_free;
1341 cond_resched();
1342 }
1343 ret = 0;
1344 out:
1345 css_put(&mem->css);
1346 return ret;
1347
1348 try_to_free:
1349 /* returns -EBUSY if there is a task or if we come here twice. */
1350 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1351 ret = -EBUSY;
1352 goto out;
1353 }
1354 /* we call try_to_free pages to make this cgroup empty */
1355 lru_add_drain_all();
1356 /* try to free all pages in this cgroup */
1357 shrink = 1;
1358 while (nr_retries && mem->res.usage > 0) {
1359 int progress;
1360
1361 if (signal_pending(current)) {
1362 ret = -EINTR;
1363 goto out;
1364 }
1365 progress = try_to_free_mem_cgroup_pages(mem,
1366 GFP_HIGHUSER_MOVABLE, false);
1367 if (!progress) {
1368 nr_retries--;
1369 /* maybe some writeback is necessary */
1370 congestion_wait(WRITE, HZ/10);
1371 }
1372
1373 }
1374 lru_add_drain();
1375 /* try move_account...there may be some *locked* pages. */
1376 if (mem->res.usage)
1377 goto move_account;
1378 ret = 0;
1379 goto out;
1380 }
1381
1382 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1383 {
1384 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1385 }
1386
1387
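/*
 * Read handler for the res/memsw counter files; cft->private encodes which
 * counter (_MEM or _MEMSWAP) and which RES_* attribute to read.
 */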
1388 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1389 {
1390 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1391 u64 val = 0;
1392 int type, name;
1393
1394 type = MEMFILE_TYPE(cft->private);
1395 name = MEMFILE_ATTR(cft->private);
1396 switch (type) {
1397 case _MEM:
1398 val = res_counter_read_u64(&mem->res, name);
1399 break;
1400 case _MEMSWAP:
1401 if (do_swap_account)
1402 val = res_counter_read_u64(&mem->memsw, name);
1403 break;
1404 default:
1405 BUG();
1406 break;
1407 }
1408 return val;
1409 }
1410 /*
1411 * The user of this function is...
1412 * RES_LIMIT.
1413 */
1414 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1415 const char *buffer)
1416 {
1417 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1418 int type, name;
1419 unsigned long long val;
1420 int ret;
1421
1422 type = MEMFILE_TYPE(cft->private);
1423 name = MEMFILE_ATTR(cft->private);
1424 switch (name) {
1425 case RES_LIMIT:
1426 /* This function does all the necessary parsing; reuse it. */
1427 ret = res_counter_memparse_write_strategy(buffer, &val);
1428 if (ret)
1429 break;
1430 if (type == _MEM)
1431 ret = mem_cgroup_resize_limit(memcg, val);
1432 else
1433 ret = mem_cgroup_resize_memsw_limit(memcg, val);
1434 break;
1435 default:
1436 ret = -EINVAL; /* should be BUG() ? */
1437 break;
1438 }
1439 return ret;
1440 }
1441
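/*
 * Trigger handler for the *.max_usage_in_bytes and *.failcnt files: reset
 * the selected counter's max-usage or failcnt value.
 */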
1442 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1443 {
1444 struct mem_cgroup *mem;
1445 int type, name;
1446
1447 mem = mem_cgroup_from_cont(cont);
1448 type = MEMFILE_TYPE(event);
1449 name = MEMFILE_ATTR(event);
1450 switch (name) {
1451 case RES_MAX_USAGE:
1452 if (type == _MEM)
1453 res_counter_reset_max(&mem->res);
1454 else
1455 res_counter_reset_max(&mem->memsw);
1456 break;
1457 case RES_FAILCNT:
1458 if (type == _MEM)
1459 res_counter_reset_failcnt(&mem->res);
1460 else
1461 res_counter_reset_failcnt(&mem->memsw);
1462 break;
1463 }
1464 return 0;
1465 }
1466
1467 static const struct mem_cgroup_stat_desc {
1468 const char *msg;
1469 u64 unit;
1470 } mem_cgroup_stat_desc[] = {
1471 [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1472 [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1473 [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1474 [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1475 };
1476
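/*
 * Show memory.stat: the accumulated per-cpu counters plus the per-LRU page
 * counts, reported in bytes except for the pgpgin/pgpgout event counts.
 */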
1477 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1478 struct cgroup_map_cb *cb)
1479 {
1480 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1481 struct mem_cgroup_stat *stat = &mem_cont->stat;
1482 int i;
1483
1484 for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1485 s64 val;
1486
1487 val = mem_cgroup_read_stat(stat, i);
1488 val *= mem_cgroup_stat_desc[i].unit;
1489 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1490 }
1491 /* show the number of pages on each LRU list */
1492 {
1493 unsigned long active_anon, inactive_anon;
1494 unsigned long active_file, inactive_file;
1495 unsigned long unevictable;
1496
1497 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1498 LRU_INACTIVE_ANON);
1499 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1500 LRU_ACTIVE_ANON);
1501 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1502 LRU_INACTIVE_FILE);
1503 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1504 LRU_ACTIVE_FILE);
1505 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1506 LRU_UNEVICTABLE);
1507
1508 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1509 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1510 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1511 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1512 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1513
1514 }
1515 return 0;
1516 }
1517
1518
1519 static struct cftype mem_cgroup_files[] = {
1520 {
1521 .name = "usage_in_bytes",
1522 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1523 .read_u64 = mem_cgroup_read,
1524 },
1525 {
1526 .name = "max_usage_in_bytes",
1527 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1528 .trigger = mem_cgroup_reset,
1529 .read_u64 = mem_cgroup_read,
1530 },
1531 {
1532 .name = "limit_in_bytes",
1533 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1534 .write_string = mem_cgroup_write,
1535 .read_u64 = mem_cgroup_read,
1536 },
1537 {
1538 .name = "failcnt",
1539 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1540 .trigger = mem_cgroup_reset,
1541 .read_u64 = mem_cgroup_read,
1542 },
1543 {
1544 .name = "stat",
1545 .read_map = mem_control_stat_show,
1546 },
1547 {
1548 .name = "force_empty",
1549 .trigger = mem_cgroup_force_empty_write,
1550 },
1551 };
1552
1553 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1554 static struct cftype memsw_cgroup_files[] = {
1555 {
1556 .name = "memsw.usage_in_bytes",
1557 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1558 .read_u64 = mem_cgroup_read,
1559 },
1560 {
1561 .name = "memsw.max_usage_in_bytes",
1562 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1563 .trigger = mem_cgroup_reset,
1564 .read_u64 = mem_cgroup_read,
1565 },
1566 {
1567 .name = "memsw.limit_in_bytes",
1568 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1569 .write_string = mem_cgroup_write,
1570 .read_u64 = mem_cgroup_read,
1571 },
1572 {
1573 .name = "memsw.failcnt",
1574 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1575 .trigger = mem_cgroup_reset,
1576 .read_u64 = mem_cgroup_read,
1577 },
1578 };
1579
1580 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1581 {
1582 if (!do_swap_account)
1583 return 0;
1584 return cgroup_add_files(cont, ss, memsw_cgroup_files,
1585 ARRAY_SIZE(memsw_cgroup_files));
1586 };
1587 #else
1588 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1589 {
1590 return 0;
1591 }
1592 #endif
1593
1594 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1595 {
1596 struct mem_cgroup_per_node *pn;
1597 struct mem_cgroup_per_zone *mz;
1598 enum lru_list l;
1599 int zone, tmp = node;
1600 /*
1601 * This routine is called for every possible node,
1602 * but it is a BUG to call kmalloc() against an offline node.
1603 *
1604 * TODO: this routine can waste a lot of memory for nodes that will
1605 * never be onlined. It would be better to use a memory hotplug
1606 * callback function.
1607 */
1608 if (!node_state(node, N_NORMAL_MEMORY))
1609 tmp = -1;
1610 pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
1611 if (!pn)
1612 return 1;
1613
1614 mem->info.nodeinfo[node] = pn;
1615 memset(pn, 0, sizeof(*pn));
1616
1617 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1618 mz = &pn->zoneinfo[zone];
1619 for_each_lru(l)
1620 INIT_LIST_HEAD(&mz->lists[l]);
1621 }
1622 return 0;
1623 }
1624
1625 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1626 {
1627 kfree(mem->info.nodeinfo[node]);
1628 }
1629
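/*
 * Size of struct mem_cgroup including the per-cpu statistics array that is
 * appended at the end (one mem_cgroup_stat_cpu per possible cpu id).
 */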
1630 static int mem_cgroup_size(void)
1631 {
1632 int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1633 return sizeof(struct mem_cgroup) + cpustat_size;
1634 }
1635
1636 static struct mem_cgroup *mem_cgroup_alloc(void)
1637 {
1638 struct mem_cgroup *mem;
1639 int size = mem_cgroup_size();
1640
1641 if (size < PAGE_SIZE)
1642 mem = kmalloc(size, GFP_KERNEL);
1643 else
1644 mem = vmalloc(size);
1645
1646 if (mem)
1647 memset(mem, 0, size);
1648 return mem;
1649 }
1650
1651 /*
1652 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
1653 * (Scanning them all at force_empty would be too costly...)
1654 *
1655 * Instead of clearing all references at force_empty, we remember
1656 * the number of references from swap_cgroup and free the mem_cgroup when
1657 * that count goes down to 0.
1658 *
1659 * When the mem_cgroup is being destroyed, mem->obsolete is set to 1 and
1660 * any swap entry that points to this memcg is ignored at swapin.
1661 *
1662 * Removal of the cgroup itself succeeds regardless of refs from swap.
1663 */
1664
1665 static void mem_cgroup_free(struct mem_cgroup *mem)
1666 {
1667 int node;
1668
1669 if (atomic_read(&mem->refcnt) > 0)
1670 return;
1671
1672
1673 for_each_node_state(node, N_POSSIBLE)
1674 free_mem_cgroup_per_zone_info(mem, node);
1675
1676 if (mem_cgroup_size() < PAGE_SIZE)
1677 kfree(mem);
1678 else
1679 vfree(mem);
1680 }
1681
1682 static void mem_cgroup_get(struct mem_cgroup *mem)
1683 {
1684 atomic_inc(&mem->refcnt);
1685 }
1686
1687 static void mem_cgroup_put(struct mem_cgroup *mem)
1688 {
1689 if (atomic_dec_and_test(&mem->refcnt)) {
1690 if (!mem->obsolete)
1691 return;
1692 mem_cgroup_free(mem);
1693 }
1694 }
1695
1696
1697 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1698 static void __init enable_swap_cgroup(void)
1699 {
1700 if (!mem_cgroup_disabled() && really_do_swap_account)
1701 do_swap_account = 1;
1702 }
1703 #else
1704 static void __init enable_swap_cgroup(void)
1705 {
1706 }
1707 #endif
1708
1709 static struct cgroup_subsys_state *
1710 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1711 {
1712 struct mem_cgroup *mem;
1713 int node;
1714
1715 mem = mem_cgroup_alloc();
1716 if (!mem)
1717 return ERR_PTR(-ENOMEM);
1718
1719 res_counter_init(&mem->res);
1720 res_counter_init(&mem->memsw);
1721
1722 for_each_node_state(node, N_POSSIBLE)
1723 if (alloc_mem_cgroup_per_zone_info(mem, node))
1724 goto free_out;
1725 /* root ? */
1726 if (cont->parent == NULL)
1727 enable_swap_cgroup();
1728
1729 return &mem->css;
1730 free_out:
1731 for_each_node_state(node, N_POSSIBLE)
1732 free_mem_cgroup_per_zone_info(mem, node);
1733 mem_cgroup_free(mem);
1734 return ERR_PTR(-ENOMEM);
1735 }
1736
1737 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1738 struct cgroup *cont)
1739 {
1740 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1741 mem->obsolete = 1;
1742 mem_cgroup_force_empty(mem, false);
1743 }
1744
1745 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1746 struct cgroup *cont)
1747 {
1748 mem_cgroup_free(mem_cgroup_from_cont(cont));
1749 }
1750
1751 static int mem_cgroup_populate(struct cgroup_subsys *ss,
1752 struct cgroup *cont)
1753 {
1754 int ret;
1755
1756 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
1757 ARRAY_SIZE(mem_cgroup_files));
1758
1759 if (!ret)
1760 ret = register_memsw_files(cont, ss);
1761 return ret;
1762 }
1763
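/*
 * Called when a task is attached to the cgroup. Charges are not actually
 * moved here yet; only thread group leaders would qualify, and for now the
 * mm reference is simply taken and released.
 */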
1764 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1765 struct cgroup *cont,
1766 struct cgroup *old_cont,
1767 struct task_struct *p)
1768 {
1769 struct mm_struct *mm;
1770 struct mem_cgroup *mem, *old_mem;
1771
1772 mm = get_task_mm(p);
1773 if (mm == NULL)
1774 return;
1775
1776 mem = mem_cgroup_from_cont(cont);
1777 old_mem = mem_cgroup_from_cont(old_cont);
1778
1779 /*
1780 * Only thread group leaders are allowed to migrate; the mm_struct is
1781 * in effect owned by the leader.
1782 */
1783 if (!thread_group_leader(p))
1784 goto out;
1785
1786 out:
1787 mmput(mm);
1788 }
1789
1790 struct cgroup_subsys mem_cgroup_subsys = {
1791 .name = "memory",
1792 .subsys_id = mem_cgroup_subsys_id,
1793 .create = mem_cgroup_create,
1794 .pre_destroy = mem_cgroup_pre_destroy,
1795 .destroy = mem_cgroup_destroy,
1796 .populate = mem_cgroup_populate,
1797 .attach = mem_cgroup_move_task,
1798 .early_init = 0,
1799 };
1800
1801 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1802
1803 static int __init disable_swap_account(char *s)
1804 {
1805 really_do_swap_account = 0;
1806 return 1;
1807 }
1808 __setup("noswapaccount", disable_swap_account);
1809 #endif