mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 */
27
28 #include <linux/res_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/mm.h>
32 #include <linux/hugetlb.h>
33 #include <linux/pagemap.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/page_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include "internal.h"
60 #include <net/sock.h>
61 #include <net/ip.h>
62 #include <net/tcp_memcontrol.h>
63 #include "slab.h"
64
65 #include <asm/uaccess.h>
66
67 #include <trace/events/vmscan.h>
68
69 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
70 EXPORT_SYMBOL(memory_cgrp_subsys);
71
72 #define MEM_CGROUP_RECLAIM_RETRIES 5
73 static struct mem_cgroup *root_mem_cgroup __read_mostly;
74
75 #ifdef CONFIG_MEMCG_SWAP
76 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
77 int do_swap_account __read_mostly;
78
  79  /* for remembering the boot option */
80 #ifdef CONFIG_MEMCG_SWAP_ENABLED
81 static int really_do_swap_account __initdata = 1;
82 #else
83 static int really_do_swap_account __initdata;
84 #endif
85
86 #else
87 #define do_swap_account 0
88 #endif
89
90
91 static const char * const mem_cgroup_stat_names[] = {
92 "cache",
93 "rss",
94 "rss_huge",
95 "mapped_file",
96 "writeback",
97 "swap",
98 };
99
100 enum mem_cgroup_events_index {
101 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
102 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
103 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
104 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
105 MEM_CGROUP_EVENTS_NSTATS,
106 };
107
108 static const char * const mem_cgroup_events_names[] = {
109 "pgpgin",
110 "pgpgout",
111 "pgfault",
112 "pgmajfault",
113 };
114
115 static const char * const mem_cgroup_lru_names[] = {
116 "inactive_anon",
117 "active_anon",
118 "inactive_file",
119 "active_file",
120 "unevictable",
121 };
122
123 /*
124 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 125  * it will be incremented by the number of pages. This counter is used
 126  * to trigger some periodic events. This is straightforward and better
 127  * than using jiffies etc. to handle periodic memcg events.
128 */
129 enum mem_cgroup_events_target {
130 MEM_CGROUP_TARGET_THRESH,
131 MEM_CGROUP_TARGET_SOFTLIMIT,
132 MEM_CGROUP_TARGET_NUMAINFO,
133 MEM_CGROUP_NTARGETS,
134 };
135 #define THRESHOLDS_EVENTS_TARGET 128
136 #define SOFTLIMIT_EVENTS_TARGET 1024
137 #define NUMAINFO_EVENTS_TARGET 1024
138
139 struct mem_cgroup_stat_cpu {
140 long count[MEM_CGROUP_STAT_NSTATS];
141 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
142 unsigned long nr_page_events;
143 unsigned long targets[MEM_CGROUP_NTARGETS];
144 };
145
146 struct mem_cgroup_reclaim_iter {
147 /*
148 * last scanned hierarchy member. Valid only if last_dead_count
149 * matches memcg->dead_count of the hierarchy root group.
150 */
151 struct mem_cgroup *last_visited;
152 int last_dead_count;
153
154 /* scan generation, increased every round-trip */
155 unsigned int generation;
156 };
157
158 /*
159 * per-zone information in memory controller.
160 */
161 struct mem_cgroup_per_zone {
162 struct lruvec lruvec;
163 unsigned long lru_size[NR_LRU_LISTS];
164
165 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
166
167 struct rb_node tree_node; /* RB tree node */
168 unsigned long long usage_in_excess;/* Set to the value by which */
169 /* the soft limit is exceeded*/
170 bool on_tree;
171 struct mem_cgroup *memcg; /* Back pointer, we cannot */
172 /* use container_of */
173 };
174
175 struct mem_cgroup_per_node {
176 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
177 };
178
179 /*
180 * Cgroups above their limits are maintained in a RB-Tree, independent of
181 * their hierarchy representation
182 */
183
184 struct mem_cgroup_tree_per_zone {
185 struct rb_root rb_root;
186 spinlock_t lock;
187 };
188
189 struct mem_cgroup_tree_per_node {
190 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
191 };
192
193 struct mem_cgroup_tree {
194 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
195 };
196
197 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
198
199 struct mem_cgroup_threshold {
200 struct eventfd_ctx *eventfd;
201 u64 threshold;
202 };
203
204 /* For threshold */
205 struct mem_cgroup_threshold_ary {
206 /* An array index points to threshold just below or equal to usage. */
207 int current_threshold;
208 /* Size of entries[] */
209 unsigned int size;
210 /* Array of thresholds */
211 struct mem_cgroup_threshold entries[0];
212 };
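/*
 * For illustration, with hypothetical numbers: assuming entries[] holds
 * thresholds of 4M, 8M and 16M and current usage is 10M, current_threshold
 * would index the 8M entry -- the largest threshold still at or below usage.
 */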
213
214 struct mem_cgroup_thresholds {
215 /* Primary thresholds array */
216 struct mem_cgroup_threshold_ary *primary;
217 /*
218 * Spare threshold array.
219 * This is needed to make mem_cgroup_unregister_event() "never fail".
220 * It must be able to store at least primary->size - 1 entries.
221 */
222 struct mem_cgroup_threshold_ary *spare;
223 };
224
225 /* for OOM */
226 struct mem_cgroup_eventfd_list {
227 struct list_head list;
228 struct eventfd_ctx *eventfd;
229 };
230
231 /*
 232  * cgroup_event represents events which userspace wants to receive.
233 */
234 struct mem_cgroup_event {
235 /*
236 * memcg which the event belongs to.
237 */
238 struct mem_cgroup *memcg;
239 /*
240 * eventfd to signal userspace about the event.
241 */
242 struct eventfd_ctx *eventfd;
243 /*
244 * Each of these stored in a list by the cgroup.
245 */
246 struct list_head list;
247 /*
 248  * register_event() callback will be used to add a new userspace
 249  * waiter for changes related to this event. Use eventfd_signal()
 250  * on eventfd to send a notification to userspace.
251 */
252 int (*register_event)(struct mem_cgroup *memcg,
253 struct eventfd_ctx *eventfd, const char *args);
254 /*
255 * unregister_event() callback will be called when userspace closes
 256  * the eventfd or when the cgroup is removed. This callback must be set
 257  * if you want to provide notification functionality.
258 */
259 void (*unregister_event)(struct mem_cgroup *memcg,
260 struct eventfd_ctx *eventfd);
261 /*
 262  * All fields below are needed to unregister the event when
 263  * userspace closes the eventfd.
264 */
265 poll_table pt;
266 wait_queue_head_t *wqh;
267 wait_queue_t wait;
268 struct work_struct remove;
269 };
270
271 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
272 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
273
274 /*
275 * The memory controller data structure. The memory controller controls both
276 * page cache and RSS per cgroup. We would eventually like to provide
277 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
278 * to help the administrator determine what knobs to tune.
279 *
280 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 281  * we hit the water mark. Maybe even add a low water mark, such that
 282  * no reclaim occurs from a cgroup at its low water mark; this is
283 * a feature that will be implemented much later in the future.
284 */
285 struct mem_cgroup {
286 struct cgroup_subsys_state css;
287 /*
288 * the counter to account for memory usage
289 */
290 struct res_counter res;
291
292 /* vmpressure notifications */
293 struct vmpressure vmpressure;
294
295 /*
296 * the counter to account for mem+swap usage.
297 */
298 struct res_counter memsw;
299
300 /*
301 * the counter to account for kernel memory usage.
302 */
303 struct res_counter kmem;
304 /*
305 * Should the accounting and control be hierarchical, per subtree?
306 */
307 bool use_hierarchy;
308 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
309
310 bool oom_lock;
311 atomic_t under_oom;
312 atomic_t oom_wakeups;
313
314 int swappiness;
315 /* OOM-Killer disable */
316 int oom_kill_disable;
317
318 /* set when res.limit == memsw.limit */
319 bool memsw_is_minimum;
320
321 /* protect arrays of thresholds */
322 struct mutex thresholds_lock;
323
324 /* thresholds for memory usage. RCU-protected */
325 struct mem_cgroup_thresholds thresholds;
326
327 /* thresholds for mem+swap usage. RCU-protected */
328 struct mem_cgroup_thresholds memsw_thresholds;
329
330 /* For oom notifier event fd */
331 struct list_head oom_notify;
332
333 /*
334 * Should we move charges of a task when a task is moved into this
 335  * mem_cgroup? And what type of charges should we move?
336 */
337 unsigned long move_charge_at_immigrate;
338 /*
 339  * set > 0 if pages under this cgroup are moving to another cgroup.
340 */
341 atomic_t moving_account;
342 /* taken only while moving_account > 0 */
343 spinlock_t move_lock;
344 /*
345 * percpu counter.
346 */
347 struct mem_cgroup_stat_cpu __percpu *stat;
348 /*
349 * used when a cpu is offlined or other synchronizations
350 * See mem_cgroup_read_stat().
351 */
352 struct mem_cgroup_stat_cpu nocpu_base;
353 spinlock_t pcp_counter_lock;
354
355 atomic_t dead_count;
356 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
357 struct cg_proto tcp_mem;
358 #endif
359 #if defined(CONFIG_MEMCG_KMEM)
360 /* analogous to slab_common's slab_caches list, but per-memcg;
361 * protected by memcg_slab_mutex */
362 struct list_head memcg_slab_caches;
363 /* Index in the kmem_cache->memcg_params->memcg_caches array */
364 int kmemcg_id;
365 #endif
366
367 int last_scanned_node;
368 #if MAX_NUMNODES > 1
369 nodemask_t scan_nodes;
370 atomic_t numainfo_events;
371 atomic_t numainfo_updating;
372 #endif
373
374 /* List of events which userspace want to receive */
375 struct list_head event_list;
376 spinlock_t event_list_lock;
377
378 struct mem_cgroup_per_node *nodeinfo[0];
379 /* WARNING: nodeinfo must be the last member here */
380 };
381
382 /* internal only representation about the status of kmem accounting. */
383 enum {
384 KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
385 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
386 };
387
388 #ifdef CONFIG_MEMCG_KMEM
389 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
390 {
391 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
392 }
393
394 static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
395 {
396 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
397 }
398
399 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
400 {
401 /*
402 * Our caller must use css_get() first, because memcg_uncharge_kmem()
403 * will call css_put() if it sees the memcg is dead.
404 */
405 smp_wmb();
406 if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
407 set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
408 }
409
410 static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
411 {
412 return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
413 &memcg->kmem_account_flags);
414 }
415 #endif
416
417 /* Stuffs for move charges at task migration. */
418 /*
 419  * Types of charges to be moved. "move_charge_at_immigrate" and
420 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
421 */
422 enum move_type {
423 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
424 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
425 NR_MOVE_TYPE,
426 };
427
428 /* "mc" and its members are protected by cgroup_mutex */
429 static struct move_charge_struct {
430 spinlock_t lock; /* for from, to */
431 struct mem_cgroup *from;
432 struct mem_cgroup *to;
433 unsigned long immigrate_flags;
434 unsigned long precharge;
435 unsigned long moved_charge;
436 unsigned long moved_swap;
437 struct task_struct *moving_task; /* a task moving charges */
438 wait_queue_head_t waitq; /* a waitq for other context */
439 } mc = {
440 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
441 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
442 };
443
444 static bool move_anon(void)
445 {
446 return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
447 }
448
449 static bool move_file(void)
450 {
451 return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
452 }
453
454 /*
455 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
456 * limit reclaim to prevent infinite loops, if they ever occur.
457 */
458 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
459 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
460
461 enum charge_type {
462 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
463 MEM_CGROUP_CHARGE_TYPE_ANON,
464 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
465 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
466 NR_CHARGE_TYPE,
467 };
468
469 /* for encoding cft->private value on file */
470 enum res_type {
471 _MEM,
472 _MEMSWAP,
473 _OOM_TYPE,
474 _KMEM,
475 };
476
477 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
478 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
479 #define MEMFILE_ATTR(val) ((val) & 0xffff)
 480 /* Used for OOM notifier */
481 #define OOM_CONTROL (0)
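/*
 * A sketch of how the encoding above is meant to be read (illustrative only):
 *
 *	int private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *	MEMFILE_TYPE(private);	-- recovers _MEMSWAP from the upper 16 bits
 *	MEMFILE_ATTR(private);	-- recovers RES_LIMIT from the lower 16 bits
 */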
482
483 /*
484 * Reclaim flags for mem_cgroup_hierarchical_reclaim
485 */
486 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
487 #define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
488 #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
489 #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
490
491 /*
492 * The memcg_create_mutex will be held whenever a new cgroup is created.
493 * As a consequence, any change that needs to protect against new child cgroups
494 * appearing has to hold it as well.
495 */
496 static DEFINE_MUTEX(memcg_create_mutex);
497
498 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
499 {
500 return s ? container_of(s, struct mem_cgroup, css) : NULL;
501 }
502
503 /* Some nice accessors for the vmpressure. */
504 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
505 {
506 if (!memcg)
507 memcg = root_mem_cgroup;
508 return &memcg->vmpressure;
509 }
510
511 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
512 {
513 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
514 }
515
516 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
517 {
518 return (memcg == root_mem_cgroup);
519 }
520
521 /*
522 * We restrict the id in the range of [1, 65535], so it can fit into
523 * an unsigned short.
524 */
525 #define MEM_CGROUP_ID_MAX USHRT_MAX
526
527 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
528 {
529 /*
 530  * The ID of the root cgroup is 0, but memcg treats 0 as an
531 * invalid ID, so we return (cgroup_id + 1).
532 */
533 return memcg->css.cgroup->id + 1;
534 }
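/*
 * For example, the root memory cgroup (cgroup id 0) gets memcg id 1, and
 * mem_cgroup_from_id(1) below maps it back via css_from_id(0, ...).
 */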
535
536 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
537 {
538 struct cgroup_subsys_state *css;
539
540 css = css_from_id(id - 1, &memory_cgrp_subsys);
541 return mem_cgroup_from_css(css);
542 }
543
544 /* Writing them here to avoid exposing memcg's inner layout */
545 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
546
547 void sock_update_memcg(struct sock *sk)
548 {
549 if (mem_cgroup_sockets_enabled) {
550 struct mem_cgroup *memcg;
551 struct cg_proto *cg_proto;
552
553 BUG_ON(!sk->sk_prot->proto_cgroup);
554
555 /* Socket cloning can throw us here with sk_cgrp already
556 * filled. It won't however, necessarily happen from
557 * process context. So the test for root memcg given
558 * the current task's memcg won't help us in this case.
559 *
560 * Respecting the original socket's memcg is a better
561 * decision in this case.
562 */
563 if (sk->sk_cgrp) {
564 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
565 css_get(&sk->sk_cgrp->memcg->css);
566 return;
567 }
568
569 rcu_read_lock();
570 memcg = mem_cgroup_from_task(current);
571 cg_proto = sk->sk_prot->proto_cgroup(memcg);
572 if (!mem_cgroup_is_root(memcg) &&
573 memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
574 sk->sk_cgrp = cg_proto;
575 }
576 rcu_read_unlock();
577 }
578 }
579 EXPORT_SYMBOL(sock_update_memcg);
580
581 void sock_release_memcg(struct sock *sk)
582 {
583 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
584 struct mem_cgroup *memcg;
585 WARN_ON(!sk->sk_cgrp->memcg);
586 memcg = sk->sk_cgrp->memcg;
587 css_put(&sk->sk_cgrp->memcg->css);
588 }
589 }
590
591 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
592 {
593 if (!memcg || mem_cgroup_is_root(memcg))
594 return NULL;
595
596 return &memcg->tcp_mem;
597 }
598 EXPORT_SYMBOL(tcp_proto_cgroup);
599
600 static void disarm_sock_keys(struct mem_cgroup *memcg)
601 {
602 if (!memcg_proto_activated(&memcg->tcp_mem))
603 return;
604 static_key_slow_dec(&memcg_socket_limit_enabled);
605 }
606 #else
607 static void disarm_sock_keys(struct mem_cgroup *memcg)
608 {
609 }
610 #endif
611
612 #ifdef CONFIG_MEMCG_KMEM
613 /*
614 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
615 * The main reason for not using cgroup id for this:
616 * this works better in sparse environments, where we have a lot of memcgs,
617 * but only a few kmem-limited. Or also, if we have, for instance, 200
618 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
619 * 200 entry array for that.
620 *
621 * The current size of the caches array is stored in
622 * memcg_limited_groups_array_size. It will double each time we have to
623 * increase it.
624 */
625 static DEFINE_IDA(kmem_limited_groups);
626 int memcg_limited_groups_array_size;
627
628 /*
 629  * MIN_SIZE is different from 1, because we would like to avoid going through
630 * the alloc/free process all the time. In a small machine, 4 kmem-limited
631 * cgroups is a reasonable guess. In the future, it could be a parameter or
632 * tunable, but that is strictly not necessary.
633 *
634 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
635 * this constant directly from cgroup, but it is understandable that this is
636 * better kept as an internal representation in cgroup.c. In any case, the
637 * cgrp_id space is not getting any smaller, and we don't have to necessarily
638 * increase ours as well if it increases.
639 */
640 #define MEMCG_CACHES_MIN_SIZE 4
641 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
642
643 /*
644 * A lot of the calls to the cache allocation functions are expected to be
645 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 646  * conditional to this static branch, we'll have to allow modules that do
 647  * kmem_cache_alloc and the like to see this symbol as well
648 */
649 struct static_key memcg_kmem_enabled_key;
650 EXPORT_SYMBOL(memcg_kmem_enabled_key);
651
652 static void disarm_kmem_keys(struct mem_cgroup *memcg)
653 {
654 if (memcg_kmem_is_active(memcg)) {
655 static_key_slow_dec(&memcg_kmem_enabled_key);
656 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
657 }
658 /*
659 * This check can't live in kmem destruction function,
660 * since the charges will outlive the cgroup
661 */
662 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
663 }
664 #else
665 static void disarm_kmem_keys(struct mem_cgroup *memcg)
666 {
667 }
668 #endif /* CONFIG_MEMCG_KMEM */
669
670 static void disarm_static_keys(struct mem_cgroup *memcg)
671 {
672 disarm_sock_keys(memcg);
673 disarm_kmem_keys(memcg);
674 }
675
676 static void drain_all_stock_async(struct mem_cgroup *memcg);
677
678 static struct mem_cgroup_per_zone *
679 mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
680 {
681 VM_BUG_ON((unsigned)nid >= nr_node_ids);
682 return &memcg->nodeinfo[nid]->zoneinfo[zid];
683 }
684
685 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
686 {
687 return &memcg->css;
688 }
689
690 static struct mem_cgroup_per_zone *
691 page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
692 {
693 int nid = page_to_nid(page);
694 int zid = page_zonenum(page);
695
696 return mem_cgroup_zoneinfo(memcg, nid, zid);
697 }
698
699 static struct mem_cgroup_tree_per_zone *
700 soft_limit_tree_node_zone(int nid, int zid)
701 {
702 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
703 }
704
705 static struct mem_cgroup_tree_per_zone *
706 soft_limit_tree_from_page(struct page *page)
707 {
708 int nid = page_to_nid(page);
709 int zid = page_zonenum(page);
710
711 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
712 }
713
714 static void
715 __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
716 struct mem_cgroup_per_zone *mz,
717 struct mem_cgroup_tree_per_zone *mctz,
718 unsigned long long new_usage_in_excess)
719 {
720 struct rb_node **p = &mctz->rb_root.rb_node;
721 struct rb_node *parent = NULL;
722 struct mem_cgroup_per_zone *mz_node;
723
724 if (mz->on_tree)
725 return;
726
727 mz->usage_in_excess = new_usage_in_excess;
728 if (!mz->usage_in_excess)
729 return;
730 while (*p) {
731 parent = *p;
732 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
733 tree_node);
734 if (mz->usage_in_excess < mz_node->usage_in_excess)
735 p = &(*p)->rb_left;
736 /*
737 * We can't avoid mem cgroups that are over their soft
738 * limit by the same amount
739 */
740 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
741 p = &(*p)->rb_right;
742 }
743 rb_link_node(&mz->tree_node, parent, p);
744 rb_insert_color(&mz->tree_node, &mctz->rb_root);
745 mz->on_tree = true;
746 }
747
748 static void
749 __mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
750 struct mem_cgroup_per_zone *mz,
751 struct mem_cgroup_tree_per_zone *mctz)
752 {
753 if (!mz->on_tree)
754 return;
755 rb_erase(&mz->tree_node, &mctz->rb_root);
756 mz->on_tree = false;
757 }
758
759 static void
760 mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
761 struct mem_cgroup_per_zone *mz,
762 struct mem_cgroup_tree_per_zone *mctz)
763 {
764 spin_lock(&mctz->lock);
765 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
766 spin_unlock(&mctz->lock);
767 }
768
769
770 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
771 {
772 unsigned long long excess;
773 struct mem_cgroup_per_zone *mz;
774 struct mem_cgroup_tree_per_zone *mctz;
775 int nid = page_to_nid(page);
776 int zid = page_zonenum(page);
777 mctz = soft_limit_tree_from_page(page);
778
779 /*
 780  * Necessary to update all ancestors when hierarchy is used,
781 * because their event counter is not touched.
782 */
783 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
784 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
785 excess = res_counter_soft_limit_excess(&memcg->res);
786 /*
787 * We have to update the tree if mz is on RB-tree or
788 * mem is over its softlimit.
789 */
790 if (excess || mz->on_tree) {
791 spin_lock(&mctz->lock);
792 /* if on-tree, remove it */
793 if (mz->on_tree)
794 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
795 /*
796 * Insert again. mz->usage_in_excess will be updated.
797 * If excess is 0, no tree ops.
798 */
799 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
800 spin_unlock(&mctz->lock);
801 }
802 }
803 }
804
805 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
806 {
807 int node, zone;
808 struct mem_cgroup_per_zone *mz;
809 struct mem_cgroup_tree_per_zone *mctz;
810
811 for_each_node(node) {
812 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
813 mz = mem_cgroup_zoneinfo(memcg, node, zone);
814 mctz = soft_limit_tree_node_zone(node, zone);
815 mem_cgroup_remove_exceeded(memcg, mz, mctz);
816 }
817 }
818 }
819
820 static struct mem_cgroup_per_zone *
821 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
822 {
823 struct rb_node *rightmost = NULL;
824 struct mem_cgroup_per_zone *mz;
825
826 retry:
827 mz = NULL;
828 rightmost = rb_last(&mctz->rb_root);
829 if (!rightmost)
830 goto done; /* Nothing to reclaim from */
831
832 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
833 /*
834 * Remove the node now but someone else can add it back,
 835  * we will add it back at the end of reclaim to its correct
836 * position in the tree.
837 */
838 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
839 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
840 !css_tryget(&mz->memcg->css))
841 goto retry;
842 done:
843 return mz;
844 }
845
846 static struct mem_cgroup_per_zone *
847 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
848 {
849 struct mem_cgroup_per_zone *mz;
850
851 spin_lock(&mctz->lock);
852 mz = __mem_cgroup_largest_soft_limit_node(mctz);
853 spin_unlock(&mctz->lock);
854 return mz;
855 }
856
857 /*
858 * Implementation Note: reading percpu statistics for memcg.
859 *
 860  * Both vmstat[] and percpu_counter have thresholds and do periodic
 861  * synchronization to implement "quick" reads. There is a trade-off between
 862  * reading cost and precision of the value. Hence, we may have a chance to
 863  * implement a periodic synchronization of the counters in memcg's counter.
 864  *
 865  * But this _read() function is used for the user interface now. The user
 866  * accounts memory usage by memory cgroup and _always_ requires an exact
 867  * value because of that accounting. Even if we provided a quick-and-fuzzy
 868  * read, we would always have to visit all online cpus and compute the sum.
 869  * So, for now, the extra synchronization is not implemented (it is only
 870  * done for cpu hotplug).
 871  *
 872  * If there are kernel-internal actions which can use a not-exact value, and
 873  * reading all cpu values becomes a performance bottleneck in some common
 874  * workload, thresholds and synchronization as in vmstat[] should be implemented.
875 */
876 static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
877 enum mem_cgroup_stat_index idx)
878 {
879 long val = 0;
880 int cpu;
881
882 get_online_cpus();
883 for_each_online_cpu(cpu)
884 val += per_cpu(memcg->stat->count[idx], cpu);
885 #ifdef CONFIG_HOTPLUG_CPU
886 spin_lock(&memcg->pcp_counter_lock);
887 val += memcg->nocpu_base.count[idx];
888 spin_unlock(&memcg->pcp_counter_lock);
889 #endif
890 put_online_cpus();
891 return val;
892 }
893
894 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
895 bool charge)
896 {
897 int val = (charge) ? 1 : -1;
898 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
899 }
900
901 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
902 enum mem_cgroup_events_index idx)
903 {
904 unsigned long val = 0;
905 int cpu;
906
907 get_online_cpus();
908 for_each_online_cpu(cpu)
909 val += per_cpu(memcg->stat->events[idx], cpu);
910 #ifdef CONFIG_HOTPLUG_CPU
911 spin_lock(&memcg->pcp_counter_lock);
912 val += memcg->nocpu_base.events[idx];
913 spin_unlock(&memcg->pcp_counter_lock);
914 #endif
915 put_online_cpus();
916 return val;
917 }
918
919 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
920 struct page *page,
921 bool anon, int nr_pages)
922 {
923 /*
924 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
925 * counted as CACHE even if it's on ANON LRU.
926 */
927 if (anon)
928 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
929 nr_pages);
930 else
931 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
932 nr_pages);
933
934 if (PageTransHuge(page))
935 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
936 nr_pages);
937
938 /* pagein of a big page is an event. So, ignore page size */
939 if (nr_pages > 0)
940 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
941 else {
942 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
943 nr_pages = -nr_pages; /* for event */
944 }
945
946 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
947 }
948
949 unsigned long
950 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
951 {
952 struct mem_cgroup_per_zone *mz;
953
954 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
955 return mz->lru_size[lru];
956 }
957
958 static unsigned long
959 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
960 unsigned int lru_mask)
961 {
962 struct mem_cgroup_per_zone *mz;
963 enum lru_list lru;
964 unsigned long ret = 0;
965
966 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
967
968 for_each_lru(lru) {
969 if (BIT(lru) & lru_mask)
970 ret += mz->lru_size[lru];
971 }
972 return ret;
973 }
974
975 static unsigned long
976 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
977 int nid, unsigned int lru_mask)
978 {
979 u64 total = 0;
980 int zid;
981
982 for (zid = 0; zid < MAX_NR_ZONES; zid++)
983 total += mem_cgroup_zone_nr_lru_pages(memcg,
984 nid, zid, lru_mask);
985
986 return total;
987 }
988
989 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
990 unsigned int lru_mask)
991 {
992 int nid;
993 u64 total = 0;
994
995 for_each_node_state(nid, N_MEMORY)
996 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
997 return total;
998 }
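/*
 * For example, mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE) -- the file-only
 * mask used further below -- sums the inactive_file and active_file LRU sizes
 * over every zone of every N_MEMORY node charged to @memcg.
 */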
999
1000 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1001 enum mem_cgroup_events_target target)
1002 {
1003 unsigned long val, next;
1004
1005 val = __this_cpu_read(memcg->stat->nr_page_events);
1006 next = __this_cpu_read(memcg->stat->targets[target]);
1007 /* from time_after() in jiffies.h */
1008 if ((long)next - (long)val < 0) {
1009 switch (target) {
1010 case MEM_CGROUP_TARGET_THRESH:
1011 next = val + THRESHOLDS_EVENTS_TARGET;
1012 break;
1013 case MEM_CGROUP_TARGET_SOFTLIMIT:
1014 next = val + SOFTLIMIT_EVENTS_TARGET;
1015 break;
1016 case MEM_CGROUP_TARGET_NUMAINFO:
1017 next = val + NUMAINFO_EVENTS_TARGET;
1018 break;
1019 default:
1020 break;
1021 }
1022 __this_cpu_write(memcg->stat->targets[target], next);
1023 return true;
1024 }
1025 return false;
1026 }
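/*
 * Roughly speaking, with THRESHOLDS_EVENTS_TARGET == 128 the check above lets
 * mem_cgroup_threshold() run about once per 128 page events on each CPU,
 * while soft limit and numainfo updates fire an order of magnitude less often
 * (every 1024 events).
 */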
1027
1028 /*
1029 * Check events in order.
1030 *
1031 */
1032 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1033 {
1034 preempt_disable();
1035 /* threshold event is triggered in finer grain than soft limit */
1036 if (unlikely(mem_cgroup_event_ratelimit(memcg,
1037 MEM_CGROUP_TARGET_THRESH))) {
1038 bool do_softlimit;
1039 bool do_numainfo __maybe_unused;
1040
1041 do_softlimit = mem_cgroup_event_ratelimit(memcg,
1042 MEM_CGROUP_TARGET_SOFTLIMIT);
1043 #if MAX_NUMNODES > 1
1044 do_numainfo = mem_cgroup_event_ratelimit(memcg,
1045 MEM_CGROUP_TARGET_NUMAINFO);
1046 #endif
1047 preempt_enable();
1048
1049 mem_cgroup_threshold(memcg);
1050 if (unlikely(do_softlimit))
1051 mem_cgroup_update_tree(memcg, page);
1052 #if MAX_NUMNODES > 1
1053 if (unlikely(do_numainfo))
1054 atomic_inc(&memcg->numainfo_events);
1055 #endif
1056 } else
1057 preempt_enable();
1058 }
1059
1060 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1061 {
1062 /*
1063 * mm_update_next_owner() may clear mm->owner to NULL
1064 * if it races with swapoff, page migration, etc.
1065 * So this can be called with p == NULL.
1066 */
1067 if (unlikely(!p))
1068 return NULL;
1069
1070 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1071 }
1072
1073 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1074 {
1075 struct mem_cgroup *memcg = NULL;
1076
1077 rcu_read_lock();
1078 do {
1079 /*
1080  * Page cache insertions can happen without an
1081 * actual mm context, e.g. during disk probing
1082 * on boot, loopback IO, acct() writes etc.
1083 */
1084 if (unlikely(!mm))
1085 memcg = root_mem_cgroup;
1086 else {
1087 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1088 if (unlikely(!memcg))
1089 memcg = root_mem_cgroup;
1090 }
1091 } while (!css_tryget(&memcg->css));
1092 rcu_read_unlock();
1093 return memcg;
1094 }
1095
1096 /*
1097 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1098 * ref. count) or NULL if the whole root's subtree has been visited.
1099 *
1100 * helper function to be used by mem_cgroup_iter
1101 */
1102 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1103 struct mem_cgroup *last_visited)
1104 {
1105 struct cgroup_subsys_state *prev_css, *next_css;
1106
1107 prev_css = last_visited ? &last_visited->css : NULL;
1108 skip_node:
1109 next_css = css_next_descendant_pre(prev_css, &root->css);
1110
1111 /*
1112 * Even if we found a group we have to make sure it is
1113 * alive. css && !memcg means that the groups should be
1114 * skipped and we should continue the tree walk.
1115 * last_visited css is safe to use because it is
1116 * protected by css_get and the tree walk is rcu safe.
1117 *
1118 * We do not take a reference on the root of the tree walk
1119 * because we might race with the root removal when it would
1120 * be the only node in the iterated hierarchy and mem_cgroup_iter
1121 * would end up in an endless loop because it expects that at
1122 * least one valid node will be returned. Root cannot disappear
1123 * because caller of the iterator should hold it already so
1124 * skipping css reference should be safe.
1125 */
1126 if (next_css) {
1127 if ((next_css == &root->css) ||
1128 ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
1129 return mem_cgroup_from_css(next_css);
1130
1131 prev_css = next_css;
1132 goto skip_node;
1133 }
1134
1135 return NULL;
1136 }
1137
1138 static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1139 {
1140 /*
1141 * When a group in the hierarchy below root is destroyed, the
1142 * hierarchy iterator can no longer be trusted since it might
1143 * have pointed to the destroyed group. Invalidate it.
1144 */
1145 atomic_inc(&root->dead_count);
1146 }
1147
1148 static struct mem_cgroup *
1149 mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1150 struct mem_cgroup *root,
1151 int *sequence)
1152 {
1153 struct mem_cgroup *position = NULL;
1154 /*
1155 * A cgroup destruction happens in two stages: offlining and
1156  * release. They are separated by an RCU grace period.
1157 *
1158 * If the iterator is valid, we may still race with an
1159 * offlining. The RCU lock ensures the object won't be
1160 * released, tryget will fail if we lost the race.
1161 */
1162 *sequence = atomic_read(&root->dead_count);
1163 if (iter->last_dead_count == *sequence) {
1164 smp_rmb();
1165 position = iter->last_visited;
1166
1167 /*
1168 * We cannot take a reference to root because we might race
1169 * with root removal and returning NULL would end up in
1170 * an endless loop on the iterator user level when root
1171 * would be returned all the time.
1172 */
1173 if (position && position != root &&
1174 !css_tryget(&position->css))
1175 position = NULL;
1176 }
1177 return position;
1178 }
1179
1180 static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1181 struct mem_cgroup *last_visited,
1182 struct mem_cgroup *new_position,
1183 struct mem_cgroup *root,
1184 int sequence)
1185 {
1186 /* root reference counting symmetric to mem_cgroup_iter_load */
1187 if (last_visited && last_visited != root)
1188 css_put(&last_visited->css);
1189 /*
1190 * We store the sequence count from the time @last_visited was
1191 * loaded successfully instead of rereading it here so that we
1192 * don't lose destruction events in between. We could have
1193 * raced with the destruction of @new_position after all.
1194 */
1195 iter->last_visited = new_position;
1196 smp_wmb();
1197 iter->last_dead_count = sequence;
1198 }
1199
1200 /**
1201 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1202 * @root: hierarchy root
1203 * @prev: previously returned memcg, NULL on first invocation
1204 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1205 *
1206 * Returns references to children of the hierarchy below @root, or
1207 * @root itself, or %NULL after a full round-trip.
1208 *
1209 * Caller must pass the return value in @prev on subsequent
1210 * invocations for reference counting, or use mem_cgroup_iter_break()
1211 * to cancel a hierarchy walk before the round-trip is complete.
1212 *
1213 * Reclaimers can specify a zone and a priority level in @reclaim to
1214 * divide up the memcgs in the hierarchy among all concurrent
1215 * reclaimers operating on the same zone and priority.
1216 */
1217 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1218 struct mem_cgroup *prev,
1219 struct mem_cgroup_reclaim_cookie *reclaim)
1220 {
1221 struct mem_cgroup *memcg = NULL;
1222 struct mem_cgroup *last_visited = NULL;
1223
1224 if (mem_cgroup_disabled())
1225 return NULL;
1226
1227 if (!root)
1228 root = root_mem_cgroup;
1229
1230 if (prev && !reclaim)
1231 last_visited = prev;
1232
1233 if (!root->use_hierarchy && root != root_mem_cgroup) {
1234 if (prev)
1235 goto out_css_put;
1236 return root;
1237 }
1238
1239 rcu_read_lock();
1240 while (!memcg) {
1241 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1242 int uninitialized_var(seq);
1243
1244 if (reclaim) {
1245 int nid = zone_to_nid(reclaim->zone);
1246 int zid = zone_idx(reclaim->zone);
1247 struct mem_cgroup_per_zone *mz;
1248
1249 mz = mem_cgroup_zoneinfo(root, nid, zid);
1250 iter = &mz->reclaim_iter[reclaim->priority];
1251 if (prev && reclaim->generation != iter->generation) {
1252 iter->last_visited = NULL;
1253 goto out_unlock;
1254 }
1255
1256 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1257 }
1258
1259 memcg = __mem_cgroup_iter_next(root, last_visited);
1260
1261 if (reclaim) {
1262 mem_cgroup_iter_update(iter, last_visited, memcg, root,
1263 seq);
1264
1265 if (!memcg)
1266 iter->generation++;
1267 else if (!prev && memcg)
1268 reclaim->generation = iter->generation;
1269 }
1270
1271 if (prev && !memcg)
1272 goto out_unlock;
1273 }
1274 out_unlock:
1275 rcu_read_unlock();
1276 out_css_put:
1277 if (prev && prev != root)
1278 css_put(&prev->css);
1279
1280 return memcg;
1281 }
1282
1283 /**
1284 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1285 * @root: hierarchy root
1286 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1287 */
1288 void mem_cgroup_iter_break(struct mem_cgroup *root,
1289 struct mem_cgroup *prev)
1290 {
1291 if (!root)
1292 root = root_mem_cgroup;
1293 if (prev && prev != root)
1294 css_put(&prev->css);
1295 }
1296
1297 /*
1298 * Iteration constructs for visiting all cgroups (under a tree). If
1299 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1300 * be used for reference counting.
1301 */
1302 #define for_each_mem_cgroup_tree(iter, root) \
1303 for (iter = mem_cgroup_iter(root, NULL, NULL); \
1304 iter != NULL; \
1305 iter = mem_cgroup_iter(root, iter, NULL))
1306
1307 #define for_each_mem_cgroup(iter) \
1308 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
1309 iter != NULL; \
1310 iter = mem_cgroup_iter(NULL, iter, NULL))
1311
1312 void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1313 {
1314 struct mem_cgroup *memcg;
1315
1316 rcu_read_lock();
1317 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1318 if (unlikely(!memcg))
1319 goto out;
1320
1321 switch (idx) {
1322 case PGFAULT:
1323 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1324 break;
1325 case PGMAJFAULT:
1326 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1327 break;
1328 default:
1329 BUG();
1330 }
1331 out:
1332 rcu_read_unlock();
1333 }
1334 EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1335
1336 /**
1337 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1338 * @zone: zone of the wanted lruvec
1339 * @memcg: memcg of the wanted lruvec
1340 *
1341 * Returns the lru list vector holding pages for the given @zone and
1342 * @mem. This can be the global zone lruvec, if the memory controller
1343 * is disabled.
1344 */
1345 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1346 struct mem_cgroup *memcg)
1347 {
1348 struct mem_cgroup_per_zone *mz;
1349 struct lruvec *lruvec;
1350
1351 if (mem_cgroup_disabled()) {
1352 lruvec = &zone->lruvec;
1353 goto out;
1354 }
1355
1356 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1357 lruvec = &mz->lruvec;
1358 out:
1359 /*
1360 * Since a node can be onlined after the mem_cgroup was created,
1361 * we have to be prepared to initialize lruvec->zone here;
1362 * and if offlined then reonlined, we need to reinitialize it.
1363 */
1364 if (unlikely(lruvec->zone != zone))
1365 lruvec->zone = zone;
1366 return lruvec;
1367 }
1368
1369 /*
1370 * Following LRU functions are allowed to be used without PCG_LOCK.
1371  * Operations are called by routines of the global LRU independently of memcg.
1372  * What we have to take care of here is the validity of pc->mem_cgroup.
1373  *
1374  * Changes to pc->mem_cgroup happen when
1375 * 1. charge
1376 * 2. moving account
1377 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1378 * It is added to LRU before charge.
1379 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1380 * When moving account, the page is not on LRU. It's isolated.
1381 */
1382
1383 /**
1384 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1385 * @page: the page
1386 * @zone: zone of the page
1387 */
1388 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1389 {
1390 struct mem_cgroup_per_zone *mz;
1391 struct mem_cgroup *memcg;
1392 struct page_cgroup *pc;
1393 struct lruvec *lruvec;
1394
1395 if (mem_cgroup_disabled()) {
1396 lruvec = &zone->lruvec;
1397 goto out;
1398 }
1399
1400 pc = lookup_page_cgroup(page);
1401 memcg = pc->mem_cgroup;
1402
1403 /*
1404 * Surreptitiously switch any uncharged offlist page to root:
1405 * an uncharged page off lru does nothing to secure
1406 * its former mem_cgroup from sudden removal.
1407 *
1408 * Our caller holds lru_lock, and PageCgroupUsed is updated
1409 * under page_cgroup lock: between them, they make all uses
1410 * of pc->mem_cgroup safe.
1411 */
1412 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1413 pc->mem_cgroup = memcg = root_mem_cgroup;
1414
1415 mz = page_cgroup_zoneinfo(memcg, page);
1416 lruvec = &mz->lruvec;
1417 out:
1418 /*
1419 * Since a node can be onlined after the mem_cgroup was created,
1420 * we have to be prepared to initialize lruvec->zone here;
1421 * and if offlined then reonlined, we need to reinitialize it.
1422 */
1423 if (unlikely(lruvec->zone != zone))
1424 lruvec->zone = zone;
1425 return lruvec;
1426 }
1427
1428 /**
1429 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1430 * @lruvec: mem_cgroup per zone lru vector
1431 * @lru: index of lru list the page is sitting on
1432 * @nr_pages: positive when adding or negative when removing
1433 *
1434 * This function must be called when a page is added to or removed from an
1435 * lru list.
1436 */
1437 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1438 int nr_pages)
1439 {
1440 struct mem_cgroup_per_zone *mz;
1441 unsigned long *lru_size;
1442
1443 if (mem_cgroup_disabled())
1444 return;
1445
1446 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1447 lru_size = mz->lru_size + lru;
1448 *lru_size += nr_pages;
1449 VM_BUG_ON((long)(*lru_size) < 0);
1450 }
1451
1452 /*
1453  * Checks whether the given memcg is the same as root_memcg or is within
1454  * root_memcg's hierarchy subtree.
1455 */
1456 bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1457 struct mem_cgroup *memcg)
1458 {
1459 if (root_memcg == memcg)
1460 return true;
1461 if (!root_memcg->use_hierarchy || !memcg)
1462 return false;
1463 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1464 }
1465
1466 static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1467 struct mem_cgroup *memcg)
1468 {
1469 bool ret;
1470
1471 rcu_read_lock();
1472 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1473 rcu_read_unlock();
1474 return ret;
1475 }
1476
1477 bool task_in_mem_cgroup(struct task_struct *task,
1478 const struct mem_cgroup *memcg)
1479 {
1480 struct mem_cgroup *curr = NULL;
1481 struct task_struct *p;
1482 bool ret;
1483
1484 p = find_lock_task_mm(task);
1485 if (p) {
1486 curr = get_mem_cgroup_from_mm(p->mm);
1487 task_unlock(p);
1488 } else {
1489 /*
1490 * All threads may have already detached their mm's, but the oom
1491 * killer still needs to detect if they have already been oom
1492 * killed to prevent needlessly killing additional tasks.
1493 */
1494 rcu_read_lock();
1495 curr = mem_cgroup_from_task(task);
1496 if (curr)
1497 css_get(&curr->css);
1498 rcu_read_unlock();
1499 }
1500 /*
1501  * We should check use_hierarchy of "memcg", not "curr". Checking
1502  * use_hierarchy of "curr" here would make this function return true if
1503  * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1504  * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1505 */
1506 ret = mem_cgroup_same_or_subtree(memcg, curr);
1507 css_put(&curr->css);
1508 return ret;
1509 }
1510
1511 int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1512 {
1513 unsigned long inactive_ratio;
1514 unsigned long inactive;
1515 unsigned long active;
1516 unsigned long gb;
1517
1518 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1519 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1520
1521 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1522 if (gb)
1523 inactive_ratio = int_sqrt(10 * gb);
1524 else
1525 inactive_ratio = 1;
1526
1527 return inactive * inactive_ratio < active;
1528 }
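/*
 * A worked example of the heuristic above: with 10GB of anon pages in total,
 * gb == 10 and inactive_ratio == int_sqrt(100) == 10, so the inactive anon
 * list is reported as low only while inactive * 10 < active.
 */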
1529
1530 #define mem_cgroup_from_res_counter(counter, member) \
1531 container_of(counter, struct mem_cgroup, member)
1532
1533 /**
1534 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1535 * @memcg: the memory cgroup
1536 *
1537 * Returns the maximum amount of memory @mem can be charged with, in
1538 * pages.
1539 */
1540 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1541 {
1542 unsigned long long margin;
1543
1544 margin = res_counter_margin(&memcg->res);
1545 if (do_swap_account)
1546 margin = min(margin, res_counter_margin(&memcg->memsw));
1547 return margin >> PAGE_SHIFT;
1548 }
1549
1550 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1551 {
1552 /* root ? */
1553 if (!css_parent(&memcg->css))
1554 return vm_swappiness;
1555
1556 return memcg->swappiness;
1557 }
1558
1559 /*
1560  * memcg->moving_account is used to check the possibility that some thread is
1561 * calling move_account(). When a thread on CPU-A starts moving pages under
1562 * a memcg, other threads should check memcg->moving_account under
1563 * rcu_read_lock(), like this:
1564 *
1565 * CPU-A CPU-B
1566 * rcu_read_lock()
1567  *   memcg->moving_account+1      if (memcg->moving_account)
1568 * take heavy locks.
1569 * synchronize_rcu() update something.
1570 * rcu_read_unlock()
1571 * start move here.
1572 */
1573
1574 /* for quick checking without looking up memcg */
1575 atomic_t memcg_moving __read_mostly;
1576
1577 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1578 {
1579 atomic_inc(&memcg_moving);
1580 atomic_inc(&memcg->moving_account);
1581 synchronize_rcu();
1582 }
1583
1584 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1585 {
1586 /*
1587 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1588 * We check NULL in callee rather than caller.
1589 */
1590 if (memcg) {
1591 atomic_dec(&memcg_moving);
1592 atomic_dec(&memcg->moving_account);
1593 }
1594 }
1595
1596 /*
1597  * A routine for checking whether "mem" is under move_account() or not.
1598  *
1599  * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
1600  * moving cgroups. This is for waiting at high memory pressure
1601 * caused by "move".
1602 */
1603 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1604 {
1605 struct mem_cgroup *from;
1606 struct mem_cgroup *to;
1607 bool ret = false;
1608 /*
1609 * Unlike task_move routines, we access mc.to, mc.from not under
1610 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1611 */
1612 spin_lock(&mc.lock);
1613 from = mc.from;
1614 to = mc.to;
1615 if (!from)
1616 goto unlock;
1617
1618 ret = mem_cgroup_same_or_subtree(memcg, from)
1619 || mem_cgroup_same_or_subtree(memcg, to);
1620 unlock:
1621 spin_unlock(&mc.lock);
1622 return ret;
1623 }
1624
1625 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1626 {
1627 if (mc.moving_task && current != mc.moving_task) {
1628 if (mem_cgroup_under_move(memcg)) {
1629 DEFINE_WAIT(wait);
1630 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1631 /* moving charge context might have finished. */
1632 if (mc.moving_task)
1633 schedule();
1634 finish_wait(&mc.waitq, &wait);
1635 return true;
1636 }
1637 }
1638 return false;
1639 }
1640
1641 /*
1642 * Take this lock when
1643  * - code tries to modify a page's memcg while it is USED.
1644  * - code tries to modify page state accounting in a memcg.
1645 */
1646 static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1647 unsigned long *flags)
1648 {
1649 spin_lock_irqsave(&memcg->move_lock, *flags);
1650 }
1651
1652 static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1653 unsigned long *flags)
1654 {
1655 spin_unlock_irqrestore(&memcg->move_lock, *flags);
1656 }
1657
1658 #define K(x) ((x) << (PAGE_SHIFT-10))
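/* K(x) converts a page count to kilobytes, e.g. K(3) == 12 with 4KB pages. */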
1659 /**
1660 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1661 * @memcg: The memory cgroup that went over limit
1662 * @p: Task that is going to be killed
1663 *
1664 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1665 * enabled
1666 */
1667 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1668 {
1669 /* oom_info_lock ensures that parallel ooms do not interleave */
1670 static DEFINE_MUTEX(oom_info_lock);
1671 struct mem_cgroup *iter;
1672 unsigned int i;
1673
1674 if (!p)
1675 return;
1676
1677 mutex_lock(&oom_info_lock);
1678 rcu_read_lock();
1679
1680 pr_info("Task in ");
1681 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1682 pr_info(" killed as a result of limit of ");
1683 pr_cont_cgroup_path(memcg->css.cgroup);
1684 pr_info("\n");
1685
1686 rcu_read_unlock();
1687
1688 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1689 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1690 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1691 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1692 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1693 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1694 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1695 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1696 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1697 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1698 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1699 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1700
1701 for_each_mem_cgroup_tree(iter, memcg) {
1702 pr_info("Memory cgroup stats for ");
1703 pr_cont_cgroup_path(iter->css.cgroup);
1704 pr_cont(":");
1705
1706 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1707 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1708 continue;
1709 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1710 K(mem_cgroup_read_stat(iter, i)));
1711 }
1712
1713 for (i = 0; i < NR_LRU_LISTS; i++)
1714 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1715 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1716
1717 pr_cont("\n");
1718 }
1719 mutex_unlock(&oom_info_lock);
1720 }
1721
1722 /*
1723  * This function returns the number of memcgs under the hierarchy tree.
1724  * Returns 1 (self count) if there are no children.
1725 */
1726 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1727 {
1728 int num = 0;
1729 struct mem_cgroup *iter;
1730
1731 for_each_mem_cgroup_tree(iter, memcg)
1732 num++;
1733 return num;
1734 }
1735
1736 /*
1737 * Return the memory (and swap, if configured) limit for a memcg.
1738 */
1739 static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1740 {
1741 u64 limit;
1742
1743 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1744
1745 /*
1746 * Do not consider swap space if we cannot swap due to swappiness
1747 */
1748 if (mem_cgroup_swappiness(memcg)) {
1749 u64 memsw;
1750
1751 limit += total_swap_pages << PAGE_SHIFT;
1752 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1753
1754 /*
1755 * If memsw is finite and limits the amount of swap space
1756 * available to this memcg, return that limit.
1757 */
1758 limit = min(limit, memsw);
1759 }
1760
1761 return limit;
1762 }
1763
1764 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1765 int order)
1766 {
1767 struct mem_cgroup *iter;
1768 unsigned long chosen_points = 0;
1769 unsigned long totalpages;
1770 unsigned int points = 0;
1771 struct task_struct *chosen = NULL;
1772
1773 /*
1774 * If current has a pending SIGKILL or is exiting, then automatically
1775 * select it. The goal is to allow it to allocate so that it may
1776 * quickly exit and free its memory.
1777 */
1778 if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1779 set_thread_flag(TIF_MEMDIE);
1780 return;
1781 }
1782
1783 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1784 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1785 for_each_mem_cgroup_tree(iter, memcg) {
1786 struct css_task_iter it;
1787 struct task_struct *task;
1788
1789 css_task_iter_start(&iter->css, &it);
1790 while ((task = css_task_iter_next(&it))) {
1791 switch (oom_scan_process_thread(task, totalpages, NULL,
1792 false)) {
1793 case OOM_SCAN_SELECT:
1794 if (chosen)
1795 put_task_struct(chosen);
1796 chosen = task;
1797 chosen_points = ULONG_MAX;
1798 get_task_struct(chosen);
1799 /* fall through */
1800 case OOM_SCAN_CONTINUE:
1801 continue;
1802 case OOM_SCAN_ABORT:
1803 css_task_iter_end(&it);
1804 mem_cgroup_iter_break(memcg, iter);
1805 if (chosen)
1806 put_task_struct(chosen);
1807 return;
1808 case OOM_SCAN_OK:
1809 break;
1810 };
1811 points = oom_badness(task, memcg, NULL, totalpages);
1812 if (!points || points < chosen_points)
1813 continue;
1814 /* Prefer thread group leaders for display purposes */
1815 if (points == chosen_points &&
1816 thread_group_leader(chosen))
1817 continue;
1818
1819 if (chosen)
1820 put_task_struct(chosen);
1821 chosen = task;
1822 chosen_points = points;
1823 get_task_struct(chosen);
1824 }
1825 css_task_iter_end(&it);
1826 }
1827
1828 if (!chosen)
1829 return;
1830 points = chosen_points * 1000 / totalpages;
1831 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1832 NULL, "Memory cgroup out of memory");
1833 }
1834
1835 static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1836 gfp_t gfp_mask,
1837 unsigned long flags)
1838 {
1839 unsigned long total = 0;
1840 bool noswap = false;
1841 int loop;
1842
1843 if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1844 noswap = true;
1845 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1846 noswap = true;
1847
1848 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1849 if (loop)
1850 drain_all_stock_async(memcg);
1851 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1852 /*
1853 * Allow limit shrinkers, which are triggered directly
1854 * by userspace, to catch signals and stop reclaim
1855 * after minimal progress, regardless of the margin.
1856 */
1857 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1858 break;
1859 if (mem_cgroup_margin(memcg))
1860 break;
1861 /*
1862 * If nothing was reclaimed after two attempts, there
1863 * may be no reclaimable pages in this hierarchy.
1864 */
1865 if (loop && !total)
1866 break;
1867 }
1868 return total;
1869 }
1870
1871 /**
1872 * test_mem_cgroup_node_reclaimable
1873 * @memcg: the target memcg
1874 * @nid: the node ID to be checked.
1875 * @noswap: specify true here if the user wants file only information.
1876 *
1877 * This function returns whether the specified memcg contains any
1878 * reclaimable pages on a node. Returns true if there are any reclaimable
1879 * pages in the node.
1880 */
1881 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1882 int nid, bool noswap)
1883 {
1884 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1885 return true;
1886 if (noswap || !total_swap_pages)
1887 return false;
1888 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1889 return true;
1890 return false;
1891
1892 }
1893 #if MAX_NUMNODES > 1
1894
1895 /*
1896 * Always updating the nodemask is not very good - even if we have an empty
1897 * list or the wrong list here, we can start from some node and traverse all
1898 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1899 *
1900 */
1901 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1902 {
1903 int nid;
1904 /*
1905 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1906 * pagein/pageout changes since the last update.
1907 */
1908 if (!atomic_read(&memcg->numainfo_events))
1909 return;
1910 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1911 return;
1912
1913 /* make a nodemask where this memcg uses memory from */
1914 memcg->scan_nodes = node_states[N_MEMORY];
1915
1916 for_each_node_mask(nid, node_states[N_MEMORY]) {
1917
1918 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1919 node_clear(nid, memcg->scan_nodes);
1920 }
1921
1922 atomic_set(&memcg->numainfo_events, 0);
1923 atomic_set(&memcg->numainfo_updating, 0);
1924 }
1925
1926 /*
1927 * Selecting a node to start reclaim from. Because all we need is to
1928 * reduce the usage counter, starting from anywhere is OK. Considering
1929 * memory reclaim from the current node, there are pros and cons.
1930 *
1931 * Freeing memory from the current node means freeing memory from a node
1932 * which we'll use or have used, so it may hurt the LRU. And if several
1933 * threads hit their limits, they will contend on one node. But freeing
1934 * from a remote node costs more because of memory latency.
1935 *
1936 * Now, we use round-robin. A better algorithm is welcome.
1937 */
1938 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1939 {
1940 int node;
1941
1942 mem_cgroup_may_update_nodemask(memcg);
1943 node = memcg->last_scanned_node;
1944
1945 node = next_node(node, memcg->scan_nodes);
1946 if (node == MAX_NUMNODES)
1947 node = first_node(memcg->scan_nodes);
1948 /*
1949 * We call this when we hit limit, not when pages are added to LRU.
1950 * No LRU may hold pages because all pages are UNEVICTABLE or
1951 * memcg is too small and all pages are not on LRU. In that case,
1952 * we use the current node.
1953 */
1954 if (unlikely(node == MAX_NUMNODES))
1955 node = numa_node_id();
1956
1957 memcg->last_scanned_node = node;
1958 return node;
1959 }
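/*
 * For example, if scan_nodes currently contains nodes {0, 2, 3} and
 * last_scanned_node is 2, next_node() yields 3; on the following call
 * next_node(3, ...) returns MAX_NUMNODES and first_node() wraps the scan
 * back to node 0. Only with an empty scan_nodes do both lookups fail and
 * the current node is used as a fallback.
 */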
1960
1961 /*
1962 * Check whether any node contains reclaimable pages or not.
1963 * For a quick scan, we make use of scan_nodes. This allows us to skip
1964 * unused nodes. But scan_nodes is lazily updated and may not contain
1965 * enough new information. We need to double check.
1966 */
1967 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1968 {
1969 int nid;
1970
1971 /*
1972 * quick check...making use of scan_nodes.
1973 * We can skip unused nodes.
1974 */
1975 if (!nodes_empty(memcg->scan_nodes)) {
1976 for (nid = first_node(memcg->scan_nodes);
1977 nid < MAX_NUMNODES;
1978 nid = next_node(nid, memcg->scan_nodes)) {
1979
1980 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1981 return true;
1982 }
1983 }
1984 /*
1985 * Check rest of nodes.
1986 */
1987 for_each_node_state(nid, N_MEMORY) {
1988 if (node_isset(nid, memcg->scan_nodes))
1989 continue;
1990 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1991 return true;
1992 }
1993 return false;
1994 }
1995
1996 #else
1997 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1998 {
1999 return 0;
2000 }
2001
2002 static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2003 {
2004 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2005 }
2006 #endif
2007
2008 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2009 struct zone *zone,
2010 gfp_t gfp_mask,
2011 unsigned long *total_scanned)
2012 {
2013 struct mem_cgroup *victim = NULL;
2014 int total = 0;
2015 int loop = 0;
2016 unsigned long excess;
2017 unsigned long nr_scanned;
2018 struct mem_cgroup_reclaim_cookie reclaim = {
2019 .zone = zone,
2020 .priority = 0,
2021 };
2022
2023 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2024
2025 while (1) {
2026 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2027 if (!victim) {
2028 loop++;
2029 if (loop >= 2) {
2030 /*
2031 * If we have not been able to reclaim
2032 * anything, it might be because there are
2033 * no reclaimable pages under this hierarchy
2034 */
2035 if (!total)
2036 break;
2037 /*
2038 * We want to do more targeted reclaim.
2039 * excess >> 2 is not too excessive, so we don't
2040 * reclaim too much, nor too little, so we don't keep
2041 * coming back to reclaim from this cgroup
2042 */
2043 if (total >= (excess >> 2) ||
2044 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2045 break;
2046 }
2047 continue;
2048 }
2049 if (!mem_cgroup_reclaimable(victim, false))
2050 continue;
2051 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2052 zone, &nr_scanned);
2053 *total_scanned += nr_scanned;
2054 if (!res_counter_soft_limit_excess(&root_memcg->res))
2055 break;
2056 }
2057 mem_cgroup_iter_break(root_memcg, victim);
2058 return total;
2059 }
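/*
 * To illustrate the termination logic above with hypothetical numbers: if
 * root_memcg exceeds its soft limit by 64MB (excess = 16384 pages with 4KB
 * pages), reclaim keeps picking victims until the excess drops to zero, at
 * least excess >> 2 = 4096 pages have been reclaimed after repeated passes
 * over the hierarchy, or MEM_CGROUP_MAX_RECLAIM_LOOPS passes yield no
 * progress.
 */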
2060
2061 #ifdef CONFIG_LOCKDEP
2062 static struct lockdep_map memcg_oom_lock_dep_map = {
2063 .name = "memcg_oom_lock",
2064 };
2065 #endif
2066
2067 static DEFINE_SPINLOCK(memcg_oom_lock);
2068
2069 /*
2070 * Check whether the OOM killer is already running under our hierarchy.
2071 * If someone is already running it, return false.
2072 */
2073 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2074 {
2075 struct mem_cgroup *iter, *failed = NULL;
2076
2077 spin_lock(&memcg_oom_lock);
2078
2079 for_each_mem_cgroup_tree(iter, memcg) {
2080 if (iter->oom_lock) {
2081 /*
2082 * this subtree of our hierarchy is already locked,
2083 * so we cannot grant the lock.
2084 */
2085 failed = iter;
2086 mem_cgroup_iter_break(memcg, iter);
2087 break;
2088 } else
2089 iter->oom_lock = true;
2090 }
2091
2092 if (failed) {
2093 /*
2094 * OK, we failed to lock the whole subtree so we have
2095 * to clean up what we already set up, up to the failing subtree
2096 */
2097 for_each_mem_cgroup_tree(iter, memcg) {
2098 if (iter == failed) {
2099 mem_cgroup_iter_break(memcg, iter);
2100 break;
2101 }
2102 iter->oom_lock = false;
2103 }
2104 } else
2105 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2106
2107 spin_unlock(&memcg_oom_lock);
2108
2109 return !failed;
2110 }
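/*
 * Example of the rollback above: in a hierarchy A -> B -> C where B's
 * oom_lock is already held by another OOM path, the trylock on A first sets
 * A->oom_lock, then finds B locked and records it as 'failed'; the cleanup
 * pass clears A->oom_lock again, stops at B, and the function returns false.
 */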
2111
2112 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2113 {
2114 struct mem_cgroup *iter;
2115
2116 spin_lock(&memcg_oom_lock);
2117 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2118 for_each_mem_cgroup_tree(iter, memcg)
2119 iter->oom_lock = false;
2120 spin_unlock(&memcg_oom_lock);
2121 }
2122
2123 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2124 {
2125 struct mem_cgroup *iter;
2126
2127 for_each_mem_cgroup_tree(iter, memcg)
2128 atomic_inc(&iter->under_oom);
2129 }
2130
2131 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2132 {
2133 struct mem_cgroup *iter;
2134
2135 /*
2136 * When a new child is created while the hierarchy is under oom,
2137 * mem_cgroup_oom_lock() may not be called. We have to use
2138 * atomic_add_unless() here.
2139 */
2140 for_each_mem_cgroup_tree(iter, memcg)
2141 atomic_add_unless(&iter->under_oom, -1, 0);
2142 }
2143
2144 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2145
2146 struct oom_wait_info {
2147 struct mem_cgroup *memcg;
2148 wait_queue_t wait;
2149 };
2150
2151 static int memcg_oom_wake_function(wait_queue_t *wait,
2152 unsigned mode, int sync, void *arg)
2153 {
2154 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2155 struct mem_cgroup *oom_wait_memcg;
2156 struct oom_wait_info *oom_wait_info;
2157
2158 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2159 oom_wait_memcg = oom_wait_info->memcg;
2160
2161 /*
2162 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
2163 * Then we can use css_is_ancestor without taking care of RCU.
2164 */
2165 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2166 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2167 return 0;
2168 return autoremove_wake_function(wait, mode, sync, arg);
2169 }
2170
2171 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2172 {
2173 atomic_inc(&memcg->oom_wakeups);
2174 /* for filtering, pass "memcg" as argument. */
2175 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2176 }
2177
2178 static void memcg_oom_recover(struct mem_cgroup *memcg)
2179 {
2180 if (memcg && atomic_read(&memcg->under_oom))
2181 memcg_wakeup_oom(memcg);
2182 }
2183
2184 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2185 {
2186 if (!current->memcg_oom.may_oom)
2187 return;
2188 /*
2189 * We are in the middle of the charge context here, so we
2190 * don't want to block when potentially sitting on a callstack
2191 * that holds all kinds of filesystem and mm locks.
2192 *
2193 * Also, the caller may handle a failed allocation gracefully
2194 * (like optional page cache readahead) and so an OOM killer
2195 * invocation might not even be necessary.
2196 *
2197 * That's why we don't do anything here except remember the
2198 * OOM context and then deal with it at the end of the page
2199 * fault when the stack is unwound, the locks are released,
2200 * and when we know whether the fault was overall successful.
2201 */
2202 css_get(&memcg->css);
2203 current->memcg_oom.memcg = memcg;
2204 current->memcg_oom.gfp_mask = mask;
2205 current->memcg_oom.order = order;
2206 }
2207
2208 /**
2209 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2210 * @handle: actually kill/wait or just clean up the OOM state
2211 *
2212 * This has to be called at the end of a page fault if the memcg OOM
2213 * handler was enabled.
2214 *
2215 * Memcg supports userspace OOM handling where failed allocations must
2216 * sleep on a waitqueue until the userspace task resolves the
2217 * situation. Sleeping directly in the charge context with all kinds
2218 * of locks held is not a good idea, instead we remember an OOM state
2219 * in the task and mem_cgroup_oom_synchronize() has to be called at
2220 * the end of the page fault to complete the OOM handling.
2221 *
2222 * Returns %true if an ongoing memcg OOM situation was detected and
2223 * completed, %false otherwise.
2224 */
2225 bool mem_cgroup_oom_synchronize(bool handle)
2226 {
2227 struct mem_cgroup *memcg = current->memcg_oom.memcg;
2228 struct oom_wait_info owait;
2229 bool locked;
2230
2231 /* OOM is global, do not handle */
2232 if (!memcg)
2233 return false;
2234
2235 if (!handle)
2236 goto cleanup;
2237
2238 owait.memcg = memcg;
2239 owait.wait.flags = 0;
2240 owait.wait.func = memcg_oom_wake_function;
2241 owait.wait.private = current;
2242 INIT_LIST_HEAD(&owait.wait.task_list);
2243
2244 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2245 mem_cgroup_mark_under_oom(memcg);
2246
2247 locked = mem_cgroup_oom_trylock(memcg);
2248
2249 if (locked)
2250 mem_cgroup_oom_notify(memcg);
2251
2252 if (locked && !memcg->oom_kill_disable) {
2253 mem_cgroup_unmark_under_oom(memcg);
2254 finish_wait(&memcg_oom_waitq, &owait.wait);
2255 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2256 current->memcg_oom.order);
2257 } else {
2258 schedule();
2259 mem_cgroup_unmark_under_oom(memcg);
2260 finish_wait(&memcg_oom_waitq, &owait.wait);
2261 }
2262
2263 if (locked) {
2264 mem_cgroup_oom_unlock(memcg);
2265 /*
2266 * There is no guarantee that an OOM-lock contender
2267 * sees the wakeups triggered by the OOM kill
2268 * uncharges. Wake any sleepers explicitly.
2269 */
2270 memcg_oom_recover(memcg);
2271 }
2272 cleanup:
2273 current->memcg_oom.memcg = NULL;
2274 css_put(&memcg->css);
2275 return true;
2276 }
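/*
 * A rough sketch of the expected use from the page fault path (the real
 * callers live in handle_mm_fault() and pagefault_out_of_memory()):
 *
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);	(just clean up the state)
 *	else if (ret & VM_FAULT_OOM)
 *		mem_cgroup_oom_synchronize(true);	(kill or wait)
 */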
2277
2278 /*
2279 * Used to update mapped file or writeback or other statistics.
2280 *
2281 * Notes: Race condition
2282 *
2283 * We usually use lock_page_cgroup() for accessing page_cgroup members, but
2284 * it tends to be costly. Under some conditions, however, we don't need
2285 * to do so _always_.
2286 *
2287 * Considering "charge", lock_page_cgroup() is not required because all
2288 * file-stat operations happen after a page is attached to the radix-tree. There
2289 * is no race with "charge".
2290 *
2291 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2292 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
2293 * if there is a race with "uncharge". The statistics themselves are properly
2294 * handled by flags.
2295 *
2296 * Considering "move", this is an only case we see a race. To make the race
2297 * small, we check memcg->moving_account and detect there are possibility
2298 * of race or not. If there is, we take a lock.
2299 */
2300
2301 void __mem_cgroup_begin_update_page_stat(struct page *page,
2302 bool *locked, unsigned long *flags)
2303 {
2304 struct mem_cgroup *memcg;
2305 struct page_cgroup *pc;
2306
2307 pc = lookup_page_cgroup(page);
2308 again:
2309 memcg = pc->mem_cgroup;
2310 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2311 return;
2312 /*
2313 * If this memory cgroup is not under account moving, we don't
2314 * need to take move_lock_mem_cgroup(). Because we already hold
2315 * rcu_read_lock(), any calls to move_account will be delayed until
2316 * rcu_read_unlock().
2317 */
2318 VM_BUG_ON(!rcu_read_lock_held());
2319 if (atomic_read(&memcg->moving_account) <= 0)
2320 return;
2321
2322 move_lock_mem_cgroup(memcg, flags);
2323 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2324 move_unlock_mem_cgroup(memcg, flags);
2325 goto again;
2326 }
2327 *locked = true;
2328 }
2329
2330 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2331 {
2332 struct page_cgroup *pc = lookup_page_cgroup(page);
2333
2334 /*
2335 * It's guaranteed that pc->mem_cgroup never changes while
2336 * the lock is held because a routine that modifies pc->mem_cgroup
2337 * should take move_lock_mem_cgroup().
2338 */
2339 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2340 }
2341
2342 void mem_cgroup_update_page_stat(struct page *page,
2343 enum mem_cgroup_stat_index idx, int val)
2344 {
2345 struct mem_cgroup *memcg;
2346 struct page_cgroup *pc = lookup_page_cgroup(page);
2347 unsigned long uninitialized_var(flags);
2348
2349 if (mem_cgroup_disabled())
2350 return;
2351
2352 VM_BUG_ON(!rcu_read_lock_held());
2353 memcg = pc->mem_cgroup;
2354 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2355 return;
2356
2357 this_cpu_add(memcg->stat->count[idx], val);
2358 }
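/*
 * A minimal sketch of how a stat updater is expected to bracket its update,
 * assuming the caller already holds rcu_read_lock() as both helpers require:
 *
 *	bool locked = false;
 *	unsigned long flags;
 *
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, 1);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 */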
2359
2360 /*
2361 * size of first charge trial. "32" comes from vmscan.c's magic value.
2362 * TODO: it may be necessary to use bigger numbers on big iron.
2363 */
2364 #define CHARGE_BATCH 32U
2365 struct memcg_stock_pcp {
2366 struct mem_cgroup *cached; /* this is never the root cgroup */
2367 unsigned int nr_pages;
2368 struct work_struct work;
2369 unsigned long flags;
2370 #define FLUSHING_CACHED_CHARGE 0
2371 };
2372 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2373 static DEFINE_MUTEX(percpu_charge_mutex);
2374
2375 /**
2376 * consume_stock: Try to consume stocked charge on this cpu.
2377 * @memcg: memcg to consume from.
2378 * @nr_pages: how many pages to charge.
2379 *
2380 * The charges will only happen if @memcg matches the current cpu's memcg
2381 * stock, and at least @nr_pages are available in that stock. Failure to
2382 * service an allocation will refill the stock.
2383 *
2384 * returns true if successful, false otherwise.
2385 */
2386 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2387 {
2388 struct memcg_stock_pcp *stock;
2389 bool ret = true;
2390
2391 if (nr_pages > CHARGE_BATCH)
2392 return false;
2393
2394 stock = &get_cpu_var(memcg_stock);
2395 if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2396 stock->nr_pages -= nr_pages;
2397 else /* need to call res_counter_charge */
2398 ret = false;
2399 put_cpu_var(memcg_stock);
2400 return ret;
2401 }
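/*
 * Example of the interplay with refill_stock() below: a task charging a
 * single page typically asks the res_counter for CHARGE_BATCH (32) pages at
 * once and hands the 31 unused pages to refill_stock(); the next 31
 * single-page charges from the same memcg on this cpu are then satisfied by
 * consume_stock() without touching the res_counter at all.
 */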
2402
2403 /*
2404 * Return stocks cached in the percpu area to the res_counter and reset the cached information.
2405 */
2406 static void drain_stock(struct memcg_stock_pcp *stock)
2407 {
2408 struct mem_cgroup *old = stock->cached;
2409
2410 if (stock->nr_pages) {
2411 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2412
2413 res_counter_uncharge(&old->res, bytes);
2414 if (do_swap_account)
2415 res_counter_uncharge(&old->memsw, bytes);
2416 stock->nr_pages = 0;
2417 }
2418 stock->cached = NULL;
2419 }
2420
2421 /*
2422 * This must be called with preemption disabled or must be called by
2423 * a thread which is pinned to the local cpu.
2424 */
2425 static void drain_local_stock(struct work_struct *dummy)
2426 {
2427 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
2428 drain_stock(stock);
2429 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2430 }
2431
2432 static void __init memcg_stock_init(void)
2433 {
2434 int cpu;
2435
2436 for_each_possible_cpu(cpu) {
2437 struct memcg_stock_pcp *stock =
2438 &per_cpu(memcg_stock, cpu);
2439 INIT_WORK(&stock->work, drain_local_stock);
2440 }
2441 }
2442
2443 /*
2444 * Cache charges (nr_pages), which come from the res_counter, in the local
2445 * per-cpu area. They will be consumed by the consume_stock() function later.
2446 */
2447 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2448 {
2449 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2450
2451 if (stock->cached != memcg) { /* reset if necessary */
2452 drain_stock(stock);
2453 stock->cached = memcg;
2454 }
2455 stock->nr_pages += nr_pages;
2456 put_cpu_var(memcg_stock);
2457 }
2458
2459 /*
2460 * Drain all per-CPU charge caches for the given root_memcg, i.e. the subtree
2461 * of the hierarchy under it. The sync flag says whether we should block
2462 * until the work is done.
2463 */
2464 static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2465 {
2466 int cpu, curcpu;
2467
2468 /* Notify other cpus that system-wide "drain" is running */
2469 get_online_cpus();
2470 curcpu = get_cpu();
2471 for_each_online_cpu(cpu) {
2472 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2473 struct mem_cgroup *memcg;
2474
2475 memcg = stock->cached;
2476 if (!memcg || !stock->nr_pages)
2477 continue;
2478 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2479 continue;
2480 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2481 if (cpu == curcpu)
2482 drain_local_stock(&stock->work);
2483 else
2484 schedule_work_on(cpu, &stock->work);
2485 }
2486 }
2487 put_cpu();
2488
2489 if (!sync)
2490 goto out;
2491
2492 for_each_online_cpu(cpu) {
2493 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2494 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2495 flush_work(&stock->work);
2496 }
2497 out:
2498 put_online_cpus();
2499 }
2500
2501 /*
2502 * Tries to drain stocked charges on other cpus. This function is asynchronous
2503 * and just schedules a work item per cpu to drain locally on each cpu. The caller
2504 * can expect some charges to come back to the res_counter later but cannot wait
2505 * for that.
2506 */
2507 static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2508 {
2509 /*
2510 * If someone is already draining, avoid adding more kworker runs.
2511 */
2512 if (!mutex_trylock(&percpu_charge_mutex))
2513 return;
2514 drain_all_stock(root_memcg, false);
2515 mutex_unlock(&percpu_charge_mutex);
2516 }
2517
2518 /* This is a synchronous drain interface. */
2519 static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2520 {
2521 /* called when force_empty is called */
2522 mutex_lock(&percpu_charge_mutex);
2523 drain_all_stock(root_memcg, true);
2524 mutex_unlock(&percpu_charge_mutex);
2525 }
2526
2527 /*
2528 * This function drains the percpu counter values from a DEAD cpu and
2529 * moves them to the local cpu. Note that this function can be preempted.
2530 */
2531 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2532 {
2533 int i;
2534
2535 spin_lock(&memcg->pcp_counter_lock);
2536 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2537 long x = per_cpu(memcg->stat->count[i], cpu);
2538
2539 per_cpu(memcg->stat->count[i], cpu) = 0;
2540 memcg->nocpu_base.count[i] += x;
2541 }
2542 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2543 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2544
2545 per_cpu(memcg->stat->events[i], cpu) = 0;
2546 memcg->nocpu_base.events[i] += x;
2547 }
2548 spin_unlock(&memcg->pcp_counter_lock);
2549 }
2550
2551 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2552 unsigned long action,
2553 void *hcpu)
2554 {
2555 int cpu = (unsigned long)hcpu;
2556 struct memcg_stock_pcp *stock;
2557 struct mem_cgroup *iter;
2558
2559 if (action == CPU_ONLINE)
2560 return NOTIFY_OK;
2561
2562 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2563 return NOTIFY_OK;
2564
2565 for_each_mem_cgroup(iter)
2566 mem_cgroup_drain_pcp_counter(iter, cpu);
2567
2568 stock = &per_cpu(memcg_stock, cpu);
2569 drain_stock(stock);
2570 return NOTIFY_OK;
2571 }
2572
2573
2574 /* See mem_cgroup_try_charge() for details */
2575 enum {
2576 CHARGE_OK, /* success */
2577 CHARGE_RETRY, /* need to retry but retry is not bad */
2578 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2579 CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */
2580 };
2581
2582 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2583 unsigned int nr_pages, unsigned int min_pages,
2584 bool invoke_oom)
2585 {
2586 unsigned long csize = nr_pages * PAGE_SIZE;
2587 struct mem_cgroup *mem_over_limit;
2588 struct res_counter *fail_res;
2589 unsigned long flags = 0;
2590 int ret;
2591
2592 ret = res_counter_charge(&memcg->res, csize, &fail_res);
2593
2594 if (likely(!ret)) {
2595 if (!do_swap_account)
2596 return CHARGE_OK;
2597 ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2598 if (likely(!ret))
2599 return CHARGE_OK;
2600
2601 res_counter_uncharge(&memcg->res, csize);
2602 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2603 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2604 } else
2605 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2606 /*
2607 * Never reclaim on behalf of optional batching, retry with a
2608 * single page instead.
2609 */
2610 if (nr_pages > min_pages)
2611 return CHARGE_RETRY;
2612
2613 if (!(gfp_mask & __GFP_WAIT))
2614 return CHARGE_WOULDBLOCK;
2615
2616 if (gfp_mask & __GFP_NORETRY)
2617 return CHARGE_NOMEM;
2618
2619 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2620 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2621 return CHARGE_RETRY;
2622 /*
2623 * Even though the limit is exceeded at this point, reclaim
2624 * may have been able to free some pages. Retry the charge
2625 * before killing the task.
2626 *
2627 * Only for regular pages, though: huge pages are rather
2628 * unlikely to succeed so close to the limit, and we fall back
2629 * to regular pages anyway in case of failure.
2630 */
2631 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2632 return CHARGE_RETRY;
2633
2634 /*
2635 * During task move, charges can be doubly counted. So, it's
2636 * better to wait until the end of the task move if something is going on.
2637 */
2638 if (mem_cgroup_wait_acct_move(mem_over_limit))
2639 return CHARGE_RETRY;
2640
2641 if (invoke_oom)
2642 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2643
2644 return CHARGE_NOMEM;
2645 }
2646
2647 /**
2648 * mem_cgroup_try_charge - try charging a memcg
2649 * @memcg: memcg to charge
2650 * @nr_pages: number of pages to charge
2651 * @oom: trigger OOM if reclaim fails
2652 *
2653 * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2654 * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
2655 */
2656 static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2657 gfp_t gfp_mask,
2658 unsigned int nr_pages,
2659 bool oom)
2660 {
2661 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2662 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2663 int ret;
2664
2665 if (mem_cgroup_is_root(memcg))
2666 goto done;
2667 /*
2668 * Unlike in global OOM situations, memcg is not in a physical
2669 * memory shortage. Allow dying and OOM-killed tasks to
2670 * bypass the last charges so that they can exit quickly and
2671 * free their memory.
2672 */
2673 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2674 fatal_signal_pending(current) ||
2675 current->flags & PF_EXITING))
2676 goto bypass;
2677
2678 if (unlikely(task_in_memcg_oom(current)))
2679 goto nomem;
2680
2681 if (gfp_mask & __GFP_NOFAIL)
2682 oom = false;
2683 again:
2684 if (consume_stock(memcg, nr_pages))
2685 goto done;
2686
2687 do {
2688 bool invoke_oom = oom && !nr_oom_retries;
2689
2690 /* If killed, bypass charge */
2691 if (fatal_signal_pending(current))
2692 goto bypass;
2693
2694 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2695 nr_pages, invoke_oom);
2696 switch (ret) {
2697 case CHARGE_OK:
2698 break;
2699 case CHARGE_RETRY: /* not in OOM situation but retry */
2700 batch = nr_pages;
2701 goto again;
2702 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2703 goto nomem;
2704 case CHARGE_NOMEM: /* OOM routine works */
2705 if (!oom || invoke_oom)
2706 goto nomem;
2707 nr_oom_retries--;
2708 break;
2709 }
2710 } while (ret != CHARGE_OK);
2711
2712 if (batch > nr_pages)
2713 refill_stock(memcg, batch - nr_pages);
2714 done:
2715 return 0;
2716 nomem:
2717 if (!(gfp_mask & __GFP_NOFAIL))
2718 return -ENOMEM;
2719 bypass:
2720 return -EINTR;
2721 }
2722
2723 /**
2724 * mem_cgroup_try_charge_mm - try charging a mm
2725 * @mm: mm_struct to charge
2726 * @nr_pages: number of pages to charge
2727 * @oom: trigger OOM if reclaim fails
2728 *
2729 * Returns the charged mem_cgroup associated with the given mm_struct or
2730 * NULL if the charge failed.
2731 */
2732 static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2733 gfp_t gfp_mask,
2734 unsigned int nr_pages,
2735 bool oom)
2736
2737 {
2738 struct mem_cgroup *memcg;
2739 int ret;
2740
2741 memcg = get_mem_cgroup_from_mm(mm);
2742 ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
2743 css_put(&memcg->css);
2744 if (ret == -EINTR)
2745 memcg = root_mem_cgroup;
2746 else if (ret)
2747 memcg = NULL;
2748
2749 return memcg;
2750 }
2751
2752 /*
2753 * Sometimes we have to undo a charge we got by try_charge().
2754 * This function is for that and does the uncharge, putting the css's
2755 * refcnt gotten by try_charge().
2756 */
2757 static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2758 unsigned int nr_pages)
2759 {
2760 if (!mem_cgroup_is_root(memcg)) {
2761 unsigned long bytes = nr_pages * PAGE_SIZE;
2762
2763 res_counter_uncharge(&memcg->res, bytes);
2764 if (do_swap_account)
2765 res_counter_uncharge(&memcg->memsw, bytes);
2766 }
2767 }
2768
2769 /*
2770 * Cancel charges in this cgroup... this doesn't propagate to the parent cgroup.
2771 * This is useful when moving usage to parent cgroup.
2772 */
2773 static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2774 unsigned int nr_pages)
2775 {
2776 unsigned long bytes = nr_pages * PAGE_SIZE;
2777
2778 if (mem_cgroup_is_root(memcg))
2779 return;
2780
2781 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2782 if (do_swap_account)
2783 res_counter_uncharge_until(&memcg->memsw,
2784 memcg->memsw.parent, bytes);
2785 }
2786
2787 /*
2788 * A helper function to get a mem_cgroup from an ID. Must be called under
2789 * rcu_read_lock(). The caller is responsible for calling css_tryget if
2790 * the mem_cgroup is used for charging. (dropping refcnt from swap can be
2791 * called against removed memcg.)
2792 */
2793 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2794 {
2795 /* ID 0 is unused ID */
2796 if (!id)
2797 return NULL;
2798 return mem_cgroup_from_id(id);
2799 }
2800
2801 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2802 {
2803 struct mem_cgroup *memcg = NULL;
2804 struct page_cgroup *pc;
2805 unsigned short id;
2806 swp_entry_t ent;
2807
2808 VM_BUG_ON_PAGE(!PageLocked(page), page);
2809
2810 pc = lookup_page_cgroup(page);
2811 lock_page_cgroup(pc);
2812 if (PageCgroupUsed(pc)) {
2813 memcg = pc->mem_cgroup;
2814 if (memcg && !css_tryget(&memcg->css))
2815 memcg = NULL;
2816 } else if (PageSwapCache(page)) {
2817 ent.val = page_private(page);
2818 id = lookup_swap_cgroup_id(ent);
2819 rcu_read_lock();
2820 memcg = mem_cgroup_lookup(id);
2821 if (memcg && !css_tryget(&memcg->css))
2822 memcg = NULL;
2823 rcu_read_unlock();
2824 }
2825 unlock_page_cgroup(pc);
2826 return memcg;
2827 }
2828
2829 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2830 struct page *page,
2831 unsigned int nr_pages,
2832 enum charge_type ctype,
2833 bool lrucare)
2834 {
2835 struct page_cgroup *pc = lookup_page_cgroup(page);
2836 struct zone *uninitialized_var(zone);
2837 struct lruvec *lruvec;
2838 bool was_on_lru = false;
2839 bool anon;
2840
2841 lock_page_cgroup(pc);
2842 VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2843 /*
2844 * we don't need page_cgroup_lock for tail pages, because they are not
2845 * accessed by any other context at this point.
2846 */
2847
2848 /*
2849 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2850 * may already be on some other mem_cgroup's LRU. Take care of it.
2851 */
2852 if (lrucare) {
2853 zone = page_zone(page);
2854 spin_lock_irq(&zone->lru_lock);
2855 if (PageLRU(page)) {
2856 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2857 ClearPageLRU(page);
2858 del_page_from_lru_list(page, lruvec, page_lru(page));
2859 was_on_lru = true;
2860 }
2861 }
2862
2863 pc->mem_cgroup = memcg;
2864 /*
2865 * We access a page_cgroup asynchronously without lock_page_cgroup().
2866 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2867 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2868 * before USED bit, we need memory barrier here.
2869 * See mem_cgroup_add_lru_list(), etc.
2870 */
2871 smp_wmb();
2872 SetPageCgroupUsed(pc);
2873
2874 if (lrucare) {
2875 if (was_on_lru) {
2876 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2877 VM_BUG_ON_PAGE(PageLRU(page), page);
2878 SetPageLRU(page);
2879 add_page_to_lru_list(page, lruvec, page_lru(page));
2880 }
2881 spin_unlock_irq(&zone->lru_lock);
2882 }
2883
2884 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2885 anon = true;
2886 else
2887 anon = false;
2888
2889 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
2890 unlock_page_cgroup(pc);
2891
2892 /*
2893 * "charge_statistics" updated event counter. Then, check it.
2894 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2895 * if they exceeds softlimit.
2896 */
2897 memcg_check_events(memcg, page);
2898 }
2899
2900 static DEFINE_MUTEX(set_limit_mutex);
2901
2902 #ifdef CONFIG_MEMCG_KMEM
2903 /*
2904 * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or
2905 * destroyed. It protects memcg_caches arrays and memcg_slab_caches lists.
2906 */
2907 static DEFINE_MUTEX(memcg_slab_mutex);
2908
2909 static DEFINE_MUTEX(activate_kmem_mutex);
2910
2911 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2912 {
2913 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2914 memcg_kmem_is_active(memcg);
2915 }
2916
2917 /*
2918 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2919 * in the memcg_cache_params struct.
2920 */
2921 static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2922 {
2923 struct kmem_cache *cachep;
2924
2925 VM_BUG_ON(p->is_root_cache);
2926 cachep = p->root_cache;
2927 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2928 }
2929
2930 #ifdef CONFIG_SLABINFO
2931 static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2932 {
2933 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2934 struct memcg_cache_params *params;
2935
2936 if (!memcg_can_account_kmem(memcg))
2937 return -EIO;
2938
2939 print_slabinfo_header(m);
2940
2941 mutex_lock(&memcg_slab_mutex);
2942 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2943 cache_show(memcg_params_to_cache(params), m);
2944 mutex_unlock(&memcg_slab_mutex);
2945
2946 return 0;
2947 }
2948 #endif
2949
2950 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2951 {
2952 struct res_counter *fail_res;
2953 int ret = 0;
2954
2955 ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2956 if (ret)
2957 return ret;
2958
2959 ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
2960 oom_gfp_allowed(gfp));
2961 if (ret == -EINTR) {
2962 /*
2963 * mem_cgroup_try_charge() chose to bypass to root due to
2964 * OOM kill or fatal signal. Since our only options are to
2965 * either fail the allocation or charge it to this cgroup, do
2966 * it as a temporary condition. But we can't fail. From a
2967 * kmem/slab perspective, the cache has already been selected,
2968 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2969 * our minds.
2970 *
2971 * This condition will only trigger if the task entered
2972 * memcg_charge_kmem in a sane state, but was OOM-killed during
2973 * mem_cgroup_try_charge() above. Tasks that were already
2974 * dying when the allocation triggers should have been already
2975 * directed to the root cgroup in memcontrol.h
2976 */
2977 res_counter_charge_nofail(&memcg->res, size, &fail_res);
2978 if (do_swap_account)
2979 res_counter_charge_nofail(&memcg->memsw, size,
2980 &fail_res);
2981 ret = 0;
2982 } else if (ret)
2983 res_counter_uncharge(&memcg->kmem, size);
2984
2985 return ret;
2986 }
2987
2988 static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2989 {
2990 res_counter_uncharge(&memcg->res, size);
2991 if (do_swap_account)
2992 res_counter_uncharge(&memcg->memsw, size);
2993
2994 /* Not down to 0 */
2995 if (res_counter_uncharge(&memcg->kmem, size))
2996 return;
2997
2998 /*
2999 * Releases a reference taken in kmem_cgroup_css_offline in case
3000 * this last uncharge is racing with the offlining code or it is
3001 * outliving the memcg existence.
3002 *
3003 * The memory barrier imposed by test&clear is paired with the
3004 * explicit one in memcg_kmem_mark_dead().
3005 */
3006 if (memcg_kmem_test_and_clear_dead(memcg))
3007 css_put(&memcg->css);
3008 }
3009
3010 /*
3011 * helper for accessing a memcg's index. It will be used as an index in the
3012 * child cache array in kmem_cache, and also to derive its name. This function
3013 * will return -1 when this is not a kmem-limited memcg.
3014 */
3015 int memcg_cache_id(struct mem_cgroup *memcg)
3016 {
3017 return memcg ? memcg->kmemcg_id : -1;
3018 }
3019
3020 static size_t memcg_caches_array_size(int num_groups)
3021 {
3022 ssize_t size;
3023 if (num_groups <= 0)
3024 return 0;
3025
3026 size = 2 * num_groups;
3027 if (size < MEMCG_CACHES_MIN_SIZE)
3028 size = MEMCG_CACHES_MIN_SIZE;
3029 else if (size > MEMCG_CACHES_MAX_SIZE)
3030 size = MEMCG_CACHES_MAX_SIZE;
3031
3032 return size;
3033 }
3034
3035 /*
3036 * We should update the current array size iff all cache updates succeed. This
3037 * can only be done from the slab side. The slab mutex needs to be held when
3038 * calling this.
3039 */
3040 void memcg_update_array_size(int num)
3041 {
3042 if (num > memcg_limited_groups_array_size)
3043 memcg_limited_groups_array_size = memcg_caches_array_size(num);
3044 }
3045
3046 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3047 {
3048 struct memcg_cache_params *cur_params = s->memcg_params;
3049
3050 VM_BUG_ON(!is_root_cache(s));
3051
3052 if (num_groups > memcg_limited_groups_array_size) {
3053 int i;
3054 struct memcg_cache_params *new_params;
3055 ssize_t size = memcg_caches_array_size(num_groups);
3056
3057 size *= sizeof(void *);
3058 size += offsetof(struct memcg_cache_params, memcg_caches);
3059
3060 new_params = kzalloc(size, GFP_KERNEL);
3061 if (!new_params)
3062 return -ENOMEM;
3063
3064 new_params->is_root_cache = true;
3065
3066 /*
3067 * There is the chance it will be bigger than
3068 * memcg_limited_groups_array_size, if we failed an allocation
3069 * in a cache, in which case all caches updated before it will
3070 * have a bigger array.
3071 *
3072 * But if that is the case, the data after
3073 * memcg_limited_groups_array_size is certainly unused
3074 */
3075 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3076 if (!cur_params->memcg_caches[i])
3077 continue;
3078 new_params->memcg_caches[i] =
3079 cur_params->memcg_caches[i];
3080 }
3081
3082 /*
3083 * Ideally, we would wait until all caches succeed, and only
3084 * then free the old one. But this is not worth the extra
3085 * pointer per-cache we'd have to have for this.
3086 *
3087 * It is not a big deal if some caches are left with a size
3088 * bigger than the others. And all updates will reset this
3089 * anyway.
3090 */
3091 rcu_assign_pointer(s->memcg_params, new_params);
3092 if (cur_params)
3093 kfree_rcu(cur_params, rcu_head);
3094 }
3095 return 0;
3096 }
3097
3098 int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3099 struct kmem_cache *root_cache)
3100 {
3101 size_t size;
3102
3103 if (!memcg_kmem_enabled())
3104 return 0;
3105
3106 if (!memcg) {
3107 size = offsetof(struct memcg_cache_params, memcg_caches);
3108 size += memcg_limited_groups_array_size * sizeof(void *);
3109 } else
3110 size = sizeof(struct memcg_cache_params);
3111
3112 s->memcg_params = kzalloc(size, GFP_KERNEL);
3113 if (!s->memcg_params)
3114 return -ENOMEM;
3115
3116 if (memcg) {
3117 s->memcg_params->memcg = memcg;
3118 s->memcg_params->root_cache = root_cache;
3119 css_get(&memcg->css);
3120 } else
3121 s->memcg_params->is_root_cache = true;
3122
3123 return 0;
3124 }
3125
3126 void memcg_free_cache_params(struct kmem_cache *s)
3127 {
3128 if (!s->memcg_params)
3129 return;
3130 if (!s->memcg_params->is_root_cache)
3131 css_put(&s->memcg_params->memcg->css);
3132 kfree(s->memcg_params);
3133 }
3134
3135 static void memcg_register_cache(struct mem_cgroup *memcg,
3136 struct kmem_cache *root_cache)
3137 {
3138 static char memcg_name_buf[NAME_MAX + 1]; /* protected by
3139 memcg_slab_mutex */
3140 struct kmem_cache *cachep;
3141 int id;
3142
3143 lockdep_assert_held(&memcg_slab_mutex);
3144
3145 id = memcg_cache_id(memcg);
3146
3147 /*
3148 * Since per-memcg caches are created asynchronously on first
3149 * allocation (see memcg_kmem_get_cache()), several threads can try to
3150 * create the same cache, but only one of them may succeed.
3151 */
3152 if (cache_from_memcg_idx(root_cache, id))
3153 return;
3154
3155 cgroup_name(memcg->css.cgroup, memcg_name_buf, NAME_MAX + 1);
3156 cachep = memcg_create_kmem_cache(memcg, root_cache, memcg_name_buf);
3157 /*
3158 * If we could not create a memcg cache, do not complain, because
3159 * that's not critical at all as we can always proceed with the root
3160 * cache.
3161 */
3162 if (!cachep)
3163 return;
3164
3165 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
3166
3167 /*
3168 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3169 * barrier here to ensure nobody will see the kmem_cache partially
3170 * initialized.
3171 */
3172 smp_wmb();
3173
3174 BUG_ON(root_cache->memcg_params->memcg_caches[id]);
3175 root_cache->memcg_params->memcg_caches[id] = cachep;
3176 }
3177
3178 static void memcg_unregister_cache(struct kmem_cache *cachep)
3179 {
3180 struct kmem_cache *root_cache;
3181 struct mem_cgroup *memcg;
3182 int id;
3183
3184 lockdep_assert_held(&memcg_slab_mutex);
3185
3186 BUG_ON(is_root_cache(cachep));
3187
3188 root_cache = cachep->memcg_params->root_cache;
3189 memcg = cachep->memcg_params->memcg;
3190 id = memcg_cache_id(memcg);
3191
3192 BUG_ON(root_cache->memcg_params->memcg_caches[id] != cachep);
3193 root_cache->memcg_params->memcg_caches[id] = NULL;
3194
3195 list_del(&cachep->memcg_params->list);
3196
3197 kmem_cache_destroy(cachep);
3198 }
3199
3200 /*
3201 * During the creation of a new cache, we need to disable our accounting
3202 * mechanism altogether. This is true even if we are not creating, but rather
3203 * just enqueuing new caches to be created.
3204 *
3205 * This is because that process will trigger allocations; some visible, like
3206 * explicit kmallocs to auxiliary data structures, name strings and internal
3207 * cache structures; some well concealed, like INIT_WORK() that can allocate
3208 * objects during debug.
3209 *
3210 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3211 * to it. This may not be a bounded recursion: since the first cache creation
3212 * failed to complete (waiting on the allocation), we'll just try to create the
3213 * cache again, failing at the same point.
3214 *
3215 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3216 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3217 * inside the following two functions.
3218 */
3219 static inline void memcg_stop_kmem_account(void)
3220 {
3221 VM_BUG_ON(!current->mm);
3222 current->memcg_kmem_skip_account++;
3223 }
3224
3225 static inline void memcg_resume_kmem_account(void)
3226 {
3227 VM_BUG_ON(!current->mm);
3228 current->memcg_kmem_skip_account--;
3229 }
3230
3231 int __memcg_cleanup_cache_params(struct kmem_cache *s)
3232 {
3233 struct kmem_cache *c;
3234 int i, failed = 0;
3235
3236 mutex_lock(&memcg_slab_mutex);
3237 for_each_memcg_cache_index(i) {
3238 c = cache_from_memcg_idx(s, i);
3239 if (!c)
3240 continue;
3241
3242 memcg_unregister_cache(c);
3243
3244 if (cache_from_memcg_idx(s, i))
3245 failed++;
3246 }
3247 mutex_unlock(&memcg_slab_mutex);
3248 return failed;
3249 }
3250
3251 static void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3252 {
3253 struct kmem_cache *cachep;
3254 struct memcg_cache_params *params, *tmp;
3255
3256 if (!memcg_kmem_is_active(memcg))
3257 return;
3258
3259 mutex_lock(&memcg_slab_mutex);
3260 list_for_each_entry_safe(params, tmp, &memcg->memcg_slab_caches, list) {
3261 cachep = memcg_params_to_cache(params);
3262 kmem_cache_shrink(cachep);
3263 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
3264 memcg_unregister_cache(cachep);
3265 }
3266 mutex_unlock(&memcg_slab_mutex);
3267 }
3268
3269 struct memcg_register_cache_work {
3270 struct mem_cgroup *memcg;
3271 struct kmem_cache *cachep;
3272 struct work_struct work;
3273 };
3274
3275 static void memcg_register_cache_func(struct work_struct *w)
3276 {
3277 struct memcg_register_cache_work *cw =
3278 container_of(w, struct memcg_register_cache_work, work);
3279 struct mem_cgroup *memcg = cw->memcg;
3280 struct kmem_cache *cachep = cw->cachep;
3281
3282 mutex_lock(&memcg_slab_mutex);
3283 memcg_register_cache(memcg, cachep);
3284 mutex_unlock(&memcg_slab_mutex);
3285
3286 css_put(&memcg->css);
3287 kfree(cw);
3288 }
3289
3290 /*
3291 * Enqueue the creation of a per-memcg kmem_cache.
3292 */
3293 static void __memcg_schedule_register_cache(struct mem_cgroup *memcg,
3294 struct kmem_cache *cachep)
3295 {
3296 struct memcg_register_cache_work *cw;
3297
3298 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
3299 if (cw == NULL) {
3300 css_put(&memcg->css);
3301 return;
3302 }
3303
3304 cw->memcg = memcg;
3305 cw->cachep = cachep;
3306
3307 INIT_WORK(&cw->work, memcg_register_cache_func);
3308 schedule_work(&cw->work);
3309 }
3310
3311 static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
3312 struct kmem_cache *cachep)
3313 {
3314 /*
3315 * We need to stop accounting when we kmalloc, because if the
3316 * corresponding kmalloc cache is not yet created, the first allocation
3317 * in __memcg_schedule_register_cache will recurse.
3318 *
3319 * However, it is better to enclose the whole function. Depending on
3320 * the debugging options enabled, INIT_WORK(), for instance, can
3321 * trigger an allocation. This too, will make us recurse. Because at
3322 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3323 * the safest choice is to do it like this, wrapping the whole function.
3324 */
3325 memcg_stop_kmem_account();
3326 __memcg_schedule_register_cache(memcg, cachep);
3327 memcg_resume_kmem_account();
3328 }
3329
3330 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
3331 {
3332 int res;
3333
3334 res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp,
3335 PAGE_SIZE << order);
3336 if (!res)
3337 atomic_add(1 << order, &cachep->memcg_params->nr_pages);
3338 return res;
3339 }
3340
3341 void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
3342 {
3343 memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order);
3344 atomic_sub(1 << order, &cachep->memcg_params->nr_pages);
3345 }
3346
3347 /*
3348 * Return the kmem_cache we're supposed to use for a slab allocation.
3349 * We try to use the current memcg's version of the cache.
3350 *
3351 * If the cache does not exist yet and we are the first user of it,
3352 * we either create it immediately, if possible, or create it asynchronously
3353 * in a workqueue.
3354 * In the latter case, we will let the current allocation go through with
3355 * the original cache.
3356 *
3357 * Can't be called in interrupt context or from kernel threads.
3358 * This function needs to be called with rcu_read_lock() held.
3359 */
3360 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3361 gfp_t gfp)
3362 {
3363 struct mem_cgroup *memcg;
3364 struct kmem_cache *memcg_cachep;
3365
3366 VM_BUG_ON(!cachep->memcg_params);
3367 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3368
3369 if (!current->mm || current->memcg_kmem_skip_account)
3370 return cachep;
3371
3372 rcu_read_lock();
3373 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3374
3375 if (!memcg_can_account_kmem(memcg))
3376 goto out;
3377
3378 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3379 if (likely(memcg_cachep)) {
3380 cachep = memcg_cachep;
3381 goto out;
3382 }
3383
3384 /* The corresponding put will be done in the workqueue. */
3385 if (!css_tryget(&memcg->css))
3386 goto out;
3387 rcu_read_unlock();
3388
3389 /*
3390 * If we are in a safe context (can wait, and not in interrupt
3391 * context), we could be predictable and return right away.
3392 * This would guarantee that the allocation being performed
3393 * already belongs in the new cache.
3394 *
3395 * However, there are some clashes that can arrive from locking.
3396 * For instance, because we acquire the slab_mutex while doing
3397 * memcg_create_kmem_cache, this means no further allocation
3398 * could happen with the slab_mutex held. So it's better to
3399 * defer everything.
3400 */
3401 memcg_schedule_register_cache(memcg, cachep);
3402 return cachep;
3403 out:
3404 rcu_read_unlock();
3405 return cachep;
3406 }
3407
3408 /*
3409 * We need to verify if the allocation against current->mm->owner's memcg is
3410 * possible for the given order. But the page is not allocated yet, so we'll
3411 * need a further commit step to do the final arrangements.
3412 *
3413 * It is possible for the task to switch cgroups in the meantime, so at
3414 * commit time, we can't rely on task conversion any longer. We'll then use
3415 * the handle argument to return to the caller which cgroup we should commit
3416 * against. We could also return the memcg directly and avoid the pointer
3417 * passing, but a boolean return value gives better semantics considering
3418 * the compiled-out case as well.
3419 *
3420 * Returning true means the allocation is possible.
3421 */
3422 bool
3423 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3424 {
3425 struct mem_cgroup *memcg;
3426 int ret;
3427
3428 *_memcg = NULL;
3429
3430 /*
3431 * Disabling accounting is only relevant for some specific memcg
3432 * internal allocations. Therefore we would initially not have such a
3433 * check here, since direct calls to the page allocator that are
3434 * accounted to kmemcg (alloc_kmem_pages and friends) only happen
3435 * outside memcg core. We are mostly concerned with cache allocations,
3436 * and by having this test at memcg_kmem_get_cache, we are already able
3437 * to relay the allocation to the root cache and bypass the memcg cache
3438 * altogether.
3439 *
3440 * There is one exception, though: the SLUB allocator does not create
3441 * large order caches, but rather services large kmallocs directly from
3442 * the page allocator. Therefore, the following sequence when backed by
3443 * the SLUB allocator:
3444 *
3445 * memcg_stop_kmem_account();
3446 * kmalloc(<large_number>)
3447 * memcg_resume_kmem_account();
3448 *
3449 * would effectively ignore the fact that we should skip accounting,
3450 * since it will drive us directly to this function without passing
3451 * through the cache selector memcg_kmem_get_cache. Such large
3452 * allocations are extremely rare but can happen, for instance, for the
3453 * cache arrays. We bring this test here.
3454 */
3455 if (!current->mm || current->memcg_kmem_skip_account)
3456 return true;
3457
3458 memcg = get_mem_cgroup_from_mm(current->mm);
3459
3460 if (!memcg_can_account_kmem(memcg)) {
3461 css_put(&memcg->css);
3462 return true;
3463 }
3464
3465 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3466 if (!ret)
3467 *_memcg = memcg;
3468
3469 css_put(&memcg->css);
3470 return (ret == 0);
3471 }
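/*
 * A rough sketch of the charge/commit sequence as seen from the page
 * allocator side (the memcg_kmem_* wrappers in memcontrol.h are expected to
 * compile away when kmem accounting is not active):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	if (memcg)
 *		__memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 */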
3472
3473 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3474 int order)
3475 {
3476 struct page_cgroup *pc;
3477
3478 VM_BUG_ON(mem_cgroup_is_root(memcg));
3479
3480 /* The page allocation failed. Revert */
3481 if (!page) {
3482 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3483 return;
3484 }
3485
3486 pc = lookup_page_cgroup(page);
3487 lock_page_cgroup(pc);
3488 pc->mem_cgroup = memcg;
3489 SetPageCgroupUsed(pc);
3490 unlock_page_cgroup(pc);
3491 }
3492
3493 void __memcg_kmem_uncharge_pages(struct page *page, int order)
3494 {
3495 struct mem_cgroup *memcg = NULL;
3496 struct page_cgroup *pc;
3497
3498
3499 pc = lookup_page_cgroup(page);
3500 /*
3501 * Fast unlocked return. Theoretically it might have changed, so we have to
3502 * check again after locking.
3503 */
3504 if (!PageCgroupUsed(pc))
3505 return;
3506
3507 lock_page_cgroup(pc);
3508 if (PageCgroupUsed(pc)) {
3509 memcg = pc->mem_cgroup;
3510 ClearPageCgroupUsed(pc);
3511 }
3512 unlock_page_cgroup(pc);
3513
3514 /*
3515 * We trust that only pages with a memcg associated with them are
3516 * valid kmem allocations.
3517 */
3518 if (!memcg)
3519 return;
3520
3521 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3522 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3523 }
3524 #else
3525 static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
3526 {
3527 }
3528 #endif /* CONFIG_MEMCG_KMEM */
3529
3530 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3531
3532 #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
3533 /*
3534 * Because tail pages are not marked as "used", set that flag. We're under
3535 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3536 * charge/uncharge will never happen and move_account() is done under
3537 * compound_lock(), so we don't have to take care of races.
3538 */
3539 void mem_cgroup_split_huge_fixup(struct page *head)
3540 {
3541 struct page_cgroup *head_pc = lookup_page_cgroup(head);
3542 struct page_cgroup *pc;
3543 struct mem_cgroup *memcg;
3544 int i;
3545
3546 if (mem_cgroup_disabled())
3547 return;
3548
3549 memcg = head_pc->mem_cgroup;
3550 for (i = 1; i < HPAGE_PMD_NR; i++) {
3551 pc = head_pc + i;
3552 pc->mem_cgroup = memcg;
3553 smp_wmb();/* see __commit_charge() */
3554 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3555 }
3556 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3557 HPAGE_PMD_NR);
3558 }
3559 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3560
3561 /**
3562 * mem_cgroup_move_account - move account of the page
3563 * @page: the page
3564 * @nr_pages: number of regular pages (>1 for huge pages)
3565 * @pc: page_cgroup of the page.
3566 * @from: mem_cgroup which the page is moved from.
3567 * @to: mem_cgroup which the page is moved to. @from != @to.
3568 *
3569 * The caller must confirm following.
3570 * - page is not on LRU (isolate_page() is useful.)
3571 * - compound_lock is held when nr_pages > 1
3572 *
3573 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3574 * from old cgroup.
3575 */
3576 static int mem_cgroup_move_account(struct page *page,
3577 unsigned int nr_pages,
3578 struct page_cgroup *pc,
3579 struct mem_cgroup *from,
3580 struct mem_cgroup *to)
3581 {
3582 unsigned long flags;
3583 int ret;
3584 bool anon = PageAnon(page);
3585
3586 VM_BUG_ON(from == to);
3587 VM_BUG_ON_PAGE(PageLRU(page), page);
3588 /*
3589 * The page is isolated from LRU. So, collapse function
3590 * will not handle this page. But page splitting can happen.
3591 * Do this check under compound_page_lock(). The caller should
3592 * hold it.
3593 */
3594 ret = -EBUSY;
3595 if (nr_pages > 1 && !PageTransHuge(page))
3596 goto out;
3597
3598 lock_page_cgroup(pc);
3599
3600 ret = -EINVAL;
3601 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3602 goto unlock;
3603
3604 move_lock_mem_cgroup(from, &flags);
3605
3606 if (!anon && page_mapped(page)) {
3607 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3608 nr_pages);
3609 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3610 nr_pages);
3611 }
3612
3613 if (PageWriteback(page)) {
3614 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3615 nr_pages);
3616 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3617 nr_pages);
3618 }
3619
3620 mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
3621
3622 /* caller should have done css_get */
3623 pc->mem_cgroup = to;
3624 mem_cgroup_charge_statistics(to, page, anon, nr_pages);
3625 move_unlock_mem_cgroup(from, &flags);
3626 ret = 0;
3627 unlock:
3628 unlock_page_cgroup(pc);
3629 /*
3630 * check events
3631 */
3632 memcg_check_events(to, page);
3633 memcg_check_events(from, page);
3634 out:
3635 return ret;
3636 }
3637
3638 /**
3639 * mem_cgroup_move_parent - moves page to the parent group
3640 * @page: the page to move
3641 * @pc: page_cgroup of the page
3642 * @child: page's cgroup
3643 *
3644 * move charges to its parent or the root cgroup if the group has no
3645 * parent (aka use_hierarchy==0).
3646 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3647 * mem_cgroup_move_account fails) the failure is always temporary and
3648 * it signals a race with a page removal/uncharge or migration. In the
3649 * first case the page is on the way out and it will vanish from the LRU
3650 * on the next attempt and the call should be retried later.
3651 * Isolation from the LRU fails only if page has been isolated from
3652 * the LRU since we looked at it and that usually means either global
3653 * reclaim or migration going on. The page will either get back to the
3654 * LRU or vanish.
3655 * Finally, mem_cgroup_move_account fails only if the page got uncharged
3656 * (!PageCgroupUsed) or moved to a different group. The page will
3657 * disappear in the next attempt.
3658 */
3659 static int mem_cgroup_move_parent(struct page *page,
3660 struct page_cgroup *pc,
3661 struct mem_cgroup *child)
3662 {
3663 struct mem_cgroup *parent;
3664 unsigned int nr_pages;
3665 unsigned long uninitialized_var(flags);
3666 int ret;
3667
3668 VM_BUG_ON(mem_cgroup_is_root(child));
3669
3670 ret = -EBUSY;
3671 if (!get_page_unless_zero(page))
3672 goto out;
3673 if (isolate_lru_page(page))
3674 goto put;
3675
3676 nr_pages = hpage_nr_pages(page);
3677
3678 parent = parent_mem_cgroup(child);
3679 /*
3680 * If no parent, move charges to root cgroup.
3681 */
3682 if (!parent)
3683 parent = root_mem_cgroup;
3684
3685 if (nr_pages > 1) {
3686 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3687 flags = compound_lock_irqsave(page);
3688 }
3689
3690 ret = mem_cgroup_move_account(page, nr_pages,
3691 pc, child, parent);
3692 if (!ret)
3693 __mem_cgroup_cancel_local_charge(child, nr_pages);
3694
3695 if (nr_pages > 1)
3696 compound_unlock_irqrestore(page, flags);
3697 putback_lru_page(page);
3698 put:
3699 put_page(page);
3700 out:
3701 return ret;
3702 }
3703
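/*
 * Charge a newly faulted anonymous page to @mm's memcg. Transparent
 * huge pages are charged as a whole (compound_order() worth of pages),
 * and the OOM killer is never invoked for them - the fault handler
 * falls back to regular pages instead. Returns 0 or -ENOMEM.
 */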
3704 int mem_cgroup_charge_anon(struct page *page,
3705 struct mm_struct *mm, gfp_t gfp_mask)
3706 {
3707 unsigned int nr_pages = 1;
3708 struct mem_cgroup *memcg;
3709 bool oom = true;
3710
3711 if (mem_cgroup_disabled())
3712 return 0;
3713
3714 VM_BUG_ON_PAGE(page_mapped(page), page);
3715 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3716 VM_BUG_ON(!mm);
3717
3718 if (PageTransHuge(page)) {
3719 nr_pages <<= compound_order(page);
3720 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3721 /*
3722 * Never OOM-kill a process for a huge page. The
3723 * fault handler will fall back to regular pages.
3724 */
3725 oom = false;
3726 }
3727
3728 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3729 if (!memcg)
3730 return -ENOMEM;
3731 __mem_cgroup_commit_charge(memcg, page, nr_pages,
3732 MEM_CGROUP_CHARGE_TYPE_ANON, false);
3733 return 0;
3734 }
3735
3736 /*
3737 * During swap-in (try_charge -> commit or cancel), the page is locked.
3738 * And when try_charge() successfully returns, one refcnt to the memcg,
3739 * not yet tied to a struct page_cgroup, is acquired. This refcnt will be
3740 * consumed by "commit()" or removed by "cancel()".
3741 */
3742 static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3743 struct page *page,
3744 gfp_t mask,
3745 struct mem_cgroup **memcgp)
3746 {
3747 struct mem_cgroup *memcg = NULL;
3748 struct page_cgroup *pc;
3749 int ret;
3750
3751 pc = lookup_page_cgroup(page);
3752 /*
3753 * Every swap fault against a single page tries to charge the
3754 * page, bail as early as possible. shmem_unuse() encounters
3755 * already charged pages, too. The USED bit is protected by
3756 * the page lock, which serializes swap cache removal, which
3757 * in turn serializes uncharging.
3758 */
3759 if (PageCgroupUsed(pc))
3760 goto out;
3761 if (do_swap_account)
3762 memcg = try_get_mem_cgroup_from_page(page);
3763 if (!memcg)
3764 memcg = get_mem_cgroup_from_mm(mm);
3765 ret = mem_cgroup_try_charge(memcg, mask, 1, true);
3766 css_put(&memcg->css);
3767 if (ret == -EINTR)
3768 memcg = root_mem_cgroup;
3769 else if (ret)
3770 return ret;
3771 out:
3772 *memcgp = memcg;
3773 return 0;
3774 }
3775
3776 int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3777 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3778 {
3779 if (mem_cgroup_disabled()) {
3780 *memcgp = NULL;
3781 return 0;
3782 }
3783 /*
3784 * A racing thread's fault, or swapoff, may have already
3785 * updated the pte, and even removed the page from the swap cache: in
3786 * those cases unuse_pte()'s pte_same() test will fail; but
3787 * there's also a KSM case which does need to charge the page.
3788 */
3789 if (!PageSwapCache(page)) {
3790 struct mem_cgroup *memcg;
3791
3792 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3793 if (!memcg)
3794 return -ENOMEM;
3795 *memcgp = memcg;
3796 return 0;
3797 }
3798 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3799 }
3800
3801 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3802 {
3803 if (mem_cgroup_disabled())
3804 return;
3805 if (!memcg)
3806 return;
3807 __mem_cgroup_cancel_charge(memcg, 1);
3808 }
3809
3810 static void
3811 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3812 enum charge_type ctype)
3813 {
3814 if (mem_cgroup_disabled())
3815 return;
3816 if (!memcg)
3817 return;
3818
3819 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
3820 /*
3821 * Now the swap is in memory. This means this page may be
3822 * counted both as mem and swap, i.e. double-counted.
3823 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3824 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3825 * may call delete_from_swap_cache() before we reach here.
3826 */
3827 if (do_swap_account && PageSwapCache(page)) {
3828 swp_entry_t ent = {.val = page_private(page)};
3829 mem_cgroup_uncharge_swap(ent);
3830 }
3831 }
3832
3833 void mem_cgroup_commit_charge_swapin(struct page *page,
3834 struct mem_cgroup *memcg)
3835 {
3836 __mem_cgroup_commit_charge_swapin(page, memcg,
3837 MEM_CGROUP_CHARGE_TYPE_ANON);
3838 }
3839
3840 int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3841 gfp_t gfp_mask)
3842 {
3843 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3844 struct mem_cgroup *memcg;
3845 int ret;
3846
3847 if (mem_cgroup_disabled())
3848 return 0;
3849 if (PageCompound(page))
3850 return 0;
3851
3852 if (PageSwapCache(page)) { /* shmem */
3853 ret = __mem_cgroup_try_charge_swapin(mm, page,
3854 gfp_mask, &memcg);
3855 if (ret)
3856 return ret;
3857 __mem_cgroup_commit_charge_swapin(page, memcg, type);
3858 return 0;
3859 }
3860
3861 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3862 if (!memcg)
3863 return -ENOMEM;
3864 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3865 return 0;
3866 }
3867
3868 static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3869 unsigned int nr_pages,
3870 const enum charge_type ctype)
3871 {
3872 struct memcg_batch_info *batch = NULL;
3873 bool uncharge_memsw = true;
3874
3875 /* If swapout, usage of swap doesn't decrease */
3876 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3877 uncharge_memsw = false;
3878
3879 batch = &current->memcg_batch;
3880 /*
3881 * Usually, we do css_get() when we remember a memcg pointer.
3882 * But in this case, we keep res->usage until the end of a series of
3883 * uncharges. Then it's OK to ignore the memcg's refcnt.
3884 */
3885 if (!batch->memcg)
3886 batch->memcg = memcg;
3887 /*
3888 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
3889 * In those cases, all pages freed continuously can be expected to be in
3890 * the same cgroup and we have a chance to coalesce uncharges.
3891 * But we do uncharge one by one if the task is being OOM-killed (TIF_MEMDIE)
3892 * because we want to do uncharge as soon as possible.
3893 */
3894
3895 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
3896 goto direct_uncharge;
3897
3898 if (nr_pages > 1)
3899 goto direct_uncharge;
3900
3901 /*
3902 * In the typical case, batch->memcg == memcg. This means we can
3903 * merge a series of uncharges into a single res_counter uncharge.
3904 * If not, we uncharge the res_counter one by one.
3905 */
3906 if (batch->memcg != memcg)
3907 goto direct_uncharge;
3908 /* remember freed charge and uncharge it later */
3909 batch->nr_pages++;
3910 if (uncharge_memsw)
3911 batch->memsw_nr_pages++;
3912 return;
3913 direct_uncharge:
3914 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
3915 if (uncharge_memsw)
3916 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
3917 if (unlikely(batch->memcg != memcg))
3918 memcg_oom_recover(memcg);
3919 }
3920
3921 /*
3922 * uncharge if !page_mapped(page)
3923 */
3924 static struct mem_cgroup *
3925 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
3926 bool end_migration)
3927 {
3928 struct mem_cgroup *memcg = NULL;
3929 unsigned int nr_pages = 1;
3930 struct page_cgroup *pc;
3931 bool anon;
3932
3933 if (mem_cgroup_disabled())
3934 return NULL;
3935
3936 if (PageTransHuge(page)) {
3937 nr_pages <<= compound_order(page);
3938 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3939 }
3940 /*
3941 * Check if our page_cgroup is valid
3942 */
3943 pc = lookup_page_cgroup(page);
3944 if (unlikely(!PageCgroupUsed(pc)))
3945 return NULL;
3946
3947 lock_page_cgroup(pc);
3948
3949 memcg = pc->mem_cgroup;
3950
3951 if (!PageCgroupUsed(pc))
3952 goto unlock_out;
3953
3954 anon = PageAnon(page);
3955
3956 switch (ctype) {
3957 case MEM_CGROUP_CHARGE_TYPE_ANON:
3958 /*
3959 * Generally PageAnon tells if it's the anon statistics to be
3960 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
3961 * used before the page has reached the stage of being marked PageAnon.
3962 */
3963 anon = true;
3964 /* fallthrough */
3965 case MEM_CGROUP_CHARGE_TYPE_DROP:
3966 /* See mem_cgroup_prepare_migration() */
3967 if (page_mapped(page))
3968 goto unlock_out;
3969 /*
3970 * Pages under migration may not be uncharged. But
3971 * end_migration() /must/ be the one uncharging the
3972 * unused post-migration page and so it has to call
3973 * here with the migration bit still set. See the
3974 * res_counter handling below.
3975 */
3976 if (!end_migration && PageCgroupMigration(pc))
3977 goto unlock_out;
3978 break;
3979 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3980 if (!PageAnon(page)) { /* Shared memory */
3981 if (page->mapping && !page_is_file_cache(page))
3982 goto unlock_out;
3983 } else if (page_mapped(page)) /* Anon */
3984 goto unlock_out;
3985 break;
3986 default:
3987 break;
3988 }
3989
3990 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
3991
3992 ClearPageCgroupUsed(pc);
3993 /*
3994 * pc->mem_cgroup is not cleared here. It will be accessed when the page
3995 * is freed from the LRU. This is safe because an uncharged page is expected
3996 * not to be reused (it is freed soon). The exception is SwapCache, which is
3997 * handled by special functions.
3998 */
3999
4000 unlock_page_cgroup(pc);
4001 /*
4002 * even after unlock, we have memcg->res.usage here and this memcg
4003 * will never be freed, so it's safe to call css_get().
4004 */
4005 memcg_check_events(memcg, page);
4006 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4007 mem_cgroup_swap_statistics(memcg, true);
4008 css_get(&memcg->css);
4009 }
4010 /*
4011 * Migration does not charge the res_counter for the
4012 * replacement page, so leave it alone when phasing out the
4013 * page that is unused after the migration.
4014 */
4015 if (!end_migration && !mem_cgroup_is_root(memcg))
4016 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4017
4018 return memcg;
4019
4020 unlock_out:
4021 unlock_page_cgroup(pc);
4022 return NULL;
4023 }
4024
4025 void mem_cgroup_uncharge_page(struct page *page)
4026 {
4027 /* early check. */
4028 if (page_mapped(page))
4029 return;
4030 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
4031 /*
4032 * If the page is in swap cache, uncharge should be deferred
4033 * to the swap path, which also properly accounts swap usage
4034 * and handles memcg lifetime.
4035 *
4036 * Note that this check is not stable and reclaim may add the
4037 * page to swap cache at any time after this. However, if the
4038 * page is not in swap cache by the time page->mapcount hits
4039 * 0, there won't be any page table references to the swap
4040 * slot, and reclaim will free it and not actually write the
4041 * page to disk.
4042 */
4043 if (PageSwapCache(page))
4044 return;
4045 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4046 }
4047
4048 void mem_cgroup_uncharge_cache_page(struct page *page)
4049 {
4050 VM_BUG_ON_PAGE(page_mapped(page), page);
4051 VM_BUG_ON_PAGE(page->mapping, page);
4052 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4053 }
4054
4055 /*
4056 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
4057 * In those cases, pages are freed continuously and we can expect them to
4058 * be in the same memcg. Each of these callers itself limits the number of
4059 * pages freed at once, so uncharge_start/end() is called properly.
4060 * This may be called multiple (nested) times in one context.
4061 */
4062
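/*
 * An illustrative sketch of the intended calling pattern (not taken
 * from a real caller):
 *
 *	mem_cgroup_uncharge_start();
 *	... uncharge a batch of pages, e.g. via mem_cgroup_uncharge_page() ...
 *	mem_cgroup_uncharge_end();
 *
 * Calls may nest; only the outermost uncharge_end() flushes the
 * accumulated res_counter uncharges.
 */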
4063 void mem_cgroup_uncharge_start(void)
4064 {
4065 current->memcg_batch.do_batch++;
4066 /* We can do nest. */
4067 if (current->memcg_batch.do_batch == 1) {
4068 current->memcg_batch.memcg = NULL;
4069 current->memcg_batch.nr_pages = 0;
4070 current->memcg_batch.memsw_nr_pages = 0;
4071 }
4072 }
4073
4074 void mem_cgroup_uncharge_end(void)
4075 {
4076 struct memcg_batch_info *batch = &current->memcg_batch;
4077
4078 if (!batch->do_batch)
4079 return;
4080
4081 batch->do_batch--;
4082 if (batch->do_batch) /* If stacked, do nothing. */
4083 return;
4084
4085 if (!batch->memcg)
4086 return;
4087 /*
4088 * This "batch->memcg" is valid without any css_get/put etc...
4089 * because we hide charges behind us.
4090 */
4091 if (batch->nr_pages)
4092 res_counter_uncharge(&batch->memcg->res,
4093 batch->nr_pages * PAGE_SIZE);
4094 if (batch->memsw_nr_pages)
4095 res_counter_uncharge(&batch->memcg->memsw,
4096 batch->memsw_nr_pages * PAGE_SIZE);
4097 memcg_oom_recover(batch->memcg);
4098 /* forget this pointer (for sanity check) */
4099 batch->memcg = NULL;
4100 }
4101
4102 #ifdef CONFIG_SWAP
4103 /*
4104 * Called after __delete_from_swap_cache(); drops the "page" account.
4105 * The memcg information is recorded in the swap_cgroup of "ent".
4106 */
4107 void
4108 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4109 {
4110 struct mem_cgroup *memcg;
4111 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4112
4113 if (!swapout) /* this was a swap cache but the swap is unused ! */
4114 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4115
4116 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
4117
4118 /*
4119 * Record the memcg information. If swapout && memcg != NULL,
4120 * css_get() was called in uncharge().
4121 */
4122 if (do_swap_account && swapout && memcg)
4123 swap_cgroup_record(ent, mem_cgroup_id(memcg));
4124 }
4125 #endif
4126
4127 #ifdef CONFIG_MEMCG_SWAP
4128 /*
4129 * Called from swap_entry_free(). Removes the record in swap_cgroup and
4130 * uncharges the "memsw" account.
4131 */
4132 void mem_cgroup_uncharge_swap(swp_entry_t ent)
4133 {
4134 struct mem_cgroup *memcg;
4135 unsigned short id;
4136
4137 if (!do_swap_account)
4138 return;
4139
4140 id = swap_cgroup_record(ent, 0);
4141 rcu_read_lock();
4142 memcg = mem_cgroup_lookup(id);
4143 if (memcg) {
4144 /*
4145 * We uncharge this because the swap is freed.
4146 * This memcg can be an obsolete one. We avoid calling css_tryget().
4147 */
4148 if (!mem_cgroup_is_root(memcg))
4149 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4150 mem_cgroup_swap_statistics(memcg, false);
4151 css_put(&memcg->css);
4152 }
4153 rcu_read_unlock();
4154 }
4155
4156 /**
4157 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4158 * @entry: swap entry to be moved
4159 * @from: mem_cgroup which the entry is moved from
4160 * @to: mem_cgroup which the entry is moved to
4161 *
4162 * It succeeds only when the swap_cgroup's record for this entry is the same
4163 * as the mem_cgroup's id of @from.
4164 *
4165 * Returns 0 on success, -EINVAL on failure.
4166 *
4167 * The caller must have charged to @to, IOW, called res_counter_charge() about
4168 * both res and memsw, and called css_get().
4169 */
4170 static int mem_cgroup_move_swap_account(swp_entry_t entry,
4171 struct mem_cgroup *from, struct mem_cgroup *to)
4172 {
4173 unsigned short old_id, new_id;
4174
4175 old_id = mem_cgroup_id(from);
4176 new_id = mem_cgroup_id(to);
4177
4178 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
4179 mem_cgroup_swap_statistics(from, false);
4180 mem_cgroup_swap_statistics(to, true);
4181 /*
4182 * This function is only called from task migration context now.
4183 * It postpones res_counter and refcount handling till the end
4184 * of task migration(mem_cgroup_clear_mc()) for performance
4185 * improvement. But we cannot postpone css_get(to) because if
4186 * the process that has been moved to @to does swap-in, the
4187 * refcount of @to might be decreased to 0.
4188 *
4189 * We are in attach() phase, so the cgroup is guaranteed to be
4190 * alive, so we can just call css_get().
4191 */
4192 css_get(&to->css);
4193 return 0;
4194 }
4195 return -EINVAL;
4196 }
4197 #else
4198 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4199 struct mem_cgroup *from, struct mem_cgroup *to)
4200 {
4201 return -EINVAL;
4202 }
4203 #endif
4204
4205 /*
4206 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4207 * page belongs to.
4208 */
4209 void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4210 struct mem_cgroup **memcgp)
4211 {
4212 struct mem_cgroup *memcg = NULL;
4213 unsigned int nr_pages = 1;
4214 struct page_cgroup *pc;
4215 enum charge_type ctype;
4216
4217 *memcgp = NULL;
4218
4219 if (mem_cgroup_disabled())
4220 return;
4221
4222 if (PageTransHuge(page))
4223 nr_pages <<= compound_order(page);
4224
4225 pc = lookup_page_cgroup(page);
4226 lock_page_cgroup(pc);
4227 if (PageCgroupUsed(pc)) {
4228 memcg = pc->mem_cgroup;
4229 css_get(&memcg->css);
4230 /*
4231 * When migrating an anonymous page, its mapcount goes down
4232 * to 0 and uncharge() will be called. But even if it's fully
4233 * unmapped, migration may fail and this page has to be
4234 * charged again. We set the MIGRATION flag here and delay uncharge
4235 * until end_migration() is called.
4236 *
4237 * Corner Case Thinking
4238 * A)
4239 * When the old page was mapped as Anon and it's unmap-and-freed
4240 * while migration was ongoing.
4241 * If unmap finds the old page, uncharge() of it will be delayed
4242 * until end_migration(). If unmap finds a new page, it's
4243 * uncharged when its mapcount goes from 1 to 0. If the unmap code
4244 * finds a swap_migration_entry, the new page will not be mapped
4245 * and end_migration() will find it (mapcount == 0).
4246 *
4247 * B)
4248 * When the old page was mapped but migration fails, the kernel
4249 * remaps it. A charge for it is kept by the MIGRATION flag even
4250 * if the mapcount goes down to 0. We can remap it successfully
4251 * without charging it again.
4252 *
4253 * C)
4254 * The "old" page is under lock_page() until the end of
4255 * migration, so, the old page itself will not be swapped-out.
4256 * If the new page is swapped out before end_migration, our
4257 * hook into the usual swap-out path will catch the event.
4258 */
4259 if (PageAnon(page))
4260 SetPageCgroupMigration(pc);
4261 }
4262 unlock_page_cgroup(pc);
4263 /*
4264 * If the page is not charged at this point,
4265 * we return here.
4266 */
4267 if (!memcg)
4268 return;
4269
4270 *memcgp = memcg;
4271 /*
4272 * We charge the new page before it's used/mapped. So, even if unlock_page()
4273 * is called before end_migration, we can catch all events on this new
4274 * page. In case the new page is migrated but not remapped, its
4275 * mapcount will finally be 0 and we call uncharge in end_migration().
4276 */
4277 if (PageAnon(page))
4278 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4279 else
4280 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4281 /*
4282 * The page is committed to the memcg, but it's not actually
4283 * charged to the res_counter since we plan on replacing the
4284 * old one and only one page is going to be left afterwards.
4285 */
4286 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4287 }
4288
4289 /* remove redundant charge if migration failed */
4290 void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4291 struct page *oldpage, struct page *newpage, bool migration_ok)
4292 {
4293 struct page *used, *unused;
4294 struct page_cgroup *pc;
4295 bool anon;
4296
4297 if (!memcg)
4298 return;
4299
4300 if (!migration_ok) {
4301 used = oldpage;
4302 unused = newpage;
4303 } else {
4304 used = newpage;
4305 unused = oldpage;
4306 }
4307 anon = PageAnon(used);
4308 __mem_cgroup_uncharge_common(unused,
4309 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4310 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4311 true);
4312 css_put(&memcg->css);
4313 /*
4314 * We disallowed uncharge of pages under migration because mapcount
4315 * of the page goes down to zero, temporarily.
4316 * Clear the flag and check whether the page should be charged.
4317 */
4318 pc = lookup_page_cgroup(oldpage);
4319 lock_page_cgroup(pc);
4320 ClearPageCgroupMigration(pc);
4321 unlock_page_cgroup(pc);
4322
4323 /*
4324 * If the page is file cache, the radix-tree replacement is atomic
4325 * and we can skip this check. When it was an Anon page, its mapcount
4326 * goes down to 0. But because we added the MIGRATION flag, it's not
4327 * uncharged yet. There are several cases, but the page->mapcount check
4328 * and the USED bit check in mem_cgroup_uncharge_page() are sufficient.
4329 * (see prepare_charge() also)
4330 */
4331 if (anon)
4332 mem_cgroup_uncharge_page(used);
4333 }
4334
4335 /*
4336 * At replace page cache, newpage is not under any memcg but it's on
4337 * LRU. So, this function doesn't touch res_counter but handles LRU
4338 * in correct way. Both pages are locked so we cannot race with uncharge.
4339 */
4340 void mem_cgroup_replace_page_cache(struct page *oldpage,
4341 struct page *newpage)
4342 {
4343 struct mem_cgroup *memcg = NULL;
4344 struct page_cgroup *pc;
4345 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4346
4347 if (mem_cgroup_disabled())
4348 return;
4349
4350 pc = lookup_page_cgroup(oldpage);
4351 /* fix accounting on old pages */
4352 lock_page_cgroup(pc);
4353 if (PageCgroupUsed(pc)) {
4354 memcg = pc->mem_cgroup;
4355 mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4356 ClearPageCgroupUsed(pc);
4357 }
4358 unlock_page_cgroup(pc);
4359
4360 /*
4361 * When called from shmem_replace_page(), in some cases the
4362 * oldpage has already been charged, and in some cases not.
4363 */
4364 if (!memcg)
4365 return;
4366 /*
4367 * Even if newpage->mapping was NULL before starting replacement,
4368 * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
4369 * the LRU while we overwrite pc->mem_cgroup.
4370 */
4371 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4372 }
4373
4374 #ifdef CONFIG_DEBUG_VM
4375 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4376 {
4377 struct page_cgroup *pc;
4378
4379 pc = lookup_page_cgroup(page);
4380 /*
4381 * Can be NULL while feeding pages into the page allocator for
4382 * the first time, i.e. during boot or memory hotplug;
4383 * or when mem_cgroup_disabled().
4384 */
4385 if (likely(pc) && PageCgroupUsed(pc))
4386 return pc;
4387 return NULL;
4388 }
4389
4390 bool mem_cgroup_bad_page_check(struct page *page)
4391 {
4392 if (mem_cgroup_disabled())
4393 return false;
4394
4395 return lookup_page_cgroup_used(page) != NULL;
4396 }
4397
4398 void mem_cgroup_print_bad_page(struct page *page)
4399 {
4400 struct page_cgroup *pc;
4401
4402 pc = lookup_page_cgroup_used(page);
4403 if (pc) {
4404 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4405 pc, pc->flags, pc->mem_cgroup);
4406 }
4407 }
4408 #endif
4409
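/*
 * Try to set a new memory limit (RES_LIMIT): -EINVAL if @val would
 * exceed the current memsw limit, otherwise reclaim from the group and
 * retry res_counter_set_limit() until it succeeds, the retry budget is
 * exhausted, or a pending signal aborts the loop with -EINTR.
 */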
4410 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4411 unsigned long long val)
4412 {
4413 int retry_count;
4414 u64 memswlimit, memlimit;
4415 int ret = 0;
4416 int children = mem_cgroup_count_children(memcg);
4417 u64 curusage, oldusage;
4418 int enlarge;
4419
4420 /*
4421 * To keep hierarchical_reclaim simple, how long we should retry
4422 * depends on the caller. We set our retry-count to be a function
4423 * of the number of children we should visit in this loop.
4424 */
4425 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4426
4427 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4428
4429 enlarge = 0;
4430 while (retry_count) {
4431 if (signal_pending(current)) {
4432 ret = -EINTR;
4433 break;
4434 }
4435 /*
4436 * Rather than hiding all of this in some function, do it in an
4437 * open-coded manner so you can see what really happens.
4438 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4439 */
4440 mutex_lock(&set_limit_mutex);
4441 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4442 if (memswlimit < val) {
4443 ret = -EINVAL;
4444 mutex_unlock(&set_limit_mutex);
4445 break;
4446 }
4447
4448 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4449 if (memlimit < val)
4450 enlarge = 1;
4451
4452 ret = res_counter_set_limit(&memcg->res, val);
4453 if (!ret) {
4454 if (memswlimit == val)
4455 memcg->memsw_is_minimum = true;
4456 else
4457 memcg->memsw_is_minimum = false;
4458 }
4459 mutex_unlock(&set_limit_mutex);
4460
4461 if (!ret)
4462 break;
4463
4464 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4465 MEM_CGROUP_RECLAIM_SHRINK);
4466 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4467 /* Was usage reduced? */
4468 if (curusage >= oldusage)
4469 retry_count--;
4470 else
4471 oldusage = curusage;
4472 }
4473 if (!ret && enlarge)
4474 memcg_oom_recover(memcg);
4475
4476 return ret;
4477 }
4478
4479 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4480 unsigned long long val)
4481 {
4482 int retry_count;
4483 u64 memlimit, memswlimit, oldusage, curusage;
4484 int children = mem_cgroup_count_children(memcg);
4485 int ret = -EBUSY;
4486 int enlarge = 0;
4487
4488 /* see mem_cgroup_resize_res_limit */
4489 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4490 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4491 while (retry_count) {
4492 if (signal_pending(current)) {
4493 ret = -EINTR;
4494 break;
4495 }
4496 /*
4497 * Rather than hiding all of this in some function, do it in an
4498 * open-coded manner so you can see what really happens.
4499 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4500 */
4501 mutex_lock(&set_limit_mutex);
4502 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4503 if (memlimit > val) {
4504 ret = -EINVAL;
4505 mutex_unlock(&set_limit_mutex);
4506 break;
4507 }
4508 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4509 if (memswlimit < val)
4510 enlarge = 1;
4511 ret = res_counter_set_limit(&memcg->memsw, val);
4512 if (!ret) {
4513 if (memlimit == val)
4514 memcg->memsw_is_minimum = true;
4515 else
4516 memcg->memsw_is_minimum = false;
4517 }
4518 mutex_unlock(&set_limit_mutex);
4519
4520 if (!ret)
4521 break;
4522
4523 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4524 MEM_CGROUP_RECLAIM_NOSWAP |
4525 MEM_CGROUP_RECLAIM_SHRINK);
4526 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4527 /* Was usage reduced? */
4528 if (curusage >= oldusage)
4529 retry_count--;
4530 else
4531 oldusage = curusage;
4532 }
4533 if (!ret && enlarge)
4534 memcg_oom_recover(memcg);
4535 return ret;
4536 }
4537
4538 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4539 gfp_t gfp_mask,
4540 unsigned long *total_scanned)
4541 {
4542 unsigned long nr_reclaimed = 0;
4543 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4544 unsigned long reclaimed;
4545 int loop = 0;
4546 struct mem_cgroup_tree_per_zone *mctz;
4547 unsigned long long excess;
4548 unsigned long nr_scanned;
4549
4550 if (order > 0)
4551 return 0;
4552
4553 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4554 /*
4555 * This loop can run for a while, especially if mem_cgroups continuously
4556 * keep exceeding their soft limit and putting the system under
4557 * pressure.
4558 */
4559 do {
4560 if (next_mz)
4561 mz = next_mz;
4562 else
4563 mz = mem_cgroup_largest_soft_limit_node(mctz);
4564 if (!mz)
4565 break;
4566
4567 nr_scanned = 0;
4568 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4569 gfp_mask, &nr_scanned);
4570 nr_reclaimed += reclaimed;
4571 *total_scanned += nr_scanned;
4572 spin_lock(&mctz->lock);
4573
4574 /*
4575 * If we failed to reclaim anything from this memory cgroup
4576 * it is time to move on to the next cgroup
4577 */
4578 next_mz = NULL;
4579 if (!reclaimed) {
4580 do {
4581 /*
4582 * Loop until we find yet another one.
4583 *
4584 * By the time we get the soft_limit lock
4585 * again, someone might have added the
4586 * group back on the RB tree. Iterate to
4587 * make sure we get a different memcg.
4588 * mem_cgroup_largest_soft_limit_node returns
4589 * NULL if no other cgroup is present on
4590 * the tree
4591 */
4592 next_mz =
4593 __mem_cgroup_largest_soft_limit_node(mctz);
4594 if (next_mz == mz)
4595 css_put(&next_mz->memcg->css);
4596 else /* next_mz == NULL or other memcg */
4597 break;
4598 } while (1);
4599 }
4600 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4601 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4602 /*
4603 * One school of thought says that we should not add
4604 * back the node to the tree if reclaim returns 0.
4605 * But our reclaim could return 0 simply because, due
4606 * to the priority, we are exposing a smaller subset of
4607 * memory to reclaim from. Consider this a longer-term
4608 * TODO.
4609 */
4610 /* If excess == 0, no tree ops */
4611 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4612 spin_unlock(&mctz->lock);
4613 css_put(&mz->memcg->css);
4614 loop++;
4615 /*
4616 * Could not reclaim anything and there are no more
4617 * mem cgroups to try or we seem to be looping without
4618 * reclaiming anything.
4619 */
4620 if (!nr_reclaimed &&
4621 (next_mz == NULL ||
4622 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4623 break;
4624 } while (!nr_reclaimed);
4625 if (next_mz)
4626 css_put(&next_mz->memcg->css);
4627 return nr_reclaimed;
4628 }
4629
4630 /**
4631 * mem_cgroup_force_empty_list - clears LRU of a group
4632 * @memcg: group to clear
4633 * @node: NUMA node
4634 * @zid: zone id
4635 * @lru: lru to clear
4636 *
4637 * Traverse a specified page_cgroup list and try to drop them all. This doesn't
4638 * reclaim the pages themselves - pages are moved to the parent (or root)
4639 * group.
4640 */
4641 static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4642 int node, int zid, enum lru_list lru)
4643 {
4644 struct lruvec *lruvec;
4645 unsigned long flags;
4646 struct list_head *list;
4647 struct page *busy;
4648 struct zone *zone;
4649
4650 zone = &NODE_DATA(node)->node_zones[zid];
4651 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4652 list = &lruvec->lists[lru];
4653
4654 busy = NULL;
4655 do {
4656 struct page_cgroup *pc;
4657 struct page *page;
4658
4659 spin_lock_irqsave(&zone->lru_lock, flags);
4660 if (list_empty(list)) {
4661 spin_unlock_irqrestore(&zone->lru_lock, flags);
4662 break;
4663 }
4664 page = list_entry(list->prev, struct page, lru);
4665 if (busy == page) {
4666 list_move(&page->lru, list);
4667 busy = NULL;
4668 spin_unlock_irqrestore(&zone->lru_lock, flags);
4669 continue;
4670 }
4671 spin_unlock_irqrestore(&zone->lru_lock, flags);
4672
4673 pc = lookup_page_cgroup(page);
4674
4675 if (mem_cgroup_move_parent(page, pc, memcg)) {
4676 /* found lock contention or "pc" is obsolete. */
4677 busy = page;
4678 } else
4679 busy = NULL;
4680 cond_resched();
4681 } while (!list_empty(list));
4682 }
4683
4684 /*
4685 * Make the mem_cgroup's charge 0 if there is no task, by moving
4686 * all the charges and pages to the parent.
4687 * This enables deleting this mem_cgroup.
4688 *
4689 * Caller is responsible for holding css reference on the memcg.
4690 */
4691 static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4692 {
4693 int node, zid;
4694 u64 usage;
4695
4696 do {
4697 /* This is for making all *used* pages be on the LRU. */
4698 lru_add_drain_all();
4699 drain_all_stock_sync(memcg);
4700 mem_cgroup_start_move(memcg);
4701 for_each_node_state(node, N_MEMORY) {
4702 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4703 enum lru_list lru;
4704 for_each_lru(lru) {
4705 mem_cgroup_force_empty_list(memcg,
4706 node, zid, lru);
4707 }
4708 }
4709 }
4710 mem_cgroup_end_move(memcg);
4711 memcg_oom_recover(memcg);
4712 cond_resched();
4713
4714 /*
4715 * Kernel memory may not necessarily be attributable to a specific
4716 * process, so such charges are not migrated, and therefore we can't
4717 * expect their value to drop to 0 here.
4718 * Having res filled up with kmem only is enough.
4719 *
4720 * This is a safety check because mem_cgroup_force_empty_list
4721 * could have raced with mem_cgroup_replace_page_cache callers
4722 * so the lru seemed empty but the page could have been added
4723 * right after the check. RES_USAGE should be safe as we always
4724 * charge before adding to the LRU.
4725 */
4726 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4727 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4728 } while (usage > 0);
4729 }
4730
4731 static inline bool memcg_has_children(struct mem_cgroup *memcg)
4732 {
4733 lockdep_assert_held(&memcg_create_mutex);
4734 /*
4735 * The lock does not prevent addition to or deletion from the list
4736 * of children, but it prevents a new child from being
4737 * initialized based on this parent in css_online(), so it's
4738 * enough to decide whether hierarchically inherited
4739 * attributes can still be changed or not.
4740 */
4741 return memcg->use_hierarchy &&
4742 !list_empty(&memcg->css.cgroup->children);
4743 }
4744
4745 /*
4746 * Reclaims as many pages from the given memcg as possible and moves
4747 * the rest to the parent.
4748 *
4749 * Caller is responsible for holding css reference for memcg.
4750 */
4751 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4752 {
4753 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4754 struct cgroup *cgrp = memcg->css.cgroup;
4755
4756 /* returns EBUSY if there is a task or if we come here twice. */
4757 if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
4758 return -EBUSY;
4759
4760 /* we call try-to-free pages to make this cgroup empty */
4761 lru_add_drain_all();
4762 /* try to free all pages in this cgroup */
4763 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4764 int progress;
4765
4766 if (signal_pending(current))
4767 return -EINTR;
4768
4769 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4770 false);
4771 if (!progress) {
4772 nr_retries--;
4773 /* maybe some writeback is necessary */
4774 congestion_wait(BLK_RW_ASYNC, HZ/10);
4775 }
4776
4777 }
4778 lru_add_drain();
4779 mem_cgroup_reparent_charges(memcg);
4780
4781 return 0;
4782 }
4783
4784 static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4785 unsigned int event)
4786 {
4787 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4788
4789 if (mem_cgroup_is_root(memcg))
4790 return -EINVAL;
4791 return mem_cgroup_force_empty(memcg);
4792 }
4793
4794 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4795 struct cftype *cft)
4796 {
4797 return mem_cgroup_from_css(css)->use_hierarchy;
4798 }
4799
4800 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4801 struct cftype *cft, u64 val)
4802 {
4803 int retval = 0;
4804 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4805 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4806
4807 mutex_lock(&memcg_create_mutex);
4808
4809 if (memcg->use_hierarchy == val)
4810 goto out;
4811
4812 /*
4813 * If parent's use_hierarchy is set, we can't make any modifications
4814 * in the child subtrees. If it is unset, then the change can
4815 * occur, provided the current cgroup has no children.
4816 *
4817 * For the root cgroup, parent_mem is NULL, so we allow the value to be
4818 * set if there are no children.
4819 */
4820 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
4821 (val == 1 || val == 0)) {
4822 if (list_empty(&memcg->css.cgroup->children))
4823 memcg->use_hierarchy = val;
4824 else
4825 retval = -EBUSY;
4826 } else
4827 retval = -EINVAL;
4828
4829 out:
4830 mutex_unlock(&memcg_create_mutex);
4831
4832 return retval;
4833 }
4834
4835
4836 static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4837 enum mem_cgroup_stat_index idx)
4838 {
4839 struct mem_cgroup *iter;
4840 long val = 0;
4841
4842 /* Per-cpu values can be negative, use a signed accumulator */
4843 for_each_mem_cgroup_tree(iter, memcg)
4844 val += mem_cgroup_read_stat(iter, idx);
4845
4846 if (val < 0) /* race ? */
4847 val = 0;
4848 return val;
4849 }
4850
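/*
 * Current memory (or memory+swap) usage in bytes. The root cgroup is
 * not accounted in the res_counters, so its usage is derived from the
 * recursive cache/rss (and optionally swap) statistics instead.
 */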
4851 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4852 {
4853 u64 val;
4854
4855 if (!mem_cgroup_is_root(memcg)) {
4856 if (!swap)
4857 return res_counter_read_u64(&memcg->res, RES_USAGE);
4858 else
4859 return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4860 }
4861
4862 /*
4863 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4864 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4865 */
4866 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4867 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4868
4869 if (swap)
4870 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4871
4872 return val << PAGE_SHIFT;
4873 }
4874
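/*
 * Read handler for the res_counter-backed control files: returns the
 * computed usage for _MEM/_MEMSWAP RES_USAGE, and the raw res_counter
 * value otherwise (res, memsw or kmem, selected by the cftype's
 * private field).
 */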
4875 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4876 struct cftype *cft)
4877 {
4878 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4879 u64 val;
4880 int name;
4881 enum res_type type;
4882
4883 type = MEMFILE_TYPE(cft->private);
4884 name = MEMFILE_ATTR(cft->private);
4885
4886 switch (type) {
4887 case _MEM:
4888 if (name == RES_USAGE)
4889 val = mem_cgroup_usage(memcg, false);
4890 else
4891 val = res_counter_read_u64(&memcg->res, name);
4892 break;
4893 case _MEMSWAP:
4894 if (name == RES_USAGE)
4895 val = mem_cgroup_usage(memcg, true);
4896 else
4897 val = res_counter_read_u64(&memcg->memsw, name);
4898 break;
4899 case _KMEM:
4900 val = res_counter_read_u64(&memcg->kmem, name);
4901 break;
4902 default:
4903 BUG();
4904 }
4905
4906 return val;
4907 }
4908
4909 #ifdef CONFIG_MEMCG_KMEM
4910 /* should be called with activate_kmem_mutex held */
4911 static int __memcg_activate_kmem(struct mem_cgroup *memcg,
4912 unsigned long long limit)
4913 {
4914 int err = 0;
4915 int memcg_id;
4916
4917 if (memcg_kmem_is_active(memcg))
4918 return 0;
4919
4920 /*
4921 * We are going to allocate memory for data shared by all memory
4922 * cgroups so let's stop accounting here.
4923 */
4924 memcg_stop_kmem_account();
4925
4926 /*
4927 * For simplicity, we won't allow this to be disabled. It also can't
4928 * be changed if the cgroup has children already, or if tasks had
4929 * already joined.
4930 *
4931 * If tasks join before we set the limit, a person looking at
4932 * kmem.usage_in_bytes will have no way to determine when it took
4933 * place, which makes the value quite meaningless.
4934 *
4935 * After it first became limited, changes in the value of the limit are
4936 * of course permitted.
4937 */
4938 mutex_lock(&memcg_create_mutex);
4939 if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
4940 err = -EBUSY;
4941 mutex_unlock(&memcg_create_mutex);
4942 if (err)
4943 goto out;
4944
4945 memcg_id = ida_simple_get(&kmem_limited_groups,
4946 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
4947 if (memcg_id < 0) {
4948 err = memcg_id;
4949 goto out;
4950 }
4951
4952 /*
4953 * Make sure we have enough space for this cgroup in each root cache's
4954 * memcg_params.
4955 */
4956 mutex_lock(&memcg_slab_mutex);
4957 err = memcg_update_all_caches(memcg_id + 1);
4958 mutex_unlock(&memcg_slab_mutex);
4959 if (err)
4960 goto out_rmid;
4961
4962 memcg->kmemcg_id = memcg_id;
4963 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
4964
4965 /*
4966 * We couldn't have accounted to this cgroup, because it hasn't got the
4967 * active bit set yet, so this should succeed.
4968 */
4969 err = res_counter_set_limit(&memcg->kmem, limit);
4970 VM_BUG_ON(err);
4971
4972 static_key_slow_inc(&memcg_kmem_enabled_key);
4973 /*
4974 * Setting the active bit after enabling static branching will
4975 * guarantee no one starts accounting before all call sites are
4976 * patched.
4977 */
4978 memcg_kmem_set_active(memcg);
4979 out:
4980 memcg_resume_kmem_account();
4981 return err;
4982
4983 out_rmid:
4984 ida_simple_remove(&kmem_limited_groups, memcg_id);
4985 goto out;
4986 }
4987
4988 static int memcg_activate_kmem(struct mem_cgroup *memcg,
4989 unsigned long long limit)
4990 {
4991 int ret;
4992
4993 mutex_lock(&activate_kmem_mutex);
4994 ret = __memcg_activate_kmem(memcg, limit);
4995 mutex_unlock(&activate_kmem_mutex);
4996 return ret;
4997 }
4998
4999 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5000 unsigned long long val)
5001 {
5002 int ret;
5003
5004 if (!memcg_kmem_is_active(memcg))
5005 ret = memcg_activate_kmem(memcg, val);
5006 else
5007 ret = res_counter_set_limit(&memcg->kmem, val);
5008 return ret;
5009 }
5010
5011 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
5012 {
5013 int ret = 0;
5014 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5015
5016 if (!parent)
5017 return 0;
5018
5019 mutex_lock(&activate_kmem_mutex);
5020 /*
5021 * If the parent cgroup is not kmem-active now, it cannot be activated
5022 * after this point, because it has at least one child already.
5023 */
5024 if (memcg_kmem_is_active(parent))
5025 ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5026 mutex_unlock(&activate_kmem_mutex);
5027 return ret;
5028 }
5029 #else
5030 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5031 unsigned long long val)
5032 {
5033 return -EINVAL;
5034 }
5035 #endif /* CONFIG_MEMCG_KMEM */
5036
5037 /*
5038 * The user of this function is...
5039 * RES_LIMIT.
5040 */
5041 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
5042 char *buffer)
5043 {
5044 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5045 enum res_type type;
5046 int name;
5047 unsigned long long val;
5048 int ret;
5049
5050 type = MEMFILE_TYPE(cft->private);
5051 name = MEMFILE_ATTR(cft->private);
5052
5053 switch (name) {
5054 case RES_LIMIT:
5055 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5056 ret = -EINVAL;
5057 break;
5058 }
5059 /* This function does all the necessary parsing... reuse it */
5060 ret = res_counter_memparse_write_strategy(buffer, &val);
5061 if (ret)
5062 break;
5063 if (type == _MEM)
5064 ret = mem_cgroup_resize_limit(memcg, val);
5065 else if (type == _MEMSWAP)
5066 ret = mem_cgroup_resize_memsw_limit(memcg, val);
5067 else if (type == _KMEM)
5068 ret = memcg_update_kmem_limit(memcg, val);
5069 else
5070 return -EINVAL;
5071 break;
5072 case RES_SOFT_LIMIT:
5073 ret = res_counter_memparse_write_strategy(buffer, &val);
5074 if (ret)
5075 break;
5076 /*
5077 * For memsw, soft limits are hard to implement in terms
5078 * of semantics; for now, we support soft limits only for
5079 * memory control without swap.
5080 */
5081 if (type == _MEM)
5082 ret = res_counter_set_soft_limit(&memcg->res, val);
5083 else
5084 ret = -EINVAL;
5085 break;
5086 default:
5087 ret = -EINVAL; /* should be BUG() ? */
5088 break;
5089 }
5090 return ret;
5091 }
5092
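/*
 * Report the effective hierarchical limits: the smallest memory and
 * memsw limits found while walking from @memcg up through its
 * use_hierarchy ancestors.
 */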
5093 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5094 unsigned long long *mem_limit, unsigned long long *memsw_limit)
5095 {
5096 unsigned long long min_limit, min_memsw_limit, tmp;
5097
5098 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5099 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5100 if (!memcg->use_hierarchy)
5101 goto out;
5102
5103 while (css_parent(&memcg->css)) {
5104 memcg = mem_cgroup_from_css(css_parent(&memcg->css));
5105 if (!memcg->use_hierarchy)
5106 break;
5107 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5108 min_limit = min(min_limit, tmp);
5109 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5110 min_memsw_limit = min(min_memsw_limit, tmp);
5111 }
5112 out:
5113 *mem_limit = min_limit;
5114 *memsw_limit = min_memsw_limit;
5115 }
5116
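/*
 * Write handler that resets either the max-usage watermark
 * (RES_MAX_USAGE) or the failure counter (RES_FAILCNT) of the res,
 * memsw or kmem counter selected by the event.
 */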
5117 static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
5118 {
5119 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5120 int name;
5121 enum res_type type;
5122
5123 type = MEMFILE_TYPE(event);
5124 name = MEMFILE_ATTR(event);
5125
5126 switch (name) {
5127 case RES_MAX_USAGE:
5128 if (type == _MEM)
5129 res_counter_reset_max(&memcg->res);
5130 else if (type == _MEMSWAP)
5131 res_counter_reset_max(&memcg->memsw);
5132 else if (type == _KMEM)
5133 res_counter_reset_max(&memcg->kmem);
5134 else
5135 return -EINVAL;
5136 break;
5137 case RES_FAILCNT:
5138 if (type == _MEM)
5139 res_counter_reset_failcnt(&memcg->res);
5140 else if (type == _MEMSWAP)
5141 res_counter_reset_failcnt(&memcg->memsw);
5142 else if (type == _KMEM)
5143 res_counter_reset_failcnt(&memcg->kmem);
5144 else
5145 return -EINVAL;
5146 break;
5147 }
5148
5149 return 0;
5150 }
5151
5152 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
5153 struct cftype *cft)
5154 {
5155 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
5156 }
5157
5158 #ifdef CONFIG_MMU
5159 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5160 struct cftype *cft, u64 val)
5161 {
5162 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5163
5164 if (val >= (1 << NR_MOVE_TYPE))
5165 return -EINVAL;
5166
5167 /*
5168 * No kind of locking is needed in here, because ->can_attach() will
5169 * check this value once in the beginning of the process, and then carry
5170 * on with stale data. This means that changes to this value will only
5171 * affect task migrations starting after the change.
5172 */
5173 memcg->move_charge_at_immigrate = val;
5174 return 0;
5175 }
5176 #else
5177 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5178 struct cftype *cft, u64 val)
5179 {
5180 return -ENOSYS;
5181 }
5182 #endif
5183
5184 #ifdef CONFIG_NUMA
5185 static int memcg_numa_stat_show(struct seq_file *m, void *v)
5186 {
5187 struct numa_stat {
5188 const char *name;
5189 unsigned int lru_mask;
5190 };
5191
5192 static const struct numa_stat stats[] = {
5193 { "total", LRU_ALL },
5194 { "file", LRU_ALL_FILE },
5195 { "anon", LRU_ALL_ANON },
5196 { "unevictable", BIT(LRU_UNEVICTABLE) },
5197 };
5198 const struct numa_stat *stat;
5199 int nid;
5200 unsigned long nr;
5201 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5202
5203 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5204 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5205 seq_printf(m, "%s=%lu", stat->name, nr);
5206 for_each_node_state(nid, N_MEMORY) {
5207 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5208 stat->lru_mask);
5209 seq_printf(m, " N%d=%lu", nid, nr);
5210 }
5211 seq_putc(m, '\n');
5212 }
5213
5214 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5215 struct mem_cgroup *iter;
5216
5217 nr = 0;
5218 for_each_mem_cgroup_tree(iter, memcg)
5219 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5220 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5221 for_each_node_state(nid, N_MEMORY) {
5222 nr = 0;
5223 for_each_mem_cgroup_tree(iter, memcg)
5224 nr += mem_cgroup_node_nr_lru_pages(
5225 iter, nid, stat->lru_mask);
5226 seq_printf(m, " N%d=%lu", nid, nr);
5227 }
5228 seq_putc(m, '\n');
5229 }
5230
5231 return 0;
5232 }
5233 #endif /* CONFIG_NUMA */
5234
5235 static inline void mem_cgroup_lru_names_not_uptodate(void)
5236 {
5237 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5238 }
5239
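/*
 * memory.stat: print the local statistics, event counters and per-LRU
 * page counts, the hierarchical limits, the "total_*" sums over the
 * subtree, and (with CONFIG_DEBUG_VM) the recent reclaim statistics.
 */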
5240 static int memcg_stat_show(struct seq_file *m, void *v)
5241 {
5242 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5243 struct mem_cgroup *mi;
5244 unsigned int i;
5245
5246 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5247 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5248 continue;
5249 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5250 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5251 }
5252
5253 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5254 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5255 mem_cgroup_read_events(memcg, i));
5256
5257 for (i = 0; i < NR_LRU_LISTS; i++)
5258 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5259 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5260
5261 /* Hierarchical information */
5262 {
5263 unsigned long long limit, memsw_limit;
5264 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5265 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5266 if (do_swap_account)
5267 seq_printf(m, "hierarchical_memsw_limit %llu\n",
5268 memsw_limit);
5269 }
5270
5271 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5272 long long val = 0;
5273
5274 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5275 continue;
5276 for_each_mem_cgroup_tree(mi, memcg)
5277 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5278 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5279 }
5280
5281 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5282 unsigned long long val = 0;
5283
5284 for_each_mem_cgroup_tree(mi, memcg)
5285 val += mem_cgroup_read_events(mi, i);
5286 seq_printf(m, "total_%s %llu\n",
5287 mem_cgroup_events_names[i], val);
5288 }
5289
5290 for (i = 0; i < NR_LRU_LISTS; i++) {
5291 unsigned long long val = 0;
5292
5293 for_each_mem_cgroup_tree(mi, memcg)
5294 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5295 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5296 }
5297
5298 #ifdef CONFIG_DEBUG_VM
5299 {
5300 int nid, zid;
5301 struct mem_cgroup_per_zone *mz;
5302 struct zone_reclaim_stat *rstat;
5303 unsigned long recent_rotated[2] = {0, 0};
5304 unsigned long recent_scanned[2] = {0, 0};
5305
5306 for_each_online_node(nid)
5307 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5308 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5309 rstat = &mz->lruvec.reclaim_stat;
5310
5311 recent_rotated[0] += rstat->recent_rotated[0];
5312 recent_rotated[1] += rstat->recent_rotated[1];
5313 recent_scanned[0] += rstat->recent_scanned[0];
5314 recent_scanned[1] += rstat->recent_scanned[1];
5315 }
5316 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5317 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5318 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5319 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
5320 }
5321 #endif
5322
5323 return 0;
5324 }
5325
5326 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5327 struct cftype *cft)
5328 {
5329 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5330
5331 return mem_cgroup_swappiness(memcg);
5332 }
5333
5334 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5335 struct cftype *cft, u64 val)
5336 {
5337 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5338
5339 if (val > 100)
5340 return -EINVAL;
5341
5342 if (css_parent(css))
5343 memcg->swappiness = val;
5344 else
5345 vm_swappiness = val;
5346
5347 return 0;
5348 }
5349
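/*
 * Compare current usage against the registered thresholds and signal
 * the eventfd of every threshold crossed (in either direction) since
 * the last check, then update current_threshold accordingly.
 */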
5350 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5351 {
5352 struct mem_cgroup_threshold_ary *t;
5353 u64 usage;
5354 int i;
5355
5356 rcu_read_lock();
5357 if (!swap)
5358 t = rcu_dereference(memcg->thresholds.primary);
5359 else
5360 t = rcu_dereference(memcg->memsw_thresholds.primary);
5361
5362 if (!t)
5363 goto unlock;
5364
5365 usage = mem_cgroup_usage(memcg, swap);
5366
5367 /*
5368 * current_threshold points to the threshold just below or equal to usage.
5369 * If that's not true, a threshold was crossed after the last
5370 * call of __mem_cgroup_threshold().
5371 */
5372 i = t->current_threshold;
5373
5374 /*
5375 * Iterate backward over the array of thresholds starting from
5376 * current_threshold and check if a threshold is crossed.
5377 * If none of the thresholds below usage is crossed, we read
5378 * only one element of the array here.
5379 */
5380 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5381 eventfd_signal(t->entries[i].eventfd, 1);
5382
5383 /* i = current_threshold + 1 */
5384 i++;
5385
5386 /*
5387 * Iterate forward over the array of thresholds starting from
5388 * current_threshold+1 and check if a threshold is crossed.
5389 * If none of the thresholds above usage is crossed, we read
5390 * only one element of the array here.
5391 */
5392 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5393 eventfd_signal(t->entries[i].eventfd, 1);
5394
5395 /* Update current_threshold */
5396 t->current_threshold = i - 1;
5397 unlock:
5398 rcu_read_unlock();
5399 }
5400
5401 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5402 {
5403 while (memcg) {
5404 __mem_cgroup_threshold(memcg, false);
5405 if (do_swap_account)
5406 __mem_cgroup_threshold(memcg, true);
5407
5408 memcg = parent_mem_cgroup(memcg);
5409 }
5410 }
5411
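/* sort() comparator: order thresholds ascending by threshold value */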
5412 static int compare_thresholds(const void *a, const void *b)
5413 {
5414 const struct mem_cgroup_threshold *_a = a;
5415 const struct mem_cgroup_threshold *_b = b;
5416
5417 if (_a->threshold > _b->threshold)
5418 return 1;
5419
5420 if (_a->threshold < _b->threshold)
5421 return -1;
5422
5423 return 0;
5424 }
5425
5426 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5427 {
5428 struct mem_cgroup_eventfd_list *ev;
5429
5430 list_for_each_entry(ev, &memcg->oom_notify, list)
5431 eventfd_signal(ev->eventfd, 1);
5432 return 0;
5433 }
5434
5435 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
5436 {
5437 struct mem_cgroup *iter;
5438
5439 for_each_mem_cgroup_tree(iter, memcg)
5440 mem_cgroup_oom_notify_cb(iter);
5441 }
5442
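/*
 * Register a usage threshold notification: parse the threshold value,
 * build a new sorted thresholds array containing the additional entry,
 * locate the current threshold and publish the array via RCU.
 */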
5443 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5444 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
5445 {
5446 struct mem_cgroup_thresholds *thresholds;
5447 struct mem_cgroup_threshold_ary *new;
5448 u64 threshold, usage;
5449 int i, size, ret;
5450
5451 ret = res_counter_memparse_write_strategy(args, &threshold);
5452 if (ret)
5453 return ret;
5454
5455 mutex_lock(&memcg->thresholds_lock);
5456
5457 if (type == _MEM)
5458 thresholds = &memcg->thresholds;
5459 else if (type == _MEMSWAP)
5460 thresholds = &memcg->memsw_thresholds;
5461 else
5462 BUG();
5463
5464 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5465
5466 /* Check if a threshold crossed before adding a new one */
5467 if (thresholds->primary)
5468 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5469
5470 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
5471
5472 /* Allocate memory for new array of thresholds */
5473 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
5474 GFP_KERNEL);
5475 if (!new) {
5476 ret = -ENOMEM;
5477 goto unlock;
5478 }
5479 new->size = size;
5480
5481 /* Copy thresholds (if any) to new array */
5482 if (thresholds->primary) {
5483 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
5484 sizeof(struct mem_cgroup_threshold));
5485 }
5486
5487 /* Add new threshold */
5488 new->entries[size - 1].eventfd = eventfd;
5489 new->entries[size - 1].threshold = threshold;
5490
5491 /* Sort thresholds. Registering a new threshold isn't time-critical */
5492 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
5493 compare_thresholds, NULL);
5494
5495 /* Find current threshold */
5496 new->current_threshold = -1;
5497 for (i = 0; i < size; i++) {
5498 if (new->entries[i].threshold <= usage) {
5499 /*
5500 * new->current_threshold will not be used until
5501 * rcu_assign_pointer(), so it's safe to increment
5502 * it here.
5503 */
5504 ++new->current_threshold;
5505 } else
5506 break;
5507 }
5508
5509 /* Free old spare buffer and save old primary buffer as spare */
5510 kfree(thresholds->spare);
5511 thresholds->spare = thresholds->primary;
5512
5513 rcu_assign_pointer(thresholds->primary, new);
5514
5515 /* To be sure that nobody uses thresholds */
5516 synchronize_rcu();
5517
5518 unlock:
5519 mutex_unlock(&memcg->thresholds_lock);
5520
5521 return ret;
5522 }
5523
5524 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5525 struct eventfd_ctx *eventfd, const char *args)
5526 {
5527 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
5528 }
5529
5530 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
5531 struct eventfd_ctx *eventfd, const char *args)
5532 {
5533 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
5534 }
5535
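/*
 * Unregister a usage threshold notification: rebuild the thresholds
 * array without the entries belonging to @eventfd (reusing the spare
 * buffer) and publish the result via RCU.
 */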
5536 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5537 struct eventfd_ctx *eventfd, enum res_type type)
5538 {
5539 struct mem_cgroup_thresholds *thresholds;
5540 struct mem_cgroup_threshold_ary *new;
5541 u64 usage;
5542 int i, j, size;
5543
5544 mutex_lock(&memcg->thresholds_lock);
5545 if (type == _MEM)
5546 thresholds = &memcg->thresholds;
5547 else if (type == _MEMSWAP)
5548 thresholds = &memcg->memsw_thresholds;
5549 else
5550 BUG();
5551
5552 if (!thresholds->primary)
5553 goto unlock;
5554
5555 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5556
5557 /* Check if a threshold crossed before removing */
5558 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5559
5560 /* Calculate the new number of thresholds */
5561 size = 0;
5562 for (i = 0; i < thresholds->primary->size; i++) {
5563 if (thresholds->primary->entries[i].eventfd != eventfd)
5564 size++;
5565 }
5566
5567 new = thresholds->spare;
5568
5569 /* Set thresholds array to NULL if we don't have thresholds */
5570 if (!size) {
5571 kfree(new);
5572 new = NULL;
5573 goto swap_buffers;
5574 }
5575
5576 new->size = size;
5577
5578 /* Copy thresholds and find current threshold */
5579 new->current_threshold = -1;
5580 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5581 if (thresholds->primary->entries[i].eventfd == eventfd)
5582 continue;
5583
5584 new->entries[j] = thresholds->primary->entries[i];
5585 if (new->entries[j].threshold <= usage) {
5586 /*
5587 * new->current_threshold will not be used
5588 * until rcu_assign_pointer(), so it's safe to increment
5589 * it here.
5590 */
5591 ++new->current_threshold;
5592 }
5593 j++;
5594 }
5595
5596 swap_buffers:
5597 /* Swap primary and spare array */
5598 thresholds->spare = thresholds->primary;
5599 /* If all events are unregistered, free the spare array */
5600 if (!new) {
5601 kfree(thresholds->spare);
5602 thresholds->spare = NULL;
5603 }
5604
5605 rcu_assign_pointer(thresholds->primary, new);
5606
5607 /* To be sure that nobody uses thresholds */
5608 synchronize_rcu();
5609 unlock:
5610 mutex_unlock(&memcg->thresholds_lock);
5611 }
5612
5613 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5614 struct eventfd_ctx *eventfd)
5615 {
5616 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
5617 }
5618
5619 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5620 struct eventfd_ctx *eventfd)
5621 {
5622 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
5623 }
5624
5625 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
5626 struct eventfd_ctx *eventfd, const char *args)
5627 {
5628 struct mem_cgroup_eventfd_list *event;
5629
5630 event = kmalloc(sizeof(*event), GFP_KERNEL);
5631 if (!event)
5632 return -ENOMEM;
5633
5634 spin_lock(&memcg_oom_lock);
5635
5636 event->eventfd = eventfd;
5637 list_add(&event->list, &memcg->oom_notify);
5638
5639 /* already in OOM ? */
5640 if (atomic_read(&memcg->under_oom))
5641 eventfd_signal(eventfd, 1);
5642 spin_unlock(&memcg_oom_lock);
5643
5644 return 0;
5645 }
5646
5647 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
5648 struct eventfd_ctx *eventfd)
5649 {
5650 struct mem_cgroup_eventfd_list *ev, *tmp;
5651
5652 spin_lock(&memcg_oom_lock);
5653
5654 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
5655 if (ev->eventfd == eventfd) {
5656 list_del(&ev->list);
5657 kfree(ev);
5658 }
5659 }
5660
5661 spin_unlock(&memcg_oom_lock);
5662 }
5663
5664 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
5665 {
5666 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5667
5668 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5669 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
5670 return 0;
5671 }
5672
5673 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
5674 struct cftype *cft, u64 val)
5675 {
5676 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5677
5678 /* cannot set to root cgroup and only 0 and 1 are allowed */
5679 if (!css_parent(css) || !((val == 0) || (val == 1)))
5680 return -EINVAL;
5681
5682 memcg->oom_kill_disable = val;
5683 if (!val)
5684 memcg_oom_recover(memcg);
5685
5686 return 0;
5687 }
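/*
 * Illustrative only (the cgroup path is an assumption, not taken from this
 * file): what the two functions above expose through memory.oom_control in
 * the legacy v1 hierarchy.
 *
 *	# cat /sys/fs/cgroup/memory/grp/memory.oom_control
 *	oom_kill_disable 0
 *	under_oom 0
 *	# echo 1 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *
 * The write path rejects the root cgroup and any value other than 0 or 1;
 * the OOM notifications themselves are delivered via the eventfd list
 * maintained by mem_cgroup_oom_register_event() above.
 */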
5688
5689 #ifdef CONFIG_MEMCG_KMEM
5690 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5691 {
5692 int ret;
5693
5694 memcg->kmemcg_id = -1;
5695 ret = memcg_propagate_kmem(memcg);
5696 if (ret)
5697 return ret;
5698
5699 return mem_cgroup_sockets_init(memcg, ss);
5700 }
5701
5702 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5703 {
5704 mem_cgroup_sockets_destroy(memcg);
5705 }
5706
5707 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5708 {
5709 if (!memcg_kmem_is_active(memcg))
5710 return;
5711
5712 /*
5713 * kmem charges can outlive the cgroup. In the case of slab
5714  * pages, for instance, a page may contain objects from various
5715  * processes. As we do not take a reference for every such
5716  * allocation, we have to be careful when doing the uncharge
5717 * (see memcg_uncharge_kmem) and here during offlining.
5718 *
5719  * The idea is that only the _last_ uncharge which sees
5720 * the dead memcg will drop the last reference. An additional
5721 * reference is taken here before the group is marked dead
5722 * which is then paired with css_put during uncharge resp. here.
5723 *
5724  * Although this might sound strange, as this path is called from
5725  * css_offline() when the reference might have already dropped to 0
5726  * and shouldn't be incremented anymore (css_tryget would fail),
5727  * we do not have any other option because of the lifetime of the
5728  * kmem allocations.
5729 */
5730 css_get(&memcg->css);
5731
5732 memcg_kmem_mark_dead(memcg);
5733
5734 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5735 return;
5736
5737 if (memcg_kmem_test_and_clear_dead(memcg))
5738 css_put(&memcg->css);
5739 }
5740 #else
5741 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5742 {
5743 return 0;
5744 }
5745
5746 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5747 {
5748 }
5749
5750 static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5751 {
5752 }
5753 #endif
5754
5755 /*
5756 * DO NOT USE IN NEW FILES.
5757 *
5758 * "cgroup.event_control" implementation.
5759 *
5760 * This is way over-engineered. It tries to support fully configurable
5761 * events for each user. Such level of flexibility is completely
5762 * unnecessary especially in the light of the planned unified hierarchy.
5763 *
5764 * Please deprecate this and replace with something simpler if at all
5765 * possible.
5766 */
5767
5768 /*
5769 * Unregister event and free resources.
5770 *
5771 * Gets called from workqueue.
5772 */
5773 static void memcg_event_remove(struct work_struct *work)
5774 {
5775 struct mem_cgroup_event *event =
5776 container_of(work, struct mem_cgroup_event, remove);
5777 struct mem_cgroup *memcg = event->memcg;
5778
5779 remove_wait_queue(event->wqh, &event->wait);
5780
5781 event->unregister_event(memcg, event->eventfd);
5782
5783 /* Notify userspace the event is going away. */
5784 eventfd_signal(event->eventfd, 1);
5785
5786 eventfd_ctx_put(event->eventfd);
5787 kfree(event);
5788 css_put(&memcg->css);
5789 }
5790
5791 /*
5792 * Gets called on POLLHUP on eventfd when user closes it.
5793 *
5794 * Called with wqh->lock held and interrupts disabled.
5795 */
5796 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5797 int sync, void *key)
5798 {
5799 struct mem_cgroup_event *event =
5800 container_of(wait, struct mem_cgroup_event, wait);
5801 struct mem_cgroup *memcg = event->memcg;
5802 unsigned long flags = (unsigned long)key;
5803
5804 if (flags & POLLHUP) {
5805 /*
5806 * If the event has been detached at cgroup removal, we
5807  * can simply return, knowing the other side will clean up
5808 * for us.
5809 *
5810 * We can't race against event freeing since the other
5811 * side will require wqh->lock via remove_wait_queue(),
5812 * which we hold.
5813 */
5814 spin_lock(&memcg->event_list_lock);
5815 if (!list_empty(&event->list)) {
5816 list_del_init(&event->list);
5817 /*
5818  * We are in atomic context, but memcg_event_remove()
5819  * may sleep, so we have to call it from a workqueue.
5820 */
5821 schedule_work(&event->remove);
5822 }
5823 spin_unlock(&memcg->event_list_lock);
5824 }
5825
5826 return 0;
5827 }
5828
5829 static void memcg_event_ptable_queue_proc(struct file *file,
5830 wait_queue_head_t *wqh, poll_table *pt)
5831 {
5832 struct mem_cgroup_event *event =
5833 container_of(pt, struct mem_cgroup_event, pt);
5834
5835 event->wqh = wqh;
5836 add_wait_queue(wqh, &event->wait);
5837 }
5838
5839 /*
5840 * DO NOT USE IN NEW FILES.
5841 *
5842 * Parse input and register new cgroup event handler.
5843 *
5844 * Input must be in format '<event_fd> <control_fd> <args>'.
5845 * Interpretation of args is defined by control file implementation.
5846 */
5847 static int memcg_write_event_control(struct cgroup_subsys_state *css,
5848 struct cftype *cft, char *buffer)
5849 {
5850 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5851 struct mem_cgroup_event *event;
5852 struct cgroup_subsys_state *cfile_css;
5853 unsigned int efd, cfd;
5854 struct fd efile;
5855 struct fd cfile;
5856 const char *name;
5857 char *endp;
5858 int ret;
5859
5860 efd = simple_strtoul(buffer, &endp, 10);
5861 if (*endp != ' ')
5862 return -EINVAL;
5863 buffer = endp + 1;
5864
5865 cfd = simple_strtoul(buffer, &endp, 10);
5866 if ((*endp != ' ') && (*endp != '\0'))
5867 return -EINVAL;
5868 buffer = endp + 1;
5869
5870 event = kzalloc(sizeof(*event), GFP_KERNEL);
5871 if (!event)
5872 return -ENOMEM;
5873
5874 event->memcg = memcg;
5875 INIT_LIST_HEAD(&event->list);
5876 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5877 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5878 INIT_WORK(&event->remove, memcg_event_remove);
5879
5880 efile = fdget(efd);
5881 if (!efile.file) {
5882 ret = -EBADF;
5883 goto out_kfree;
5884 }
5885
5886 event->eventfd = eventfd_ctx_fileget(efile.file);
5887 if (IS_ERR(event->eventfd)) {
5888 ret = PTR_ERR(event->eventfd);
5889 goto out_put_efile;
5890 }
5891
5892 cfile = fdget(cfd);
5893 if (!cfile.file) {
5894 ret = -EBADF;
5895 goto out_put_eventfd;
5896 }
5897
5898 /* the process needs read permission on the control file */
5899 /* AV: shouldn't we check that it's been opened for read instead? */
5900 ret = inode_permission(file_inode(cfile.file), MAY_READ);
5901 if (ret < 0)
5902 goto out_put_cfile;
5903
5904 /*
5905 * Determine the event callbacks and set them in @event. This used
5906 * to be done via struct cftype but cgroup core no longer knows
5907 * about these events. The following is crude but the whole thing
5908 * is for compatibility anyway.
5909 *
5910 * DO NOT ADD NEW FILES.
5911 */
5912 name = cfile.file->f_dentry->d_name.name;
5913
5914 if (!strcmp(name, "memory.usage_in_bytes")) {
5915 event->register_event = mem_cgroup_usage_register_event;
5916 event->unregister_event = mem_cgroup_usage_unregister_event;
5917 } else if (!strcmp(name, "memory.oom_control")) {
5918 event->register_event = mem_cgroup_oom_register_event;
5919 event->unregister_event = mem_cgroup_oom_unregister_event;
5920 } else if (!strcmp(name, "memory.pressure_level")) {
5921 event->register_event = vmpressure_register_event;
5922 event->unregister_event = vmpressure_unregister_event;
5923 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5924 event->register_event = memsw_cgroup_usage_register_event;
5925 event->unregister_event = memsw_cgroup_usage_unregister_event;
5926 } else {
5927 ret = -EINVAL;
5928 goto out_put_cfile;
5929 }
5930
5931 /*
5932 * Verify @cfile should belong to @css. Also, remaining events are
5933 * automatically removed on cgroup destruction but the removal is
5934 * asynchronous, so take an extra ref on @css.
5935 */
5936 cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
5937 &memory_cgrp_subsys);
5938 ret = -EINVAL;
5939 if (IS_ERR(cfile_css))
5940 goto out_put_cfile;
5941 if (cfile_css != css) {
5942 css_put(cfile_css);
5943 goto out_put_cfile;
5944 }
5945
5946 ret = event->register_event(memcg, event->eventfd, buffer);
5947 if (ret)
5948 goto out_put_css;
5949
5950 efile.file->f_op->poll(efile.file, &event->pt);
5951
5952 spin_lock(&memcg->event_list_lock);
5953 list_add(&event->list, &memcg->event_list);
5954 spin_unlock(&memcg->event_list_lock);
5955
5956 fdput(cfile);
5957 fdput(efile);
5958
5959 return 0;
5960
5961 out_put_css:
5962 css_put(css);
5963 out_put_cfile:
5964 fdput(cfile);
5965 out_put_eventfd:
5966 eventfd_ctx_put(event->eventfd);
5967 out_put_efile:
5968 fdput(efile);
5969 out_kfree:
5970 kfree(event);
5971
5972 return ret;
5973 }
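/*
 * Note added for clarity (not from the original source): the trailing <args>
 * of the event_control string is handed unmodified to the register_event
 * callback chosen above. For the usage_in_bytes files it is a threshold in
 * bytes, for memory.pressure_level it is a vmpressure level string, and
 * memory.oom_control ignores it; see the respective register functions for
 * the exact parsing.
 */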
5974
5975 static struct cftype mem_cgroup_files[] = {
5976 {
5977 .name = "usage_in_bytes",
5978 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5979 .read_u64 = mem_cgroup_read_u64,
5980 },
5981 {
5982 .name = "max_usage_in_bytes",
5983 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5984 .trigger = mem_cgroup_reset,
5985 .read_u64 = mem_cgroup_read_u64,
5986 },
5987 {
5988 .name = "limit_in_bytes",
5989 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5990 .write_string = mem_cgroup_write,
5991 .read_u64 = mem_cgroup_read_u64,
5992 },
5993 {
5994 .name = "soft_limit_in_bytes",
5995 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5996 .write_string = mem_cgroup_write,
5997 .read_u64 = mem_cgroup_read_u64,
5998 },
5999 {
6000 .name = "failcnt",
6001 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6002 .trigger = mem_cgroup_reset,
6003 .read_u64 = mem_cgroup_read_u64,
6004 },
6005 {
6006 .name = "stat",
6007 .seq_show = memcg_stat_show,
6008 },
6009 {
6010 .name = "force_empty",
6011 .trigger = mem_cgroup_force_empty_write,
6012 },
6013 {
6014 .name = "use_hierarchy",
6015 .flags = CFTYPE_INSANE,
6016 .write_u64 = mem_cgroup_hierarchy_write,
6017 .read_u64 = mem_cgroup_hierarchy_read,
6018 },
6019 {
6020 .name = "cgroup.event_control", /* XXX: for compat */
6021 .write_string = memcg_write_event_control,
6022 .flags = CFTYPE_NO_PREFIX,
6023 .mode = S_IWUGO,
6024 },
6025 {
6026 .name = "swappiness",
6027 .read_u64 = mem_cgroup_swappiness_read,
6028 .write_u64 = mem_cgroup_swappiness_write,
6029 },
6030 {
6031 .name = "move_charge_at_immigrate",
6032 .read_u64 = mem_cgroup_move_charge_read,
6033 .write_u64 = mem_cgroup_move_charge_write,
6034 },
6035 {
6036 .name = "oom_control",
6037 .seq_show = mem_cgroup_oom_control_read,
6038 .write_u64 = mem_cgroup_oom_control_write,
6039 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6040 },
6041 {
6042 .name = "pressure_level",
6043 },
6044 #ifdef CONFIG_NUMA
6045 {
6046 .name = "numa_stat",
6047 .seq_show = memcg_numa_stat_show,
6048 },
6049 #endif
6050 #ifdef CONFIG_MEMCG_KMEM
6051 {
6052 .name = "kmem.limit_in_bytes",
6053 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6054 .write_string = mem_cgroup_write,
6055 .read_u64 = mem_cgroup_read_u64,
6056 },
6057 {
6058 .name = "kmem.usage_in_bytes",
6059 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6060 .read_u64 = mem_cgroup_read_u64,
6061 },
6062 {
6063 .name = "kmem.failcnt",
6064 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6065 .trigger = mem_cgroup_reset,
6066 .read_u64 = mem_cgroup_read_u64,
6067 },
6068 {
6069 .name = "kmem.max_usage_in_bytes",
6070 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6071 .trigger = mem_cgroup_reset,
6072 .read_u64 = mem_cgroup_read_u64,
6073 },
6074 #ifdef CONFIG_SLABINFO
6075 {
6076 .name = "kmem.slabinfo",
6077 .seq_show = mem_cgroup_slabinfo_read,
6078 },
6079 #endif
6080 #endif
6081 { }, /* terminate */
6082 };
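/*
 * Illustrative only (paths and the 512M value are assumptions): a typical
 * interaction with the files declared above under the legacy v1 hierarchy.
 *
 *	# echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *	# cat /sys/fs/cgroup/memory/grp/memory.usage_in_bytes
 *	# echo 0 > /sys/fs/cgroup/memory/grp/memory.failcnt
 *
 * mem_cgroup_write() accepts memparse()-style suffixes such as "512M", and
 * writes to the failcnt/max_usage files go through mem_cgroup_reset(), which
 * clears the corresponding counter.
 */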
6083
6084 #ifdef CONFIG_MEMCG_SWAP
6085 static struct cftype memsw_cgroup_files[] = {
6086 {
6087 .name = "memsw.usage_in_bytes",
6088 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6089 .read_u64 = mem_cgroup_read_u64,
6090 },
6091 {
6092 .name = "memsw.max_usage_in_bytes",
6093 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6094 .trigger = mem_cgroup_reset,
6095 .read_u64 = mem_cgroup_read_u64,
6096 },
6097 {
6098 .name = "memsw.limit_in_bytes",
6099 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6100 .write_string = mem_cgroup_write,
6101 .read_u64 = mem_cgroup_read_u64,
6102 },
6103 {
6104 .name = "memsw.failcnt",
6105 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6106 .trigger = mem_cgroup_reset,
6107 .read_u64 = mem_cgroup_read_u64,
6108 },
6109 { }, /* terminate */
6110 };
6111 #endif
6112 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6113 {
6114 struct mem_cgroup_per_node *pn;
6115 struct mem_cgroup_per_zone *mz;
6116 int zone, tmp = node;
6117 /*
6118  * This routine is called against all possible nodes.
6119  * But it's a BUG to call kmalloc() against an offline node.
6120  *
6121  * TODO: this routine can waste a lot of memory for nodes which will
6122  * never be onlined. It's better to use a memory hotplug callback
6123  * function.
6124 */
6125 if (!node_state(node, N_NORMAL_MEMORY))
6126 tmp = -1;
6127 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6128 if (!pn)
6129 return 1;
6130
6131 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6132 mz = &pn->zoneinfo[zone];
6133 lruvec_init(&mz->lruvec);
6134 mz->usage_in_excess = 0;
6135 mz->on_tree = false;
6136 mz->memcg = memcg;
6137 }
6138 memcg->nodeinfo[node] = pn;
6139 return 0;
6140 }
6141
6142 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6143 {
6144 kfree(memcg->nodeinfo[node]);
6145 }
6146
6147 static struct mem_cgroup *mem_cgroup_alloc(void)
6148 {
6149 struct mem_cgroup *memcg;
6150 size_t size;
6151
6152 size = sizeof(struct mem_cgroup);
6153 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
6154
6155 memcg = kzalloc(size, GFP_KERNEL);
6156 if (!memcg)
6157 return NULL;
6158
6159 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6160 if (!memcg->stat)
6161 goto out_free;
6162 spin_lock_init(&memcg->pcp_counter_lock);
6163 return memcg;
6164
6165 out_free:
6166 kfree(memcg);
6167 return NULL;
6168 }
6169
6170 /*
6171  * When destroying a mem_cgroup, references from swap_cgroup can remain.
6172  * (scanning them all at force_empty is too costly...)
6173  *
6174  * Instead of clearing all references at force_empty, we remember
6175  * the number of references from swap_cgroup and free the mem_cgroup when
6176  * it goes down to 0.
6177 *
6178 * Removal of cgroup itself succeeds regardless of refs from swap.
6179 */
6180
6181 static void __mem_cgroup_free(struct mem_cgroup *memcg)
6182 {
6183 int node;
6184
6185 mem_cgroup_remove_from_trees(memcg);
6186
6187 for_each_node(node)
6188 free_mem_cgroup_per_zone_info(memcg, node);
6189
6190 free_percpu(memcg->stat);
6191
6192 /*
6193 * We need to make sure that (at least for now), the jump label
6194 * destruction code runs outside of the cgroup lock. This is because
6195 * get_online_cpus(), which is called from the static_branch update,
6196 * can't be called inside the cgroup_lock. cpusets are the ones
6197 * enforcing this dependency, so if they ever change, we might as well.
6198 *
6199 * schedule_work() will guarantee this happens. Be careful if you need
6200 * to move this code around, and make sure it is outside
6201 * the cgroup_lock.
6202 */
6203 disarm_static_keys(memcg);
6204 kfree(memcg);
6205 }
6206
6207 /*
6208 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
6209 */
6210 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6211 {
6212 if (!memcg->res.parent)
6213 return NULL;
6214 return mem_cgroup_from_res_counter(memcg->res.parent, res);
6215 }
6216 EXPORT_SYMBOL(parent_mem_cgroup);
6217
6218 static void __init mem_cgroup_soft_limit_tree_init(void)
6219 {
6220 struct mem_cgroup_tree_per_node *rtpn;
6221 struct mem_cgroup_tree_per_zone *rtpz;
6222 int tmp, node, zone;
6223
6224 for_each_node(node) {
6225 tmp = node;
6226 if (!node_state(node, N_NORMAL_MEMORY))
6227 tmp = -1;
6228 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6229 BUG_ON(!rtpn);
6230
6231 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6232
6233 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6234 rtpz = &rtpn->rb_tree_per_zone[zone];
6235 rtpz->rb_root = RB_ROOT;
6236 spin_lock_init(&rtpz->lock);
6237 }
6238 }
6239 }
6240
6241 static struct cgroup_subsys_state * __ref
6242 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6243 {
6244 struct mem_cgroup *memcg;
6245 long error = -ENOMEM;
6246 int node;
6247
6248 memcg = mem_cgroup_alloc();
6249 if (!memcg)
6250 return ERR_PTR(error);
6251
6252 for_each_node(node)
6253 if (alloc_mem_cgroup_per_zone_info(memcg, node))
6254 goto free_out;
6255
6256 /* root ? */
6257 if (parent_css == NULL) {
6258 root_mem_cgroup = memcg;
6259 res_counter_init(&memcg->res, NULL);
6260 res_counter_init(&memcg->memsw, NULL);
6261 res_counter_init(&memcg->kmem, NULL);
6262 }
6263
6264 memcg->last_scanned_node = MAX_NUMNODES;
6265 INIT_LIST_HEAD(&memcg->oom_notify);
6266 memcg->move_charge_at_immigrate = 0;
6267 mutex_init(&memcg->thresholds_lock);
6268 spin_lock_init(&memcg->move_lock);
6269 vmpressure_init(&memcg->vmpressure);
6270 INIT_LIST_HEAD(&memcg->event_list);
6271 spin_lock_init(&memcg->event_list_lock);
6272
6273 return &memcg->css;
6274
6275 free_out:
6276 __mem_cgroup_free(memcg);
6277 return ERR_PTR(error);
6278 }
6279
6280 static int
6281 mem_cgroup_css_online(struct cgroup_subsys_state *css)
6282 {
6283 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6284 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
6285
6286 if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6287 return -ENOSPC;
6288
6289 if (!parent)
6290 return 0;
6291
6292 mutex_lock(&memcg_create_mutex);
6293
6294 memcg->use_hierarchy = parent->use_hierarchy;
6295 memcg->oom_kill_disable = parent->oom_kill_disable;
6296 memcg->swappiness = mem_cgroup_swappiness(parent);
6297
6298 if (parent->use_hierarchy) {
6299 res_counter_init(&memcg->res, &parent->res);
6300 res_counter_init(&memcg->memsw, &parent->memsw);
6301 res_counter_init(&memcg->kmem, &parent->kmem);
6302
6303 /*
6304 * No need to take a reference to the parent because cgroup
6305 * core guarantees its existence.
6306 */
6307 } else {
6308 res_counter_init(&memcg->res, NULL);
6309 res_counter_init(&memcg->memsw, NULL);
6310 res_counter_init(&memcg->kmem, NULL);
6311 /*
6312  * Deeper hierarchy with use_hierarchy == false doesn't make
6313  * much sense, so let the cgroup subsystem know about this
6314  * unfortunate state in our controller.
6315 */
6316 if (parent != root_mem_cgroup)
6317 memory_cgrp_subsys.broken_hierarchy = true;
6318 }
6319 mutex_unlock(&memcg_create_mutex);
6320
6321 return memcg_init_kmem(memcg, &memory_cgrp_subsys);
6322 }
6323
6324 /*
6325 * Announce all parents that a group from their hierarchy is gone.
6326 */
6327 static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6328 {
6329 struct mem_cgroup *parent = memcg;
6330
6331 while ((parent = parent_mem_cgroup(parent)))
6332 mem_cgroup_iter_invalidate(parent);
6333
6334 /*
6335  * If the root memcg is not hierarchical we have to check it
6336  * explicitly.
6337 */
6338 if (!root_mem_cgroup->use_hierarchy)
6339 mem_cgroup_iter_invalidate(root_mem_cgroup);
6340 }
6341
6342 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6343 {
6344 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6345 struct mem_cgroup_event *event, *tmp;
6346 struct cgroup_subsys_state *iter;
6347
6348 /*
6349 * Unregister events and notify userspace.
6350 * Notify userspace about cgroup removing only after rmdir of cgroup
6351 * directory to avoid race between userspace and kernelspace.
6352 */
6353 spin_lock(&memcg->event_list_lock);
6354 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
6355 list_del_init(&event->list);
6356 schedule_work(&event->remove);
6357 }
6358 spin_unlock(&memcg->event_list_lock);
6359
6360 kmem_cgroup_css_offline(memcg);
6361
6362 mem_cgroup_invalidate_reclaim_iterators(memcg);
6363
6364 /*
6365 * This requires that offlining is serialized. Right now that is
6366 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6367 */
6368 css_for_each_descendant_post(iter, css)
6369 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6370
6371 memcg_unregister_all_caches(memcg);
6372 vmpressure_cleanup(&memcg->vmpressure);
6373 }
6374
6375 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6376 {
6377 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6378 /*
6379 * XXX: css_offline() would be where we should reparent all
6380 * memory to prepare the cgroup for destruction. However,
6381 * memcg does not do css_tryget() and res_counter charging
6382 * under the same RCU lock region, which means that charging
6383 * could race with offlining. Offlining only happens to
6384 * cgroups with no tasks in them but charges can show up
6385 * without any tasks from the swapin path when the target
6386 * memcg is looked up from the swapout record and not from the
6387 * current task as it usually is. A race like this can leak
6388 * charges and put pages with stale cgroup pointers into
6389 * circulation:
6390 *
6391 * #0 #1
6392 * lookup_swap_cgroup_id()
6393 * rcu_read_lock()
6394 * mem_cgroup_lookup()
6395 * css_tryget()
6396 * rcu_read_unlock()
6397 * disable css_tryget()
6398 * call_rcu()
6399 * offline_css()
6400 * reparent_charges()
6401 * res_counter_charge()
6402 * css_put()
6403 * css_free()
6404 * pc->mem_cgroup = dead memcg
6405 * add page to lru
6406 *
6407 * The bulk of the charges are still moved in offline_css() to
6408 * avoid pinning a lot of pages in case a long-term reference
6409 * like a swapout record is deferring the css_free() to long
6410 * after offlining. But this makes sure we catch any charges
6411 * made after offlining:
6412 */
6413 mem_cgroup_reparent_charges(memcg);
6414
6415 memcg_destroy_kmem(memcg);
6416 __mem_cgroup_free(memcg);
6417 }
6418
6419 #ifdef CONFIG_MMU
6420 /* Handlers for move charge at task migration. */
6421 #define PRECHARGE_COUNT_AT_ONCE 256
6422 static int mem_cgroup_do_precharge(unsigned long count)
6423 {
6424 int ret = 0;
6425 int batch_count = PRECHARGE_COUNT_AT_ONCE;
6426 struct mem_cgroup *memcg = mc.to;
6427
6428 if (mem_cgroup_is_root(memcg)) {
6429 mc.precharge += count;
6430 /* we don't need css_get for root */
6431 return ret;
6432 }
6433 /* try to charge at once */
6434 if (count > 1) {
6435 struct res_counter *dummy;
6436 /*
6437  * "memcg" cannot be under rmdir() because cgroup_lock_live_cgroup()
6438  * has already checked that it is not removed and we are still under
6439  * the same cgroup_mutex. So we can postpone
6440 * css_get().
6441 */
6442 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
6443 goto one_by_one;
6444 if (do_swap_account && res_counter_charge(&memcg->memsw,
6445 PAGE_SIZE * count, &dummy)) {
6446 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
6447 goto one_by_one;
6448 }
6449 mc.precharge += count;
6450 return ret;
6451 }
6452 one_by_one:
6453 /* fall back to one by one charge */
6454 while (count--) {
6455 if (signal_pending(current)) {
6456 ret = -EINTR;
6457 break;
6458 }
6459 if (!batch_count--) {
6460 batch_count = PRECHARGE_COUNT_AT_ONCE;
6461 cond_resched();
6462 }
6463 ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
6464 if (ret)
6465 /* mem_cgroup_clear_mc() will do uncharge later */
6466 return ret;
6467 mc.precharge++;
6468 }
6469 return ret;
6470 }
6471
6472 /**
6473 * get_mctgt_type - get target type of moving charge
6474 * @vma: the vma the pte to be checked belongs
6475 * @addr: the address corresponding to the pte to be checked
6476 * @ptent: the pte to be checked
6477 * @target: the pointer the target page or swap ent will be stored(can be NULL)
6478 *
6479 * Returns
6480 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
6481 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6482 * move charge. if @target is not NULL, the page is stored in target->page
6483 * with extra refcnt got(Callers should handle it).
6484 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6485 * target for charge migration. if @target is not NULL, the entry is stored
6486 * in target->ent.
6487 *
6488 * Called with pte lock held.
6489 */
6490 union mc_target {
6491 struct page *page;
6492 swp_entry_t ent;
6493 };
6494
6495 enum mc_target_type {
6496 MC_TARGET_NONE = 0,
6497 MC_TARGET_PAGE,
6498 MC_TARGET_SWAP,
6499 };
6500
6501 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6502 unsigned long addr, pte_t ptent)
6503 {
6504 struct page *page = vm_normal_page(vma, addr, ptent);
6505
6506 if (!page || !page_mapped(page))
6507 return NULL;
6508 if (PageAnon(page)) {
6509 /* we don't move shared anon */
6510 if (!move_anon())
6511 return NULL;
6512 } else if (!move_file())
6513 /* we ignore mapcount for file pages */
6514 return NULL;
6515 if (!get_page_unless_zero(page))
6516 return NULL;
6517
6518 return page;
6519 }
6520
6521 #ifdef CONFIG_SWAP
6522 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6523 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6524 {
6525 struct page *page = NULL;
6526 swp_entry_t ent = pte_to_swp_entry(ptent);
6527
6528 if (!move_anon() || non_swap_entry(ent))
6529 return NULL;
6530 /*
6531 * Because lookup_swap_cache() updates some statistics counter,
6532 * we call find_get_page() with swapper_space directly.
6533 */
6534 page = find_get_page(swap_address_space(ent), ent.val);
6535 if (do_swap_account)
6536 entry->val = ent.val;
6537
6538 return page;
6539 }
6540 #else
6541 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6542 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6543 {
6544 return NULL;
6545 }
6546 #endif
6547
6548 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6549 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6550 {
6551 struct page *page = NULL;
6552 struct address_space *mapping;
6553 pgoff_t pgoff;
6554
6555 if (!vma->vm_file) /* anonymous vma */
6556 return NULL;
6557 if (!move_file())
6558 return NULL;
6559
6560 mapping = vma->vm_file->f_mapping;
6561 if (pte_none(ptent))
6562 pgoff = linear_page_index(vma, addr);
6563 else /* pte_file(ptent) is true */
6564 pgoff = pte_to_pgoff(ptent);
6565
6566 /* page is moved even if it's not RSS of this task (page-faulted). */
6567 #ifdef CONFIG_SWAP
6568 /* shmem/tmpfs may report page out on swap: account for that too. */
6569 if (shmem_mapping(mapping)) {
6570 page = find_get_entry(mapping, pgoff);
6571 if (radix_tree_exceptional_entry(page)) {
6572 swp_entry_t swp = radix_to_swp_entry(page);
6573 if (do_swap_account)
6574 *entry = swp;
6575 page = find_get_page(swap_address_space(swp), swp.val);
6576 }
6577 } else
6578 page = find_get_page(mapping, pgoff);
6579 #else
6580 page = find_get_page(mapping, pgoff);
6581 #endif
6582 return page;
6583 }
6584
6585 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6586 unsigned long addr, pte_t ptent, union mc_target *target)
6587 {
6588 struct page *page = NULL;
6589 struct page_cgroup *pc;
6590 enum mc_target_type ret = MC_TARGET_NONE;
6591 swp_entry_t ent = { .val = 0 };
6592
6593 if (pte_present(ptent))
6594 page = mc_handle_present_pte(vma, addr, ptent);
6595 else if (is_swap_pte(ptent))
6596 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
6597 else if (pte_none(ptent) || pte_file(ptent))
6598 page = mc_handle_file_pte(vma, addr, ptent, &ent);
6599
6600 if (!page && !ent.val)
6601 return ret;
6602 if (page) {
6603 pc = lookup_page_cgroup(page);
6604 /*
6605  * Do only a loose check w/o taking the page_cgroup lock.
6606  * mem_cgroup_move_account() checks whether the pc is valid
6607  * under the lock.
6608 */
6609 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6610 ret = MC_TARGET_PAGE;
6611 if (target)
6612 target->page = page;
6613 }
6614 if (!ret || !target)
6615 put_page(page);
6616 }
6617 /* There is a swap entry and a page doesn't exist or isn't charged */
6618 if (ent.val && !ret &&
6619 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6620 ret = MC_TARGET_SWAP;
6621 if (target)
6622 target->ent = ent;
6623 }
6624 return ret;
6625 }
6626
6627 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6628 /*
6629 * We don't consider swapping or file mapped pages because THP does not
6630 * support them for now.
6631 * Caller should make sure that pmd_trans_huge(pmd) is true.
6632 */
6633 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6634 unsigned long addr, pmd_t pmd, union mc_target *target)
6635 {
6636 struct page *page = NULL;
6637 struct page_cgroup *pc;
6638 enum mc_target_type ret = MC_TARGET_NONE;
6639
6640 page = pmd_page(pmd);
6641 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6642 if (!move_anon())
6643 return ret;
6644 pc = lookup_page_cgroup(page);
6645 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6646 ret = MC_TARGET_PAGE;
6647 if (target) {
6648 get_page(page);
6649 target->page = page;
6650 }
6651 }
6652 return ret;
6653 }
6654 #else
6655 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6656 unsigned long addr, pmd_t pmd, union mc_target *target)
6657 {
6658 return MC_TARGET_NONE;
6659 }
6660 #endif
6661
6662 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6663 unsigned long addr, unsigned long end,
6664 struct mm_walk *walk)
6665 {
6666 struct vm_area_struct *vma = walk->private;
6667 pte_t *pte;
6668 spinlock_t *ptl;
6669
6670 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6671 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6672 mc.precharge += HPAGE_PMD_NR;
6673 spin_unlock(ptl);
6674 return 0;
6675 }
6676
6677 if (pmd_trans_unstable(pmd))
6678 return 0;
6679 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6680 for (; addr != end; pte++, addr += PAGE_SIZE)
6681 if (get_mctgt_type(vma, addr, *pte, NULL))
6682 mc.precharge++; /* increment precharge temporarily */
6683 pte_unmap_unlock(pte - 1, ptl);
6684 cond_resched();
6685
6686 return 0;
6687 }
6688
6689 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6690 {
6691 unsigned long precharge;
6692 struct vm_area_struct *vma;
6693
6694 down_read(&mm->mmap_sem);
6695 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6696 struct mm_walk mem_cgroup_count_precharge_walk = {
6697 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6698 .mm = mm,
6699 .private = vma,
6700 };
6701 if (is_vm_hugetlb_page(vma))
6702 continue;
6703 walk_page_range(vma->vm_start, vma->vm_end,
6704 &mem_cgroup_count_precharge_walk);
6705 }
6706 up_read(&mm->mmap_sem);
6707
6708 precharge = mc.precharge;
6709 mc.precharge = 0;
6710
6711 return precharge;
6712 }
6713
6714 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6715 {
6716 unsigned long precharge = mem_cgroup_count_precharge(mm);
6717
6718 VM_BUG_ON(mc.moving_task);
6719 mc.moving_task = current;
6720 return mem_cgroup_do_precharge(precharge);
6721 }
6722
6723 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6724 static void __mem_cgroup_clear_mc(void)
6725 {
6726 struct mem_cgroup *from = mc.from;
6727 struct mem_cgroup *to = mc.to;
6728 int i;
6729
6730 /* we must uncharge all the leftover precharges from mc.to */
6731 if (mc.precharge) {
6732 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
6733 mc.precharge = 0;
6734 }
6735 /*
6736 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6737 * we must uncharge here.
6738 */
6739 if (mc.moved_charge) {
6740 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6741 mc.moved_charge = 0;
6742 }
6743 /* we must fixup refcnts and charges */
6744 if (mc.moved_swap) {
6745 /* uncharge swap account from the old cgroup */
6746 if (!mem_cgroup_is_root(mc.from))
6747 res_counter_uncharge(&mc.from->memsw,
6748 PAGE_SIZE * mc.moved_swap);
6749
6750 for (i = 0; i < mc.moved_swap; i++)
6751 css_put(&mc.from->css);
6752
6753 if (!mem_cgroup_is_root(mc.to)) {
6754 /*
6755 * we charged both to->res and to->memsw, so we should
6756 * uncharge to->res.
6757 */
6758 res_counter_uncharge(&mc.to->res,
6759 PAGE_SIZE * mc.moved_swap);
6760 }
6761 /* we've already done css_get(mc.to) */
6762 mc.moved_swap = 0;
6763 }
6764 memcg_oom_recover(from);
6765 memcg_oom_recover(to);
6766 wake_up_all(&mc.waitq);
6767 }
6768
6769 static void mem_cgroup_clear_mc(void)
6770 {
6771 struct mem_cgroup *from = mc.from;
6772
6773 /*
6774 * we must clear moving_task before waking up waiters at the end of
6775 * task migration.
6776 */
6777 mc.moving_task = NULL;
6778 __mem_cgroup_clear_mc();
6779 spin_lock(&mc.lock);
6780 mc.from = NULL;
6781 mc.to = NULL;
6782 spin_unlock(&mc.lock);
6783 mem_cgroup_end_move(from);
6784 }
6785
6786 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6787 struct cgroup_taskset *tset)
6788 {
6789 struct task_struct *p = cgroup_taskset_first(tset);
6790 int ret = 0;
6791 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6792 unsigned long move_charge_at_immigrate;
6793
6794 /*
6795  * We are now committed to this value, whatever it is. Changes in this
6796 * tunable will only affect upcoming migrations, not the current one.
6797 * So we need to save it, and keep it going.
6798 */
6799 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
6800 if (move_charge_at_immigrate) {
6801 struct mm_struct *mm;
6802 struct mem_cgroup *from = mem_cgroup_from_task(p);
6803
6804 VM_BUG_ON(from == memcg);
6805
6806 mm = get_task_mm(p);
6807 if (!mm)
6808 return 0;
6809 /* We move charges only when we move the owner of the mm */
6810 if (mm->owner == p) {
6811 VM_BUG_ON(mc.from);
6812 VM_BUG_ON(mc.to);
6813 VM_BUG_ON(mc.precharge);
6814 VM_BUG_ON(mc.moved_charge);
6815 VM_BUG_ON(mc.moved_swap);
6816 mem_cgroup_start_move(from);
6817 spin_lock(&mc.lock);
6818 mc.from = from;
6819 mc.to = memcg;
6820 mc.immigrate_flags = move_charge_at_immigrate;
6821 spin_unlock(&mc.lock);
6822 /* We set mc.moving_task later */
6823
6824 ret = mem_cgroup_precharge_mc(mm);
6825 if (ret)
6826 mem_cgroup_clear_mc();
6827 }
6828 mmput(mm);
6829 }
6830 return ret;
6831 }
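/*
 * Illustrative only (cgroup path and pid are placeholders): charge moving is
 * armed from userspace before migrating a task, roughly:
 *
 *	# echo 3 > /sys/fs/cgroup/memory/dest/memory.move_charge_at_immigrate
 *	# echo <pid> > /sys/fs/cgroup/memory/dest/tasks
 *
 * In the v1 interface bit 0 selects anonymous pages and bit 1 file pages
 * (compare move_anon()/move_file() used by get_mctgt_type() above); the
 * precharge taken in can_attach() above is then consumed by
 * mem_cgroup_move_charge() during the attach phase below.
 */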
6832
6833 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6834 struct cgroup_taskset *tset)
6835 {
6836 mem_cgroup_clear_mc();
6837 }
6838
6839 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6840 unsigned long addr, unsigned long end,
6841 struct mm_walk *walk)
6842 {
6843 int ret = 0;
6844 struct vm_area_struct *vma = walk->private;
6845 pte_t *pte;
6846 spinlock_t *ptl;
6847 enum mc_target_type target_type;
6848 union mc_target target;
6849 struct page *page;
6850 struct page_cgroup *pc;
6851
6852 /*
6853 * We don't take compound_lock() here but no race with splitting thp
6854 * happens because:
6855 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6856 * under splitting, which means there's no concurrent thp split,
6857 * - if another thread runs into split_huge_page() just after we
6858 * entered this if-block, the thread must wait for page table lock
6859 * to be unlocked in __split_huge_page_splitting(), where the main
6860 * part of thp split is not executed yet.
6861 */
6862 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6863 if (mc.precharge < HPAGE_PMD_NR) {
6864 spin_unlock(ptl);
6865 return 0;
6866 }
6867 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6868 if (target_type == MC_TARGET_PAGE) {
6869 page = target.page;
6870 if (!isolate_lru_page(page)) {
6871 pc = lookup_page_cgroup(page);
6872 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6873 pc, mc.from, mc.to)) {
6874 mc.precharge -= HPAGE_PMD_NR;
6875 mc.moved_charge += HPAGE_PMD_NR;
6876 }
6877 putback_lru_page(page);
6878 }
6879 put_page(page);
6880 }
6881 spin_unlock(ptl);
6882 return 0;
6883 }
6884
6885 if (pmd_trans_unstable(pmd))
6886 return 0;
6887 retry:
6888 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6889 for (; addr != end; addr += PAGE_SIZE) {
6890 pte_t ptent = *(pte++);
6891 swp_entry_t ent;
6892
6893 if (!mc.precharge)
6894 break;
6895
6896 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6897 case MC_TARGET_PAGE:
6898 page = target.page;
6899 if (isolate_lru_page(page))
6900 goto put;
6901 pc = lookup_page_cgroup(page);
6902 if (!mem_cgroup_move_account(page, 1, pc,
6903 mc.from, mc.to)) {
6904 mc.precharge--;
6905 /* we uncharge from mc.from later. */
6906 mc.moved_charge++;
6907 }
6908 putback_lru_page(page);
6909 put: /* get_mctgt_type() gets the page */
6910 put_page(page);
6911 break;
6912 case MC_TARGET_SWAP:
6913 ent = target.ent;
6914 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6915 mc.precharge--;
6916 /* we fixup refcnts and charges later. */
6917 mc.moved_swap++;
6918 }
6919 break;
6920 default:
6921 break;
6922 }
6923 }
6924 pte_unmap_unlock(pte - 1, ptl);
6925 cond_resched();
6926
6927 if (addr != end) {
6928 /*
6929 * We have consumed all precharges we got in can_attach().
6930  * We try to charge one by one, but don't do any additional
6931  * charges to mc.to if we have already failed to charge once in the
6932  * attach() phase.
6933 */
6934 ret = mem_cgroup_do_precharge(1);
6935 if (!ret)
6936 goto retry;
6937 }
6938
6939 return ret;
6940 }
6941
6942 static void mem_cgroup_move_charge(struct mm_struct *mm)
6943 {
6944 struct vm_area_struct *vma;
6945
6946 lru_add_drain_all();
6947 retry:
6948 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6949 /*
6950  * Someone who is holding the mmap_sem might be waiting in
6951  * the waitq. So we cancel all extra charges, wake up all waiters,
6952 * and retry. Because we cancel precharges, we might not be able
6953 * to move enough charges, but moving charge is a best-effort
6954 * feature anyway, so it wouldn't be a big problem.
6955 */
6956 __mem_cgroup_clear_mc();
6957 cond_resched();
6958 goto retry;
6959 }
6960 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6961 int ret;
6962 struct mm_walk mem_cgroup_move_charge_walk = {
6963 .pmd_entry = mem_cgroup_move_charge_pte_range,
6964 .mm = mm,
6965 .private = vma,
6966 };
6967 if (is_vm_hugetlb_page(vma))
6968 continue;
6969 ret = walk_page_range(vma->vm_start, vma->vm_end,
6970 &mem_cgroup_move_charge_walk);
6971 if (ret)
6972 /*
6973  * This means we have consumed all precharges and failed to
6974  * do an additional charge. Just abandon here.
6975 */
6976 break;
6977 }
6978 up_read(&mm->mmap_sem);
6979 }
6980
6981 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
6982 struct cgroup_taskset *tset)
6983 {
6984 struct task_struct *p = cgroup_taskset_first(tset);
6985 struct mm_struct *mm = get_task_mm(p);
6986
6987 if (mm) {
6988 if (mc.to)
6989 mem_cgroup_move_charge(mm);
6990 mmput(mm);
6991 }
6992 if (mc.to)
6993 mem_cgroup_clear_mc();
6994 }
6995 #else /* !CONFIG_MMU */
6996 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6997 struct cgroup_taskset *tset)
6998 {
6999 return 0;
7000 }
7001 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
7002 struct cgroup_taskset *tset)
7003 {
7004 }
7005 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7006 struct cgroup_taskset *tset)
7007 {
7008 }
7009 #endif
7010
7011 /*
7012 * Cgroup retains root cgroups across [un]mount cycles making it necessary
7013 * to verify sane_behavior flag on each mount attempt.
7014 */
7015 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
7016 {
7017 /*
7018 * use_hierarchy is forced with sane_behavior. cgroup core
7019 * guarantees that @root doesn't have any children, so turning it
7020 * on for the root memcg is enough.
7021 */
7022 if (cgroup_sane_behavior(root_css->cgroup))
7023 mem_cgroup_from_css(root_css)->use_hierarchy = true;
7024 }
7025
7026 struct cgroup_subsys memory_cgrp_subsys = {
7027 .css_alloc = mem_cgroup_css_alloc,
7028 .css_online = mem_cgroup_css_online,
7029 .css_offline = mem_cgroup_css_offline,
7030 .css_free = mem_cgroup_css_free,
7031 .can_attach = mem_cgroup_can_attach,
7032 .cancel_attach = mem_cgroup_cancel_attach,
7033 .attach = mem_cgroup_move_task,
7034 .bind = mem_cgroup_bind,
7035 .base_cftypes = mem_cgroup_files,
7036 .early_init = 0,
7037 };
7038
7039 #ifdef CONFIG_MEMCG_SWAP
7040 static int __init enable_swap_account(char *s)
7041 {
7042 if (!strcmp(s, "1"))
7043 really_do_swap_account = 1;
7044 else if (!strcmp(s, "0"))
7045 really_do_swap_account = 0;
7046 return 1;
7047 }
7048 __setup("swapaccount=", enable_swap_account);
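/*
 * Example (kernel command line, not code): booting with "swapaccount=0"
 * disables swap accounting and "swapaccount=1" enables it; the flag parsed
 * here is acted upon in enable_swap_cgroup() below and has an effect only
 * when CONFIG_MEMCG_SWAP is built in.
 */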
7049
7050 static void __init memsw_file_init(void)
7051 {
7052 WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
7053 }
7054
7055 static void __init enable_swap_cgroup(void)
7056 {
7057 if (!mem_cgroup_disabled() && really_do_swap_account) {
7058 do_swap_account = 1;
7059 memsw_file_init();
7060 }
7061 }
7062
7063 #else
7064 static void __init enable_swap_cgroup(void)
7065 {
7066 }
7067 #endif
7068
7069 /*
7070 * subsys_initcall() for memory controller.
7071 *
7072 * Some parts like hotcpu_notifier() have to be initialized from this context
7073 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7074 * everything that doesn't depend on a specific mem_cgroup structure should
7075 * be initialized from here.
7076 */
7077 static int __init mem_cgroup_init(void)
7078 {
7079 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
7080 enable_swap_cgroup();
7081 mem_cgroup_soft_limit_tree_init();
7082 memcg_stock_init();
7083 return 0;
7084 }
7085 subsys_initcall(mem_cgroup_init);