memcg, slab: cleanup memcg cache creation
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* For remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"writeback",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024
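
/*
 * Illustrative sketch (not part of the original file): how the targets above
 * are consumed. mem_cgroup_event_ratelimit() further down compares the
 * per-cpu nr_page_events counter against the saved target and, once the
 * target has been passed, advances it by the step defined here, roughly:
 *
 *	val  = __this_cpu_read(memcg->stat->nr_page_events);
 *	next = __this_cpu_read(memcg->stat->targets[MEM_CGROUP_TARGET_THRESH]);
 *	if ((long)next - (long)val < 0) {
 *		next = val + THRESHOLDS_EVENTS_TARGET;	(128 page events)
 *		__this_cpu_write(memcg->stat->targets[MEM_CGROUP_TARGET_THRESH], next);
 *		... and memcg_check_events() then runs mem_cgroup_threshold()
 *	}
 *
 * So threshold notifications fire roughly every 128 page events, while
 * soft-limit tree updates and NUMA info refreshes fire roughly every 1024.
 */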
e9f8974f 138
d52aa412 139struct mem_cgroup_stat_cpu {
7a159cc9 140 long count[MEM_CGROUP_STAT_NSTATS];
e9f8974f 141 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
13114716 142 unsigned long nr_page_events;
7a159cc9 143 unsigned long targets[MEM_CGROUP_NTARGETS];
d52aa412
KH
144};
145
527a5ec9 146struct mem_cgroup_reclaim_iter {
5f578161
MH
147 /*
148 * last scanned hierarchy member. Valid only if last_dead_count
149 * matches memcg->dead_count of the hierarchy root group.
150 */
542f85f9 151 struct mem_cgroup *last_visited;
d2ab70aa 152 int last_dead_count;
5f578161 153
527a5ec9
JW
154 /* scan generation, increased every round-trip */
155 unsigned int generation;
156};
157
6d12e2d8
KH
158/*
159 * per-zone information in memory controller.
160 */
6d12e2d8 161struct mem_cgroup_per_zone {
6290df54 162 struct lruvec lruvec;
1eb49272 163 unsigned long lru_size[NR_LRU_LISTS];
3e2f41f1 164
527a5ec9
JW
165 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
166
bb4cc1a8
AM
167 struct rb_node tree_node; /* RB tree node */
168 unsigned long long usage_in_excess;/* Set to the value by which */
169 /* the soft limit is exceeded*/
170 bool on_tree;
d79154bb 171 struct mem_cgroup *memcg; /* Back pointer, we cannot */
4e416953 172 /* use container_of */
6d12e2d8 173};
6d12e2d8
KH
174
175struct mem_cgroup_per_node {
176 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
177};
178
bb4cc1a8
AM
179/*
180 * Cgroups above their limits are maintained in a RB-Tree, independent of
181 * their hierarchy representation
182 */
183
184struct mem_cgroup_tree_per_zone {
185 struct rb_root rb_root;
186 spinlock_t lock;
187};
188
189struct mem_cgroup_tree_per_node {
190 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
191};
192
193struct mem_cgroup_tree {
194 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
195};
196
197static struct mem_cgroup_tree soft_limit_tree __read_mostly;
198
2e72b634
KS
199struct mem_cgroup_threshold {
200 struct eventfd_ctx *eventfd;
201 u64 threshold;
202};
203
9490ff27 204/* For threshold */
2e72b634 205struct mem_cgroup_threshold_ary {
748dad36 206 /* An array index points to threshold just below or equal to usage. */
5407a562 207 int current_threshold;
2e72b634
KS
208 /* Size of entries[] */
209 unsigned int size;
210 /* Array of thresholds */
211 struct mem_cgroup_threshold entries[0];
212};
2c488db2
KS
213
214struct mem_cgroup_thresholds {
215 /* Primary thresholds array */
216 struct mem_cgroup_threshold_ary *primary;
217 /*
218 * Spare threshold array.
219 * This is needed to make mem_cgroup_unregister_event() "never fail".
220 * It must be able to store at least primary->size - 1 entries.
221 */
222 struct mem_cgroup_threshold_ary *spare;
223};
224
9490ff27
KH
225/* for OOM */
226struct mem_cgroup_eventfd_list {
227 struct list_head list;
228 struct eventfd_ctx *eventfd;
229};
2e72b634 230
79bd9814
TH
231/*
 * cgroup_event represents events which userspace wants to receive.
233 */
3bc942f3 234struct mem_cgroup_event {
79bd9814 235 /*
59b6f873 236 * memcg which the event belongs to.
79bd9814 237 */
59b6f873 238 struct mem_cgroup *memcg;
79bd9814
TH
239 /*
240 * eventfd to signal userspace about the event.
241 */
242 struct eventfd_ctx *eventfd;
243 /*
244 * Each of these stored in a list by the cgroup.
245 */
246 struct list_head list;
fba94807
TH
247 /*
248 * register_event() callback will be used to add new userspace
249 * waiter for changes related to this event. Use eventfd_signal()
250 * on eventfd to send notification to userspace.
251 */
59b6f873 252 int (*register_event)(struct mem_cgroup *memcg,
347c4a87 253 struct eventfd_ctx *eventfd, const char *args);
fba94807
TH
254 /*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
258 */
59b6f873 259 void (*unregister_event)(struct mem_cgroup *memcg,
fba94807 260 struct eventfd_ctx *eventfd);
79bd9814
TH
261 /*
262 * All fields below needed to unregister event when
263 * userspace closes eventfd.
264 */
265 poll_table pt;
266 wait_queue_head_t *wqh;
267 wait_queue_t wait;
268 struct work_struct remove;
269};
270
c0ff4b85
R
271static void mem_cgroup_threshold(struct mem_cgroup *memcg);
272static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
2e72b634 273
8cdea7c0
BS
274/*
275 * The memory controller data structure. The memory controller controls both
276 * page cache and RSS per cgroup. We would eventually like to provide
277 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
278 * to help the administrator determine what knobs to tune.
279 *
280 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
8cdea7c0
BS
284 */
285struct mem_cgroup {
286 struct cgroup_subsys_state css;
287 /*
288 * the counter to account for memory usage
289 */
290 struct res_counter res;
59927fb9 291
70ddf637
AV
292 /* vmpressure notifications */
293 struct vmpressure vmpressure;
294
465939a1
LZ
295 /*
296 * the counter to account for mem+swap usage.
297 */
298 struct res_counter memsw;
59927fb9 299
510fc4e1
GC
300 /*
301 * the counter to account for kernel memory usage.
302 */
303 struct res_counter kmem;
18f59ea7
BS
304 /*
305 * Should the accounting and control be hierarchical, per subtree?
306 */
307 bool use_hierarchy;
510fc4e1 308 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
79dfdacc
MH
309
310 bool oom_lock;
311 atomic_t under_oom;
3812c8c8 312 atomic_t oom_wakeups;
79dfdacc 313
1f4c025b 314 int swappiness;
3c11ecf4
KH
315 /* OOM-Killer disable */
316 int oom_kill_disable;
a7885eb8 317
22a668d7
KH
318 /* set when res.limit == memsw.limit */
319 bool memsw_is_minimum;
320
2e72b634
KS
321 /* protect arrays of thresholds */
322 struct mutex thresholds_lock;
323
324 /* thresholds for memory usage. RCU-protected */
2c488db2 325 struct mem_cgroup_thresholds thresholds;
907860ed 326
2e72b634 327 /* thresholds for mem+swap usage. RCU-protected */
2c488db2 328 struct mem_cgroup_thresholds memsw_thresholds;
907860ed 329
9490ff27
KH
330 /* For oom notifier event fd */
331 struct list_head oom_notify;
185efc0f 332
7dc74be0
DN
333 /*
334 * Should we move charges of a task when a task is moved into this
335 * mem_cgroup ? And what type of charges should we move ?
336 */
f894ffa8 337 unsigned long move_charge_at_immigrate;
619d094b
KH
338 /*
339 * set > 0 if pages under this cgroup are moving to other cgroup.
340 */
341 atomic_t moving_account;
312734c0
KH
342 /* taken only while moving_account > 0 */
343 spinlock_t move_lock;
d52aa412 344 /*
c62b1a3b 345 * percpu counter.
d52aa412 346 */
3a7951b4 347 struct mem_cgroup_stat_cpu __percpu *stat;
711d3d2c
KH
348 /*
349 * used when a cpu is offlined or other synchronizations
350 * See mem_cgroup_read_stat().
351 */
352 struct mem_cgroup_stat_cpu nocpu_base;
353 spinlock_t pcp_counter_lock;
d1a4c0b3 354
5f578161 355 atomic_t dead_count;
4bd2c1ee 356#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
2e685cad 357 struct cg_proto tcp_mem;
d1a4c0b3 358#endif
2633d7a0
GC
359#if defined(CONFIG_MEMCG_KMEM)
360 /* analogous to slab_common's slab_caches list. per-memcg */
361 struct list_head memcg_slab_caches;
362 /* Not a spinlock, we can take a lot of time walking the list */
363 struct mutex slab_caches_mutex;
364 /* Index in the kmem_cache->memcg_params->memcg_caches array */
365 int kmemcg_id;
366#endif
45cf7ebd
GC
367
368 int last_scanned_node;
369#if MAX_NUMNODES > 1
370 nodemask_t scan_nodes;
371 atomic_t numainfo_events;
372 atomic_t numainfo_updating;
373#endif
70ddf637 374
fba94807
TH
375 /* List of events which userspace want to receive */
376 struct list_head event_list;
377 spinlock_t event_list_lock;
378
54f72fe0
JW
379 struct mem_cgroup_per_node *nodeinfo[0];
380 /* WARNING: nodeinfo must be the last member here */
8cdea7c0
BS
381};
382
510fc4e1
GC
383/* internal only representation about the status of kmem accounting. */
384enum {
6de64beb 385 KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
7de37682 386 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
510fc4e1
GC
387};
388
510fc4e1
GC
389#ifdef CONFIG_MEMCG_KMEM
390static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
391{
392 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
393}
7de37682
GC
394
395static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
396{
397 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
398}
399
400static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
401{
10d5ebf4
LZ
402 /*
403 * Our caller must use css_get() first, because memcg_uncharge_kmem()
404 * will call css_put() if it sees the memcg is dead.
405 */
406 smp_wmb();
7de37682
GC
407 if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
408 set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
409}
410
411static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
412{
413 return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
414 &memcg->kmem_account_flags);
415}
510fc4e1
GC
416#endif
417
7dc74be0
DN
/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
423enum move_type {
4ffef5fe 424 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
87946a72 425 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
7dc74be0
DN
426 NR_MOVE_TYPE,
427};
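
/*
 * Illustrative note (not part of the original file): because the enum above
 * is used as a bit index into "immigrate_flags", the cgroup control file
 * memory.move_charge_at_immigrate takes a bitmask, e.g. (assuming the usual
 * interface semantics):
 *
 *	echo 1 > memory.move_charge_at_immigrate	(move anon pages, bit 0)
 *	echo 2 > memory.move_charge_at_immigrate	(move file pages, bit 1)
 *	echo 3 > memory.move_charge_at_immigrate	(move both)
 */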
428
4ffef5fe
DN
429/* "mc" and its members are protected by cgroup_mutex */
430static struct move_charge_struct {
b1dd693e 431 spinlock_t lock; /* for from, to */
4ffef5fe
DN
432 struct mem_cgroup *from;
433 struct mem_cgroup *to;
ee5e8472 434 unsigned long immigrate_flags;
4ffef5fe 435 unsigned long precharge;
854ffa8d 436 unsigned long moved_charge;
483c30b5 437 unsigned long moved_swap;
8033b97c
DN
438 struct task_struct *moving_task; /* a task moving charges */
439 wait_queue_head_t waitq; /* a waitq for other context */
440} mc = {
2bd9bb20 441 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
8033b97c
DN
442 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
443};
4ffef5fe 444
90254a65
DN
445static bool move_anon(void)
446{
ee5e8472 447 return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
90254a65
DN
448}
449
87946a72
DN
450static bool move_file(void)
451{
ee5e8472 452 return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
87946a72
DN
453}
454
4e416953
BS
455/*
456 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
457 * limit reclaim to prevent infinite loops, if they ever occur.
458 */
a0db00fc 459#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
bb4cc1a8 460#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
4e416953 461
217bc319
KH
462enum charge_type {
463 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
41326c17 464 MEM_CGROUP_CHARGE_TYPE_ANON,
d13d1443 465 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
8a9478ca 466 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
c05555b5
KH
467 NR_CHARGE_TYPE,
468};
469
8c7c6e34 470/* for encoding cft->private value on file */
86ae53e1
GC
471enum res_type {
472 _MEM,
473 _MEMSWAP,
474 _OOM_TYPE,
510fc4e1 475 _KMEM,
86ae53e1
GC
476};
477
a0db00fc
KS
478#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
479#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
8c7c6e34 480#define MEMFILE_ATTR(val) ((val) & 0xffff)
9490ff27
KH
/* Used for the OOM notifier */
482#define OOM_CONTROL (0)
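
/*
 * Illustrative example (not part of the original file): the MEMFILE_* macros
 * pack a resource type and an attribute into cft->private, so with the
 * enum res_type above:
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) == (_MEMSWAP << 16) | RES_LIMIT
 *	MEMFILE_TYPE(MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)) == _MEMSWAP
 *	MEMFILE_ATTR(MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)) == RES_LIMIT
 */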
8c7c6e34 483
75822b44
BS
484/*
485 * Reclaim flags for mem_cgroup_hierarchical_reclaim
486 */
487#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
488#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
489#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
490#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
491
0999821b
GC
492/*
493 * The memcg_create_mutex will be held whenever a new cgroup is created.
494 * As a consequence, any change that needs to protect against new child cgroups
495 * appearing has to hold it as well.
496 */
497static DEFINE_MUTEX(memcg_create_mutex);
498
b2145145
WL
499struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
500{
a7c6d554 501 return s ? container_of(s, struct mem_cgroup, css) : NULL;
b2145145
WL
502}
503
70ddf637
AV
504/* Some nice accessors for the vmpressure. */
505struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
506{
507 if (!memcg)
508 memcg = root_mem_cgroup;
509 return &memcg->vmpressure;
510}
511
512struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
513{
514 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
515}
516
7ffc0edc
MH
517static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
518{
519 return (memcg == root_mem_cgroup);
520}
521
4219b2da
LZ
522/*
523 * We restrict the id in the range of [1, 65535], so it can fit into
524 * an unsigned short.
525 */
526#define MEM_CGROUP_ID_MAX USHRT_MAX
527
34c00c31
LZ
528static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
529{
530 /*
531 * The ID of the root cgroup is 0, but memcg treat 0 as an
532 * invalid ID, so we return (cgroup_id + 1).
533 */
534 return memcg->css.cgroup->id + 1;
535}
536
537static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
538{
539 struct cgroup_subsys_state *css;
540
073219e9 541 css = css_from_id(id - 1, &memory_cgrp_subsys);
34c00c31
LZ
542 return mem_cgroup_from_css(css);
543}
544
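/*
 * Illustrative note (not part of the original file): the two helpers above
 * are inverses of each other over the [1, MEM_CGROUP_ID_MAX] range, i.e. for
 * a live memcg
 *
 *	mem_cgroup_from_id(mem_cgroup_id(memcg)) == memcg
 *
 * and the root cgroup (cgroup id 0) is reported as memcg id 1, so the value 0
 * stays free to mean "no/invalid memcg".
 */
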
e1aab161 545/* Writing them here to avoid exposing memcg's inner layout */
4bd2c1ee 546#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
e1aab161 547
e1aab161
GC
548void sock_update_memcg(struct sock *sk)
549{
376be5ff 550 if (mem_cgroup_sockets_enabled) {
e1aab161 551 struct mem_cgroup *memcg;
3f134619 552 struct cg_proto *cg_proto;
e1aab161
GC
553
554 BUG_ON(!sk->sk_prot->proto_cgroup);
555
f3f511e1
GC
556 /* Socket cloning can throw us here with sk_cgrp already
557 * filled. It won't however, necessarily happen from
558 * process context. So the test for root memcg given
559 * the current task's memcg won't help us in this case.
560 *
561 * Respecting the original socket's memcg is a better
562 * decision in this case.
563 */
564 if (sk->sk_cgrp) {
565 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
5347e5ae 566 css_get(&sk->sk_cgrp->memcg->css);
f3f511e1
GC
567 return;
568 }
569
e1aab161
GC
570 rcu_read_lock();
571 memcg = mem_cgroup_from_task(current);
3f134619 572 cg_proto = sk->sk_prot->proto_cgroup(memcg);
5347e5ae
LZ
573 if (!mem_cgroup_is_root(memcg) &&
574 memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
3f134619 575 sk->sk_cgrp = cg_proto;
e1aab161
GC
576 }
577 rcu_read_unlock();
578 }
579}
580EXPORT_SYMBOL(sock_update_memcg);
581
582void sock_release_memcg(struct sock *sk)
583{
376be5ff 584 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
e1aab161
GC
585 struct mem_cgroup *memcg;
586 WARN_ON(!sk->sk_cgrp->memcg);
587 memcg = sk->sk_cgrp->memcg;
5347e5ae 588 css_put(&sk->sk_cgrp->memcg->css);
e1aab161
GC
589 }
590}
d1a4c0b3
GC
591
592struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
593{
594 if (!memcg || mem_cgroup_is_root(memcg))
595 return NULL;
596
2e685cad 597 return &memcg->tcp_mem;
d1a4c0b3
GC
598}
599EXPORT_SYMBOL(tcp_proto_cgroup);
e1aab161 600
3f134619
GC
601static void disarm_sock_keys(struct mem_cgroup *memcg)
602{
2e685cad 603 if (!memcg_proto_activated(&memcg->tcp_mem))
3f134619
GC
604 return;
605 static_key_slow_dec(&memcg_socket_limit_enabled);
606}
607#else
608static void disarm_sock_keys(struct mem_cgroup *memcg)
609{
610}
611#endif
612
a8964b9b 613#ifdef CONFIG_MEMCG_KMEM
55007d84
GC
614/*
615 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
b8627835
LZ
616 * The main reason for not using cgroup id for this:
617 * this works better in sparse environments, where we have a lot of memcgs,
618 * but only a few kmem-limited. Or also, if we have, for instance, 200
619 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
620 * 200 entry array for that.
55007d84
GC
621 *
622 * The current size of the caches array is stored in
623 * memcg_limited_groups_array_size. It will double each time we have to
624 * increase it.
625 */
626static DEFINE_IDA(kmem_limited_groups);
749c5415
GC
627int memcg_limited_groups_array_size;
628
55007d84
GC
629/*
630 * MIN_SIZE is different than 1, because we would like to avoid going through
631 * the alloc/free process all the time. In a small machine, 4 kmem-limited
632 * cgroups is a reasonable guess. In the future, it could be a parameter or
633 * tunable, but that is strictly not necessary.
634 *
b8627835 635 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
55007d84
GC
636 * this constant directly from cgroup, but it is understandable that this is
637 * better kept as an internal representation in cgroup.c. In any case, the
b8627835 638 * cgrp_id space is not getting any smaller, and we don't have to necessarily
55007d84
GC
639 * increase ours as well if it increases.
640 */
641#define MEMCG_CACHES_MIN_SIZE 4
b8627835 642#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
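
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical and details may differ): the index allocated from
 * kmem_limited_groups becomes memcg->kmemcg_id, and a kmem-limited memcg's
 * private copy of a global slab cache is then looked up by indexing the root
 * cache's memcg_caches array with it, conceptually:
 *
 *	static struct kmem_cache *memcg_cache_of(struct kmem_cache *root,
 *						 struct mem_cgroup *memcg)
 *	{
 *		return root->memcg_params->memcg_caches[memcg->kmemcg_id];
 *	}
 *
 * which is why the array only needs memcg_limited_groups_array_size entries
 * and can stay small when few memcgs are kmem-limited.
 */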
55007d84 643
d7f25f8a
GC
644/*
645 * A lot of the calls to the cache allocation functions are expected to be
646 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and such to see this symbol as well.
649 */
a8964b9b 650struct static_key memcg_kmem_enabled_key;
d7f25f8a 651EXPORT_SYMBOL(memcg_kmem_enabled_key);
a8964b9b
GC
652
653static void disarm_kmem_keys(struct mem_cgroup *memcg)
654{
55007d84 655 if (memcg_kmem_is_active(memcg)) {
a8964b9b 656 static_key_slow_dec(&memcg_kmem_enabled_key);
55007d84
GC
657 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
658 }
bea207c8
GC
659 /*
660 * This check can't live in kmem destruction function,
661 * since the charges will outlive the cgroup
662 */
663 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
a8964b9b
GC
664}
665#else
666static void disarm_kmem_keys(struct mem_cgroup *memcg)
667{
668}
669#endif /* CONFIG_MEMCG_KMEM */
670
671static void disarm_static_keys(struct mem_cgroup *memcg)
672{
673 disarm_sock_keys(memcg);
674 disarm_kmem_keys(memcg);
675}
676
c0ff4b85 677static void drain_all_stock_async(struct mem_cgroup *memcg);
8c7c6e34 678
f64c3f54 679static struct mem_cgroup_per_zone *
c0ff4b85 680mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
f64c3f54 681{
45cf7ebd 682 VM_BUG_ON((unsigned)nid >= nr_node_ids);
54f72fe0 683 return &memcg->nodeinfo[nid]->zoneinfo[zid];
f64c3f54
BS
684}
685
c0ff4b85 686struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
d324236b 687{
c0ff4b85 688 return &memcg->css;
d324236b
WF
689}
690
f64c3f54 691static struct mem_cgroup_per_zone *
c0ff4b85 692page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
f64c3f54 693{
97a6c37b
JW
694 int nid = page_to_nid(page);
695 int zid = page_zonenum(page);
f64c3f54 696
c0ff4b85 697 return mem_cgroup_zoneinfo(memcg, nid, zid);
f64c3f54
BS
698}
699
bb4cc1a8
AM
700static struct mem_cgroup_tree_per_zone *
701soft_limit_tree_node_zone(int nid, int zid)
702{
703 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
704}
705
706static struct mem_cgroup_tree_per_zone *
707soft_limit_tree_from_page(struct page *page)
708{
709 int nid = page_to_nid(page);
710 int zid = page_zonenum(page);
711
712 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
713}
714
715static void
716__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
717 struct mem_cgroup_per_zone *mz,
718 struct mem_cgroup_tree_per_zone *mctz,
719 unsigned long long new_usage_in_excess)
720{
721 struct rb_node **p = &mctz->rb_root.rb_node;
722 struct rb_node *parent = NULL;
723 struct mem_cgroup_per_zone *mz_node;
724
725 if (mz->on_tree)
726 return;
727
728 mz->usage_in_excess = new_usage_in_excess;
729 if (!mz->usage_in_excess)
730 return;
731 while (*p) {
732 parent = *p;
733 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
734 tree_node);
735 if (mz->usage_in_excess < mz_node->usage_in_excess)
736 p = &(*p)->rb_left;
737 /*
738 * We can't avoid mem cgroups that are over their soft
739 * limit by the same amount
740 */
741 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
742 p = &(*p)->rb_right;
743 }
744 rb_link_node(&mz->tree_node, parent, p);
745 rb_insert_color(&mz->tree_node, &mctz->rb_root);
746 mz->on_tree = true;
747}
748
749static void
750__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
751 struct mem_cgroup_per_zone *mz,
752 struct mem_cgroup_tree_per_zone *mctz)
753{
754 if (!mz->on_tree)
755 return;
756 rb_erase(&mz->tree_node, &mctz->rb_root);
757 mz->on_tree = false;
758}
759
760static void
761mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
762 struct mem_cgroup_per_zone *mz,
763 struct mem_cgroup_tree_per_zone *mctz)
764{
765 spin_lock(&mctz->lock);
766 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
767 spin_unlock(&mctz->lock);
768}
769
770
771static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
772{
773 unsigned long long excess;
774 struct mem_cgroup_per_zone *mz;
775 struct mem_cgroup_tree_per_zone *mctz;
776 int nid = page_to_nid(page);
777 int zid = page_zonenum(page);
778 mctz = soft_limit_tree_from_page(page);
779
780 /*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
783 */
784 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
785 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
786 excess = res_counter_soft_limit_excess(&memcg->res);
787 /*
788 * We have to update the tree if mz is on RB-tree or
789 * mem is over its softlimit.
790 */
791 if (excess || mz->on_tree) {
792 spin_lock(&mctz->lock);
793 /* if on-tree, remove it */
794 if (mz->on_tree)
795 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
796 /*
797 * Insert again. mz->usage_in_excess will be updated.
798 * If excess is 0, no tree ops.
799 */
800 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
801 spin_unlock(&mctz->lock);
802 }
803 }
804}
805
806static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
807{
808 int node, zone;
809 struct mem_cgroup_per_zone *mz;
810 struct mem_cgroup_tree_per_zone *mctz;
811
812 for_each_node(node) {
813 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
814 mz = mem_cgroup_zoneinfo(memcg, node, zone);
815 mctz = soft_limit_tree_node_zone(node, zone);
816 mem_cgroup_remove_exceeded(memcg, mz, mctz);
817 }
818 }
819}
820
821static struct mem_cgroup_per_zone *
822__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
823{
824 struct rb_node *rightmost = NULL;
825 struct mem_cgroup_per_zone *mz;
826
827retry:
828 mz = NULL;
829 rightmost = rb_last(&mctz->rb_root);
830 if (!rightmost)
831 goto done; /* Nothing to reclaim from */
832
833 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
834 /*
	 * Remove the node now, but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
838 */
839 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
840 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
841 !css_tryget(&mz->memcg->css))
842 goto retry;
843done:
844 return mz;
845}
846
847static struct mem_cgroup_per_zone *
848mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
849{
850 struct mem_cgroup_per_zone *mz;
851
852 spin_lock(&mctz->lock);
853 mz = __mem_cgroup_largest_soft_limit_node(mctz);
854 spin_unlock(&mctz->lock);
855 return mz;
856}
857
711d3d2c
KH
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement "quick" reads, trading precision of the value
 * against the cost of reading it. We may get a chance to implement a similar
 * periodic synchronization of the counters in memcg.
 *
 * But this _read() function is currently used for the user interface. The
 * user accounts memory usage by memory cgroup and _always_ requires an exact
 * value because of that accounting. Even if we provided a quick-and-fuzzy
 * read, we would still have to visit all online cpus and compute the sum, so
 * for now the unnecessary synchronization is not implemented (it is only
 * implemented for cpu hotplug).
 *
 * If there are kernel-internal users that could make do with a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
c0ff4b85 877static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
7a159cc9 878 enum mem_cgroup_stat_index idx)
c62b1a3b 879{
7a159cc9 880 long val = 0;
c62b1a3b 881 int cpu;
c62b1a3b 882
711d3d2c
KH
883 get_online_cpus();
884 for_each_online_cpu(cpu)
c0ff4b85 885 val += per_cpu(memcg->stat->count[idx], cpu);
711d3d2c 886#ifdef CONFIG_HOTPLUG_CPU
c0ff4b85
R
887 spin_lock(&memcg->pcp_counter_lock);
888 val += memcg->nocpu_base.count[idx];
889 spin_unlock(&memcg->pcp_counter_lock);
711d3d2c
KH
890#endif
891 put_online_cpus();
c62b1a3b
KH
892 return val;
893}
894
c0ff4b85 895static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
0c3e73e8
BS
896 bool charge)
897{
898 int val = (charge) ? 1 : -1;
bff6bb83 899 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
0c3e73e8
BS
900}
901
c0ff4b85 902static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
e9f8974f
JW
903 enum mem_cgroup_events_index idx)
904{
905 unsigned long val = 0;
906 int cpu;
907
9c567512 908 get_online_cpus();
e9f8974f 909 for_each_online_cpu(cpu)
c0ff4b85 910 val += per_cpu(memcg->stat->events[idx], cpu);
e9f8974f 911#ifdef CONFIG_HOTPLUG_CPU
c0ff4b85
R
912 spin_lock(&memcg->pcp_counter_lock);
913 val += memcg->nocpu_base.events[idx];
914 spin_unlock(&memcg->pcp_counter_lock);
e9f8974f 915#endif
9c567512 916 put_online_cpus();
e9f8974f
JW
917 return val;
918}
919
c0ff4b85 920static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
b070e65c 921 struct page *page,
b2402857 922 bool anon, int nr_pages)
d52aa412 923{
b2402857
KH
924 /*
925 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
926 * counted as CACHE even if it's on ANON LRU.
927 */
928 if (anon)
929 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
c0ff4b85 930 nr_pages);
d52aa412 931 else
b2402857 932 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
c0ff4b85 933 nr_pages);
55e462b0 934
b070e65c
DR
935 if (PageTransHuge(page))
936 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
937 nr_pages);
938
e401f176
KH
939 /* pagein of a big page is an event. So, ignore page size */
940 if (nr_pages > 0)
c0ff4b85 941 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
3751d604 942 else {
c0ff4b85 943 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
3751d604
KH
944 nr_pages = -nr_pages; /* for event */
945 }
e401f176 946
13114716 947 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
6d12e2d8
KH
948}
949
bb2a0de9 950unsigned long
4d7dcca2 951mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
074291fe
KK
952{
953 struct mem_cgroup_per_zone *mz;
954
955 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
956 return mz->lru_size[lru];
957}
958
959static unsigned long
c0ff4b85 960mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
bb2a0de9 961 unsigned int lru_mask)
889976db
YH
962{
963 struct mem_cgroup_per_zone *mz;
f156ab93 964 enum lru_list lru;
bb2a0de9
KH
965 unsigned long ret = 0;
966
c0ff4b85 967 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
bb2a0de9 968
f156ab93
HD
969 for_each_lru(lru) {
970 if (BIT(lru) & lru_mask)
971 ret += mz->lru_size[lru];
bb2a0de9
KH
972 }
973 return ret;
974}
975
976static unsigned long
c0ff4b85 977mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
bb2a0de9
KH
978 int nid, unsigned int lru_mask)
979{
889976db
YH
980 u64 total = 0;
981 int zid;
982
bb2a0de9 983 for (zid = 0; zid < MAX_NR_ZONES; zid++)
c0ff4b85
R
984 total += mem_cgroup_zone_nr_lru_pages(memcg,
985 nid, zid, lru_mask);
bb2a0de9 986
889976db
YH
987 return total;
988}
bb2a0de9 989
c0ff4b85 990static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
bb2a0de9 991 unsigned int lru_mask)
6d12e2d8 992{
889976db 993 int nid;
6d12e2d8
KH
994 u64 total = 0;
995
31aaea4a 996 for_each_node_state(nid, N_MEMORY)
c0ff4b85 997 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
6d12e2d8 998 return total;
d52aa412
KH
999}
1000
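/*
 * Illustrative usage (not part of the original file): lru_mask is a bitmask
 * over enum lru_list, so e.g. the number of file pages charged to a memcg
 * across all nodes could be obtained with
 *
 *	mem_cgroup_nr_lru_pages(memcg,
 *				BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE));
 */
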
f53d7ce3
JW
1001static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1002 enum mem_cgroup_events_target target)
7a159cc9
JW
1003{
1004 unsigned long val, next;
1005
13114716 1006 val = __this_cpu_read(memcg->stat->nr_page_events);
4799401f 1007 next = __this_cpu_read(memcg->stat->targets[target]);
7a159cc9 1008 /* from time_after() in jiffies.h */
f53d7ce3
JW
1009 if ((long)next - (long)val < 0) {
1010 switch (target) {
1011 case MEM_CGROUP_TARGET_THRESH:
1012 next = val + THRESHOLDS_EVENTS_TARGET;
1013 break;
bb4cc1a8
AM
1014 case MEM_CGROUP_TARGET_SOFTLIMIT:
1015 next = val + SOFTLIMIT_EVENTS_TARGET;
1016 break;
f53d7ce3
JW
1017 case MEM_CGROUP_TARGET_NUMAINFO:
1018 next = val + NUMAINFO_EVENTS_TARGET;
1019 break;
1020 default:
1021 break;
1022 }
1023 __this_cpu_write(memcg->stat->targets[target], next);
1024 return true;
7a159cc9 1025 }
f53d7ce3 1026 return false;
d2265e6f
KH
1027}
1028
1029/*
1030 * Check events in order.
1031 *
1032 */
c0ff4b85 1033static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
d2265e6f 1034{
4799401f 1035 preempt_disable();
d2265e6f 1036 /* threshold event is triggered in finer grain than soft limit */
f53d7ce3
JW
1037 if (unlikely(mem_cgroup_event_ratelimit(memcg,
1038 MEM_CGROUP_TARGET_THRESH))) {
bb4cc1a8 1039 bool do_softlimit;
82b3f2a7 1040 bool do_numainfo __maybe_unused;
f53d7ce3 1041
bb4cc1a8
AM
1042 do_softlimit = mem_cgroup_event_ratelimit(memcg,
1043 MEM_CGROUP_TARGET_SOFTLIMIT);
f53d7ce3
JW
1044#if MAX_NUMNODES > 1
1045 do_numainfo = mem_cgroup_event_ratelimit(memcg,
1046 MEM_CGROUP_TARGET_NUMAINFO);
1047#endif
1048 preempt_enable();
1049
c0ff4b85 1050 mem_cgroup_threshold(memcg);
bb4cc1a8
AM
1051 if (unlikely(do_softlimit))
1052 mem_cgroup_update_tree(memcg, page);
453a9bf3 1053#if MAX_NUMNODES > 1
f53d7ce3 1054 if (unlikely(do_numainfo))
c0ff4b85 1055 atomic_inc(&memcg->numainfo_events);
453a9bf3 1056#endif
f53d7ce3
JW
1057 } else
1058 preempt_enable();
d2265e6f
KH
1059}
1060
cf475ad2 1061struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb7466 1062{
31a78f23
BS
1063 /*
1064 * mm_update_next_owner() may clear mm->owner to NULL
1065 * if it races with swapoff, page migration, etc.
1066 * So this can be called with p == NULL.
1067 */
1068 if (unlikely(!p))
1069 return NULL;
1070
073219e9 1071 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
78fb7466
PE
1072}
1073
df381975 1074static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
54595fe2 1075{
c0ff4b85 1076 struct mem_cgroup *memcg = NULL;
0b7f569e 1077
54595fe2
KH
1078 rcu_read_lock();
1079 do {
c0ff4b85
R
1080 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1081 if (unlikely(!memcg))
df381975 1082 memcg = root_mem_cgroup;
c0ff4b85 1083 } while (!css_tryget(&memcg->css));
54595fe2 1084 rcu_read_unlock();
c0ff4b85 1085 return memcg;
54595fe2
KH
1086}
1087
16248d8f
MH
1088/*
1089 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1090 * ref. count) or NULL if the whole root's subtree has been visited.
1091 *
1092 * helper function to be used by mem_cgroup_iter
1093 */
1094static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
694fbc0f 1095 struct mem_cgroup *last_visited)
16248d8f 1096{
492eb21b 1097 struct cgroup_subsys_state *prev_css, *next_css;
16248d8f 1098
bd8815a6 1099 prev_css = last_visited ? &last_visited->css : NULL;
16248d8f 1100skip_node:
492eb21b 1101 next_css = css_next_descendant_pre(prev_css, &root->css);
16248d8f
MH
1102
1103 /*
1104 * Even if we found a group we have to make sure it is
1105 * alive. css && !memcg means that the groups should be
1106 * skipped and we should continue the tree walk.
1107 * last_visited css is safe to use because it is
1108 * protected by css_get and the tree walk is rcu safe.
0eef6156
MH
1109 *
1110 * We do not take a reference on the root of the tree walk
1111 * because we might race with the root removal when it would
1112 * be the only node in the iterated hierarchy and mem_cgroup_iter
1113 * would end up in an endless loop because it expects that at
1114 * least one valid node will be returned. Root cannot disappear
1115 * because caller of the iterator should hold it already so
1116 * skipping css reference should be safe.
16248d8f 1117 */
492eb21b 1118 if (next_css) {
ce48225f
HD
1119 if ((next_css == &root->css) ||
1120 ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
d8ad3055 1121 return mem_cgroup_from_css(next_css);
0eef6156
MH
1122
1123 prev_css = next_css;
1124 goto skip_node;
16248d8f
MH
1125 }
1126
1127 return NULL;
1128}
1129
519ebea3
JW
1130static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1131{
1132 /*
1133 * When a group in the hierarchy below root is destroyed, the
1134 * hierarchy iterator can no longer be trusted since it might
1135 * have pointed to the destroyed group. Invalidate it.
1136 */
1137 atomic_inc(&root->dead_count);
1138}
1139
1140static struct mem_cgroup *
1141mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1142 struct mem_cgroup *root,
1143 int *sequence)
1144{
1145 struct mem_cgroup *position = NULL;
1146 /*
1147 * A cgroup destruction happens in two stages: offlining and
1148 * release. They are separated by a RCU grace period.
1149 *
1150 * If the iterator is valid, we may still race with an
1151 * offlining. The RCU lock ensures the object won't be
1152 * released, tryget will fail if we lost the race.
1153 */
1154 *sequence = atomic_read(&root->dead_count);
1155 if (iter->last_dead_count == *sequence) {
1156 smp_rmb();
1157 position = iter->last_visited;
ecc736fc
MH
1158
1159 /*
1160 * We cannot take a reference to root because we might race
1161 * with root removal and returning NULL would end up in
1162 * an endless loop on the iterator user level when root
1163 * would be returned all the time.
1164 */
1165 if (position && position != root &&
1166 !css_tryget(&position->css))
519ebea3
JW
1167 position = NULL;
1168 }
1169 return position;
1170}
1171
1172static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1173 struct mem_cgroup *last_visited,
1174 struct mem_cgroup *new_position,
ecc736fc 1175 struct mem_cgroup *root,
519ebea3
JW
1176 int sequence)
1177{
ecc736fc
MH
1178 /* root reference counting symmetric to mem_cgroup_iter_load */
1179 if (last_visited && last_visited != root)
519ebea3
JW
1180 css_put(&last_visited->css);
1181 /*
1182 * We store the sequence count from the time @last_visited was
1183 * loaded successfully instead of rereading it here so that we
1184 * don't lose destruction events in between. We could have
1185 * raced with the destruction of @new_position after all.
1186 */
1187 iter->last_visited = new_position;
1188 smp_wmb();
1189 iter->last_dead_count = sequence;
1190}
1191
5660048c
JW
1192/**
1193 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1194 * @root: hierarchy root
1195 * @prev: previously returned memcg, NULL on first invocation
1196 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1197 *
1198 * Returns references to children of the hierarchy below @root, or
1199 * @root itself, or %NULL after a full round-trip.
1200 *
1201 * Caller must pass the return value in @prev on subsequent
1202 * invocations for reference counting, or use mem_cgroup_iter_break()
1203 * to cancel a hierarchy walk before the round-trip is complete.
1204 *
1205 * Reclaimers can specify a zone and a priority level in @reclaim to
1206 * divide up the memcgs in the hierarchy among all concurrent
1207 * reclaimers operating on the same zone and priority.
1208 */
694fbc0f 1209struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
5660048c 1210 struct mem_cgroup *prev,
694fbc0f 1211 struct mem_cgroup_reclaim_cookie *reclaim)
14067bb3 1212{
9f3a0d09 1213 struct mem_cgroup *memcg = NULL;
542f85f9 1214 struct mem_cgroup *last_visited = NULL;
711d3d2c 1215
694fbc0f
AM
1216 if (mem_cgroup_disabled())
1217 return NULL;
5660048c 1218
9f3a0d09
JW
1219 if (!root)
1220 root = root_mem_cgroup;
7d74b06f 1221
9f3a0d09 1222 if (prev && !reclaim)
542f85f9 1223 last_visited = prev;
14067bb3 1224
9f3a0d09
JW
1225 if (!root->use_hierarchy && root != root_mem_cgroup) {
1226 if (prev)
c40046f3 1227 goto out_css_put;
694fbc0f 1228 return root;
9f3a0d09 1229 }
14067bb3 1230
542f85f9 1231 rcu_read_lock();
9f3a0d09 1232 while (!memcg) {
527a5ec9 1233 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
519ebea3 1234 int uninitialized_var(seq);
711d3d2c 1235
527a5ec9
JW
1236 if (reclaim) {
1237 int nid = zone_to_nid(reclaim->zone);
1238 int zid = zone_idx(reclaim->zone);
1239 struct mem_cgroup_per_zone *mz;
1240
1241 mz = mem_cgroup_zoneinfo(root, nid, zid);
1242 iter = &mz->reclaim_iter[reclaim->priority];
542f85f9 1243 if (prev && reclaim->generation != iter->generation) {
5f578161 1244 iter->last_visited = NULL;
542f85f9
MH
1245 goto out_unlock;
1246 }
5f578161 1247
519ebea3 1248 last_visited = mem_cgroup_iter_load(iter, root, &seq);
527a5ec9 1249 }
7d74b06f 1250
694fbc0f 1251 memcg = __mem_cgroup_iter_next(root, last_visited);
14067bb3 1252
527a5ec9 1253 if (reclaim) {
ecc736fc
MH
1254 mem_cgroup_iter_update(iter, last_visited, memcg, root,
1255 seq);
542f85f9 1256
19f39402 1257 if (!memcg)
527a5ec9
JW
1258 iter->generation++;
1259 else if (!prev && memcg)
1260 reclaim->generation = iter->generation;
1261 }
9f3a0d09 1262
694fbc0f 1263 if (prev && !memcg)
542f85f9 1264 goto out_unlock;
9f3a0d09 1265 }
542f85f9
MH
1266out_unlock:
1267 rcu_read_unlock();
c40046f3
MH
1268out_css_put:
1269 if (prev && prev != root)
1270 css_put(&prev->css);
1271
9f3a0d09 1272 return memcg;
14067bb3 1273}
7d74b06f 1274
5660048c
JW
1275/**
1276 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1277 * @root: hierarchy root
1278 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1279 */
1280void mem_cgroup_iter_break(struct mem_cgroup *root,
1281 struct mem_cgroup *prev)
9f3a0d09
JW
1282{
1283 if (!root)
1284 root = root_mem_cgroup;
1285 if (prev && prev != root)
1286 css_put(&prev->css);
1287}
7d74b06f 1288
9f3a0d09
JW
1289/*
1290 * Iteration constructs for visiting all cgroups (under a tree). If
1291 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1292 * be used for reference counting.
1293 */
1294#define for_each_mem_cgroup_tree(iter, root) \
527a5ec9 1295 for (iter = mem_cgroup_iter(root, NULL, NULL); \
9f3a0d09 1296 iter != NULL; \
527a5ec9 1297 iter = mem_cgroup_iter(root, iter, NULL))
711d3d2c 1298
9f3a0d09 1299#define for_each_mem_cgroup(iter) \
527a5ec9 1300 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
9f3a0d09 1301 iter != NULL; \
527a5ec9 1302 iter = mem_cgroup_iter(NULL, iter, NULL))
14067bb3 1303
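/*
 * Illustrative sketch (not part of the original file): because the iterator
 * pins each returned memcg with a css reference, a walk that stops early has
 * to hand the last memcg back via mem_cgroup_iter_break(), e.g.:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {		(hypothetical check)
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */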
68ae564b 1304void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
456f998e 1305{
c0ff4b85 1306 struct mem_cgroup *memcg;
456f998e 1307
456f998e 1308 rcu_read_lock();
c0ff4b85
R
1309 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1310 if (unlikely(!memcg))
456f998e
YH
1311 goto out;
1312
1313 switch (idx) {
456f998e 1314 case PGFAULT:
0e574a93
JW
1315 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1316 break;
1317 case PGMAJFAULT:
1318 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
456f998e
YH
1319 break;
1320 default:
1321 BUG();
1322 }
1323out:
1324 rcu_read_unlock();
1325}
68ae564b 1326EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
456f998e 1327
925b7673
JW
1328/**
1329 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1330 * @zone: zone of the wanted lruvec
fa9add64 1331 * @memcg: memcg of the wanted lruvec
925b7673
JW
1332 *
1333 * Returns the lru list vector holding pages for the given @zone and
1334 * @mem. This can be the global zone lruvec, if the memory controller
1335 * is disabled.
1336 */
1337struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1338 struct mem_cgroup *memcg)
1339{
1340 struct mem_cgroup_per_zone *mz;
bea8c150 1341 struct lruvec *lruvec;
925b7673 1342
bea8c150
HD
1343 if (mem_cgroup_disabled()) {
1344 lruvec = &zone->lruvec;
1345 goto out;
1346 }
925b7673
JW
1347
1348 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
bea8c150
HD
1349 lruvec = &mz->lruvec;
1350out:
1351 /*
1352 * Since a node can be onlined after the mem_cgroup was created,
1353 * we have to be prepared to initialize lruvec->zone here;
1354 * and if offlined then reonlined, we need to reinitialize it.
1355 */
1356 if (unlikely(lruvec->zone != zone))
1357 lruvec->zone = zone;
1358 return lruvec;
925b7673
JW
1359}
1360
08e552c6
KH
/*
 * The following LRU functions may be used without holding PCG_LOCK.
 * They are called by the global LRU code independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving accounts, the page is not on the LRU; it is
 * isolated.
 */
4f98a2fe 1374
925b7673 1375/**
fa9add64 1376 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
925b7673 1377 * @page: the page
fa9add64 1378 * @zone: zone of the page
925b7673 1379 */
fa9add64 1380struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
08e552c6 1381{
08e552c6 1382 struct mem_cgroup_per_zone *mz;
925b7673
JW
1383 struct mem_cgroup *memcg;
1384 struct page_cgroup *pc;
bea8c150 1385 struct lruvec *lruvec;
6d12e2d8 1386
bea8c150
HD
1387 if (mem_cgroup_disabled()) {
1388 lruvec = &zone->lruvec;
1389 goto out;
1390 }
925b7673 1391
08e552c6 1392 pc = lookup_page_cgroup(page);
38c5d72f 1393 memcg = pc->mem_cgroup;
7512102c
HD
1394
1395 /*
fa9add64 1396 * Surreptitiously switch any uncharged offlist page to root:
7512102c
HD
1397 * an uncharged page off lru does nothing to secure
1398 * its former mem_cgroup from sudden removal.
1399 *
1400 * Our caller holds lru_lock, and PageCgroupUsed is updated
1401 * under page_cgroup lock: between them, they make all uses
1402 * of pc->mem_cgroup safe.
1403 */
fa9add64 1404 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
7512102c
HD
1405 pc->mem_cgroup = memcg = root_mem_cgroup;
1406
925b7673 1407 mz = page_cgroup_zoneinfo(memcg, page);
bea8c150
HD
1408 lruvec = &mz->lruvec;
1409out:
1410 /*
1411 * Since a node can be onlined after the mem_cgroup was created,
1412 * we have to be prepared to initialize lruvec->zone here;
1413 * and if offlined then reonlined, we need to reinitialize it.
1414 */
1415 if (unlikely(lruvec->zone != zone))
1416 lruvec->zone = zone;
1417 return lruvec;
08e552c6 1418}
b69408e8 1419
925b7673 1420/**
fa9add64
HD
1421 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1422 * @lruvec: mem_cgroup per zone lru vector
1423 * @lru: index of lru list the page is sitting on
1424 * @nr_pages: positive when adding or negative when removing
925b7673 1425 *
fa9add64
HD
1426 * This function must be called when a page is added to or removed from an
1427 * lru list.
3f58a829 1428 */
fa9add64
HD
1429void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1430 int nr_pages)
3f58a829
MK
1431{
1432 struct mem_cgroup_per_zone *mz;
fa9add64 1433 unsigned long *lru_size;
3f58a829
MK
1434
1435 if (mem_cgroup_disabled())
1436 return;
1437
fa9add64
HD
1438 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1439 lru_size = mz->lru_size + lru;
1440 *lru_size += nr_pages;
1441 VM_BUG_ON((long)(*lru_size) < 0);
08e552c6 1442}
544122e5 1443
3e92041d 1444/*
c0ff4b85 1445 * Checks whether given mem is same or in the root_mem_cgroup's
3e92041d
MH
1446 * hierarchy subtree
1447 */
c3ac9a8a
JW
1448bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1449 struct mem_cgroup *memcg)
3e92041d 1450{
91c63734
JW
1451 if (root_memcg == memcg)
1452 return true;
3a981f48 1453 if (!root_memcg->use_hierarchy || !memcg)
91c63734 1454 return false;
b47f77b5 1455 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
c3ac9a8a
JW
1456}
1457
1458static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1459 struct mem_cgroup *memcg)
1460{
1461 bool ret;
1462
91c63734 1463 rcu_read_lock();
c3ac9a8a 1464 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
91c63734
JW
1465 rcu_read_unlock();
1466 return ret;
3e92041d
MH
1467}
1468
ffbdccf5
DR
1469bool task_in_mem_cgroup(struct task_struct *task,
1470 const struct mem_cgroup *memcg)
4c4a2214 1471{
0b7f569e 1472 struct mem_cgroup *curr = NULL;
158e0a2d 1473 struct task_struct *p;
ffbdccf5 1474 bool ret;
4c4a2214 1475
158e0a2d 1476 p = find_lock_task_mm(task);
de077d22 1477 if (p) {
df381975 1478 curr = get_mem_cgroup_from_mm(p->mm);
de077d22
DR
1479 task_unlock(p);
1480 } else {
1481 /*
1482 * All threads may have already detached their mm's, but the oom
1483 * killer still needs to detect if they have already been oom
1484 * killed to prevent needlessly killing additional tasks.
1485 */
ffbdccf5 1486 rcu_read_lock();
de077d22
DR
1487 curr = mem_cgroup_from_task(task);
1488 if (curr)
1489 css_get(&curr->css);
ffbdccf5 1490 rcu_read_unlock();
de077d22 1491 }
d31f56db 1492 /*
	 * We should check use_hierarchy of "memcg" not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true if
	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in
	 * the *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
d31f56db 1497 */
c0ff4b85 1498 ret = mem_cgroup_same_or_subtree(memcg, curr);
0b7f569e 1499 css_put(&curr->css);
4c4a2214
DR
1500 return ret;
1501}
1502
c56d5c7d 1503int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
14797e23 1504{
9b272977 1505 unsigned long inactive_ratio;
14797e23 1506 unsigned long inactive;
9b272977 1507 unsigned long active;
c772be93 1508 unsigned long gb;
14797e23 1509
4d7dcca2
HD
1510 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1511 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
14797e23 1512
c772be93
KM
1513 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1514 if (gb)
1515 inactive_ratio = int_sqrt(10 * gb);
1516 else
1517 inactive_ratio = 1;
1518
9b272977 1519 return inactive * inactive_ratio < active;
14797e23
KM
1520}
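
/*
 * Worked example (not part of the original file): with 4GB of anon pages on
 * the LRU, gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the inactive list
 * is only considered low once inactive * 6 < active, i.e. roughly one part
 * inactive to six parts active. Below 1GB the ratio degenerates to 1, which
 * is a plain inactive < active check.
 */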
1521
6d61ef40
BS
1522#define mem_cgroup_from_res_counter(counter, member) \
1523 container_of(counter, struct mem_cgroup, member)
1524
19942822 1525/**
9d11ea9f 1526 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
dad7557e 1527 * @memcg: the memory cgroup
19942822 1528 *
9d11ea9f 1529 * Returns the maximum amount of memory @mem can be charged with, in
7ec99d62 1530 * pages.
19942822 1531 */
c0ff4b85 1532static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
19942822 1533{
9d11ea9f
JW
1534 unsigned long long margin;
1535
c0ff4b85 1536 margin = res_counter_margin(&memcg->res);
9d11ea9f 1537 if (do_swap_account)
c0ff4b85 1538 margin = min(margin, res_counter_margin(&memcg->memsw));
7ec99d62 1539 return margin >> PAGE_SHIFT;
19942822
JW
1540}
1541
1f4c025b 1542int mem_cgroup_swappiness(struct mem_cgroup *memcg)
a7885eb8 1543{
a7885eb8 1544 /* root ? */
63876986 1545 if (!css_parent(&memcg->css))
a7885eb8
KM
1546 return vm_swappiness;
1547
bf1ff263 1548 return memcg->swappiness;
a7885eb8
KM
1549}
1550
619d094b
KH
/*
 * memcg->moving_account is used for checking the possibility that some thread
 * is calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                          CPU-B
 *                                        rcu_read_lock()
 *         memcg->moving_account+1        if (memcg->moving_account)
 *                                                take heavy locks.
 *         synchronize_rcu()              update something.
 *                                        rcu_read_unlock()
 *         start move here.
 */
4331f7d3
KH
1565
1566/* for quick checking without looking up memcg */
1567atomic_t memcg_moving __read_mostly;
1568
c0ff4b85 1569static void mem_cgroup_start_move(struct mem_cgroup *memcg)
32047e2a 1570{
4331f7d3 1571 atomic_inc(&memcg_moving);
619d094b 1572 atomic_inc(&memcg->moving_account);
32047e2a
KH
1573 synchronize_rcu();
1574}
1575
c0ff4b85 1576static void mem_cgroup_end_move(struct mem_cgroup *memcg)
32047e2a 1577{
619d094b
KH
1578 /*
1579 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1580 * We check NULL in callee rather than caller.
1581 */
4331f7d3
KH
1582 if (memcg) {
1583 atomic_dec(&memcg_moving);
619d094b 1584 atomic_dec(&memcg->moving_account);
4331f7d3 1585 }
32047e2a 1586}
619d094b 1587
32047e2a
KH
/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */
1599
13fd1dd9 1600static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
32047e2a
KH
1601{
1602 VM_BUG_ON(!rcu_read_lock_held());
619d094b 1603 return atomic_read(&memcg->moving_account) > 0;
32047e2a 1604}
4b534334 1605
c0ff4b85 1606static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
4b534334 1607{
2bd9bb20
KH
1608 struct mem_cgroup *from;
1609 struct mem_cgroup *to;
4b534334 1610 bool ret = false;
2bd9bb20
KH
1611 /*
1612 * Unlike task_move routines, we access mc.to, mc.from not under
1613 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1614 */
1615 spin_lock(&mc.lock);
1616 from = mc.from;
1617 to = mc.to;
1618 if (!from)
1619 goto unlock;
3e92041d 1620
c0ff4b85
R
1621 ret = mem_cgroup_same_or_subtree(memcg, from)
1622 || mem_cgroup_same_or_subtree(memcg, to);
2bd9bb20
KH
1623unlock:
1624 spin_unlock(&mc.lock);
4b534334
KH
1625 return ret;
1626}
1627
c0ff4b85 1628static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
4b534334
KH
1629{
1630 if (mc.moving_task && current != mc.moving_task) {
c0ff4b85 1631 if (mem_cgroup_under_move(memcg)) {
4b534334
KH
1632 DEFINE_WAIT(wait);
1633 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1634 /* moving charge context might have finished. */
1635 if (mc.moving_task)
1636 schedule();
1637 finish_wait(&mc.waitq, &wait);
1638 return true;
1639 }
1640 }
1641 return false;
1642}

/*
 * Take this lock when
 * - code tries to modify a page's memcg while it's USED.
 * - code tries to modify page state accounting in a memcg.
 * see mem_cgroup_stolen(), too.
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/* oom_info_lock ensures that parallel ooms do not interleave */
	static DEFINE_MUTEX(oom_info_lock);
	struct mem_cgroup *iter;
	unsigned int i;

	if (!p)
		return;

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	pr_info("Task in ");
	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	pr_info(" killed as a result of limit of ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_info("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	mutex_unlock(&oom_info_lock);
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (just the memcg itself) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}
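
/*
 * For example, for a hierarchy A -> {B, C} (two children under A),
 * mem_cgroup_count_children(A) returns 3: A itself plus B and C.
 */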

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);

	/*
	 * Do not consider swap space if we cannot swap due to swappiness
	 */
	if (mem_cgroup_swappiness(memcg)) {
		u64 memsw;

		limit += total_swap_pages << PAGE_SHIFT;
		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);

		/*
		 * If memsw is finite and limits the amount of swap space
		 * available to this memcg, return that limit.
		 */
		limit = min(limit, memsw);
	}

	return limit;
}
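
/*
 * Worked example (assumed numbers): with a memory limit of 1G, non-zero
 * swappiness, 512M worth of total_swap_pages and a memsw limit of 1200M,
 * this returns min(1G + 512M, 1200M) = 1200M. With swappiness == 0 it
 * would return just the 1G memory limit.
 */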

static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(task, totalpages, NULL,
							false)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				return;
			case OOM_SCAN_OK:
				break;
			}
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (!chosen)
		return;
	points = chosen_points * 1000 / totalpages;
	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
			 NULL, "Memory cgroup out of memory");
}

static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the caller wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, it will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
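
/*
 * For example (assumed state): with scan_nodes = {0, 2} and
 * last_scanned_node == 0, the walk above picks node 2; the next call
 * wraps around via first_node() and picks node 0 again.
 */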

/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to do a double check.
 */
static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
	int nid;

	/*
	 * quick check...making use of scan_node.
	 * We can skip unused nodes.
	 */
	if (!nodes_empty(memcg->scan_nodes)) {
		for (nid = first_node(memcg->scan_nodes);
		     nid < MAX_NUMNODES;
		     nid = next_node(nid, memcg->scan_nodes)) {

			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
				return true;
		}
	}
	/*
	 * Check the rest of the nodes.
	 */
	for_each_node_state(nid, N_MEMORY) {
		if (node_isset(nid, memcg->scan_nodes))
			continue;
		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
			return true;
	}
	return false;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}

static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little
				 * that we keep coming back to reclaim from
				 * this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		if (!mem_cgroup_reclaimable(victim, false))
			continue;
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!res_counter_soft_limit_excess(&root_memcg->res))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up on the subtrees locked
		 * before the failing one.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	/*
	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg->oom_wakeups);
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_oom.may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_oom.memcg = memcg;
	current->memcg_oom.gfp_mask = mask;
	current->memcg_oom.order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_oom.memcg;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
					 current->memcg_oom.order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_oom.memcg = NULL;
	css_put(&memcg->css);
	return true;
}
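
/*
 * A minimal sketch of the expected call sites (roughly what the page
 * fault exit paths do; see handle_mm_fault() and
 * pagefault_out_of_memory() for the real callers):
 *
 *	// fault handled gracefully: just clean up the OOM state
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 *
 *	// fault really failed with VM_FAULT_OOM: actually kill or wait
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;
 */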

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 * it tends to be costly. But considering some conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 * if there is a race with "uncharge". The statistics themselves are properly
 * handled by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race window small, we check memcg->moving_account and detect the
 * possibility of a race. If there is one, we take a lock.
 */

void __mem_cgroup_begin_update_page_stat(struct page *page,
				bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * If this memory cgroup is not under account moving, we don't
	 * need to take move_lock_mem_cgroup(). Because we already hold
	 * rcu_read_lock(), any calls to move_account will be delayed until
	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
	 */
	if (!mem_cgroup_stolen(memcg))
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}

void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * It's guaranteed that pc->mem_cgroup never changes while
	 * the lock is held because a routine that modifies pc->mem_cgroup
	 * should take move_lock_mem_cgroup().
	 */
	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}
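
/*
 * A minimal sketch of how the two helpers above are meant to be paired
 * (callers go through the mem_cgroup_begin/end_update_page_stat()
 * wrappers in memcontrol.h; the stat index below is illustrative):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, 1);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */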

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx, int val)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	unsigned long uninitialized_var(flags);

	if (mem_cgroup_disabled())
		return;

	VM_BUG_ON(!rcu_read_lock_held());
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;

	this_cpu_add(memcg->stat->count[idx], val);
}

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: it may be necessary to use bigger numbers on big iron.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	if (nr_pages > CHARGE_BATCH)
		return false;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
		stock->nr_pages -= nr_pages;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}
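
/*
 * A minimal sketch of the fast path this enables (hypothetical caller;
 * mem_cgroup_try_charge() below is the real one):
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			// served from per-cpu stock
 *	// slow path: charge the res_counter in a larger batch, then
 *	refill_stock(memcg, batch - nr_pages);	// stash surplus for later
 */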

/*
 * Returns stocks cached in percpu back to the res_counter and resets the
 * cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled or must be called by
 * a thread which is pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

static void __init memcg_stock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct memcg_stock_pcp *stock =
					&per_cpu(memcg_stock, cpu);
		INIT_WORK(&stock->work, drain_local_stock);
	}
}

/*
 * Cache charges (in pages) taken from the res_counter in the local per-cpu
 * area. They will be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}

/*
 * Drains all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it. The sync flag says whether we should
 * block until the work is done.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
{
	int cpu, curcpu;

	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();

	if (!sync)
		goto out;

	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
			flush_work(&stock->work);
	}
out:
	put_online_cpus();
}

/*
 * Tries to drain stocked charges on other cpus. This function is asynchronous
 * and just puts a work item per cpu to drain locally on each cpu. The caller
 * can expect some charges to be returned to the res_counter later, but it
 * cannot wait for that to happen.
 */
static void drain_all_stock_async(struct mem_cgroup *root_memcg)
{
	/*
	 * If someone calls draining, avoid adding more kworker runs.
	 */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	drain_all_stock(root_memcg, false);
	mutex_unlock(&percpu_charge_mutex);
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
{
	/* called when force_empty is called */
	mutex_lock(&percpu_charge_mutex);
	drain_all_stock(root_memcg, true);
	mutex_unlock(&percpu_charge_mutex);
}

/*
 * This function drains the percpu counter values from a DEAD cpu and
 * moves them to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
{
	int i;

	spin_lock(&memcg->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long x = per_cpu(memcg->stat->count[i], cpu);

		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);

		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
	}
	spin_unlock(&memcg->pcp_counter_lock);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

/* See mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough resources */
};

static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
				unsigned int nr_pages, unsigned int min_pages,
				bool invoke_oom)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&memcg->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&memcg->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages > min_pages)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	if (gfp_mask & __GFP_NORETRY)
		return CHARGE_NOMEM;

	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	if (invoke_oom)
		mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));

	return CHARGE_NOMEM;
}

/**
 * mem_cgroup_try_charge - try charging a memcg
 * @memcg: memcg to charge
 * @gfp_mask: reclaim mode
 * @nr_pages: number of pages to charge
 * @oom: trigger OOM if reclaim fails
 *
 * Returns 0 if @memcg was charged successfully, -EINTR if the charge
 * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
 */
static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
				 gfp_t gfp_mask,
				 unsigned int nr_pages,
				 bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	int ret;

	if (mem_cgroup_is_root(memcg))
		goto done;
	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage. Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current)))
		goto bypass;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (gfp_mask & __GFP_NOFAIL)
		oom = false;
again:
	if (consume_stock(memcg, nr_pages))
		goto done;

	do {
		bool invoke_oom = oom && !nr_oom_retries;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current))
			goto bypass;

		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
					   nr_pages, invoke_oom);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom || invoke_oom)
				goto nomem;
			nr_oom_retries--;
			break;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
done:
	return 0;
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
bypass:
	return -EINTR;
}

/**
 * mem_cgroup_try_charge_mm - try charging a mm
 * @mm: mm_struct to charge
 * @gfp_mask: reclaim mode
 * @nr_pages: number of pages to charge
 * @oom: trigger OOM if reclaim fails
 *
 * Returns the charged mem_cgroup associated with the given mm_struct or
 * NULL if the charge failed.
 */
static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
				 gfp_t gfp_mask,
				 unsigned int nr_pages,
				 bool oom)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_mm(mm);
	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
	css_put(&memcg->css);
	if (ret == -EINTR)
		memcg = root_mem_cgroup;
	else if (ret)
		memcg = NULL;

	return memcg;
}

/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that and does the uncharge, dropping the css
 * reference obtained by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(memcg)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&memcg->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&memcg->memsw, bytes);
	}
}

/*
 * Cancel charges in this cgroup....doesn't propagate to the parent cgroup.
 * This is useful when moving usage to the parent cgroup.
 */
static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
					unsigned int nr_pages)
{
	unsigned long bytes = nr_pages * PAGE_SIZE;

	if (mem_cgroup_is_root(memcg))
		return;

	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
	if (do_swap_account)
		res_counter_uncharge_until(&memcg->memsw,
					   memcg->memsw.parent, bytes);
}

/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller is responsible for calling css_tryget if
 * the mem_cgroup is used for charging. (dropping the refcnt from swap can
 * be called against a removed memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	return mem_cgroup_from_id(id);
}

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return memcg;
}

static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
				       struct page *page,
				       unsigned int nr_pages,
				       enum charge_type ctype,
				       bool lrucare)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct zone *uninitialized_var(zone);
	struct lruvec *lruvec;
	bool was_on_lru = false;
	bool anon;

	lock_page_cgroup(pc);
	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
	/*
	 * we don't need page_cgroup_lock for tail pages, because they are not
	 * accessed by any other context at this point.
	 */

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU. Take care of it.
	 */
	if (lrucare) {
		zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_lru(page));
			was_on_lru = true;
		}
	}

	pc->mem_cgroup = memcg;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing the USED bit. To make pc->mem_cgroup
	 * visible before the USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	SetPageCgroupUsed(pc);

	if (lrucare) {
		if (was_on_lru) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			VM_BUG_ON_PAGE(PageLRU(page), page);
			SetPageLRU(page);
			add_page_to_lru_list(page, lruvec, page_lru(page));
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
		anon = true;
	else
		anon = false;

	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
	unlock_page_cgroup(pc);

	/*
	 * "charge_statistics" updated the event counter. Then, check it.
	 * Insert ancestor (and ancestor's ancestors), to the softlimit
	 * RB-tree if they exceed the softlimit.
	 */
	memcg_check_events(memcg, page);
}

static DEFINE_MUTEX(set_limit_mutex);

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_MUTEX(activate_kmem_mutex);

static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
{
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
		memcg_kmem_is_active(memcg);
}

/*
 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
 * in the memcg_cache_params struct.
 */
static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
{
	struct kmem_cache *cachep;

	VM_BUG_ON(p->is_root_cache);
	cachep = p->root_cache;
	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
}

#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	struct memcg_cache_params *params;

	if (!memcg_can_account_kmem(memcg))
		return -EIO;

	print_slabinfo_header(m);

	mutex_lock(&memcg->slab_caches_mutex);
	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
		cache_show(memcg_params_to_cache(params), m);
	mutex_unlock(&memcg->slab_caches_mutex);

	return 0;
}
#endif

static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
	struct res_counter *fail_res;
	int ret = 0;

	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
	if (ret)
		return ret;

	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
				    oom_gfp_allowed(gfp));
	if (ret == -EINTR) {
		/*
		 * mem_cgroup_try_charge() chose to bypass to root due to
		 * OOM kill or fatal signal. Since our only options are to
		 * either fail the allocation or charge it to this cgroup, do
		 * it as a temporary condition. But we can't fail. From a
		 * kmem/slab perspective, the cache has already been selected,
		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
		 * our minds.
		 *
		 * This condition will only trigger if the task entered
		 * memcg_charge_kmem in a sane state, but was OOM-killed during
		 * mem_cgroup_try_charge() above. Tasks that were already
		 * dying when the allocation triggers should have been already
		 * directed to the root cgroup in memcontrol.h
		 */
		res_counter_charge_nofail(&memcg->res, size, &fail_res);
		if (do_swap_account)
			res_counter_charge_nofail(&memcg->memsw, size,
						  &fail_res);
		ret = 0;
	} else if (ret)
		res_counter_uncharge(&memcg->kmem, size);

	return ret;
}

static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
{
	res_counter_uncharge(&memcg->res, size);
	if (do_swap_account)
		res_counter_uncharge(&memcg->memsw, size);

	/* Not down to 0 */
	if (res_counter_uncharge(&memcg->kmem, size))
		return;

	/*
	 * Releases a reference taken in kmem_cgroup_css_offline in case
	 * this last uncharge is racing with the offlining code or it is
	 * outliving the memcg existence.
	 *
	 * The memory barrier imposed by test&clear is paired with the
	 * explicit one in memcg_kmem_mark_dead().
	 */
	if (memcg_kmem_test_and_clear_dead(memcg))
		css_put(&memcg->css);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

static size_t memcg_caches_array_size(int num_groups)
{
	ssize_t size;
	if (num_groups <= 0)
		return 0;

	size = 2 * num_groups;
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	return size;
}
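
/*
 * Worked example: for num_groups == 3 the array is sized 2 * 3 = 6 slots,
 * leaving headroom for new cgroups; very small counts are rounded up to
 * MEMCG_CACHES_MIN_SIZE and very large ones are capped at
 * MEMCG_CACHES_MAX_SIZE.
 */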

/*
 * We should update the current array size iff all cache updates succeed.
 * This can only be done from the slab side. The slab mutex needs to be
 * held when calling this.
 */
void memcg_update_array_size(int num)
{
	if (num > memcg_limited_groups_array_size)
		memcg_limited_groups_array_size = memcg_caches_array_size(num);
}

static void kmem_cache_destroy_work_func(struct work_struct *w);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
{
	struct memcg_cache_params *cur_params = s->memcg_params;

	VM_BUG_ON(!is_root_cache(s));

	if (num_groups > memcg_limited_groups_array_size) {
		int i;
		struct memcg_cache_params *new_params;
		ssize_t size = memcg_caches_array_size(num_groups);

		size *= sizeof(void *);
		size += offsetof(struct memcg_cache_params, memcg_caches);

		new_params = kzalloc(size, GFP_KERNEL);
		if (!new_params)
			return -ENOMEM;

		new_params->is_root_cache = true;

		/*
		 * There is the chance it will be bigger than
		 * memcg_limited_groups_array_size, if we failed an allocation
		 * in a cache, in which case all caches updated before it will
		 * have a bigger array.
		 *
		 * But if that is the case, the data after
		 * memcg_limited_groups_array_size is certainly unused.
		 */
		for (i = 0; i < memcg_limited_groups_array_size; i++) {
			if (!cur_params->memcg_caches[i])
				continue;
			new_params->memcg_caches[i] =
						cur_params->memcg_caches[i];
		}

		/*
		 * Ideally, we would wait until all caches succeed, and only
		 * then free the old one. But this is not worth the extra
		 * pointer per-cache we'd have to have for this.
		 *
		 * It is not a big deal if some caches are left with a size
		 * bigger than the others. And all updates will reset this
		 * anyway.
		 */
		rcu_assign_pointer(s->memcg_params, new_params);
		if (cur_params)
			kfree_rcu(cur_params, rcu_head);
	}
	return 0;
}

char *memcg_create_cache_name(struct mem_cgroup *memcg,
			      struct kmem_cache *root_cache)
{
	static char *buf = NULL;

	/*
	 * We need a mutex here to protect the shared buffer. Since this is
	 * expected to be called only on cache creation, we can employ the
	 * slab_mutex for that purpose.
	 */
	lockdep_assert_held(&slab_mutex);

	if (!buf) {
		buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
		if (!buf)
			return NULL;
	}

	cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
	return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			 memcg_cache_id(memcg), buf);
}
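
/*
 * For example (hypothetical names), a root cache "kmalloc-128" copied
 * for a memcg with cache id 42 whose cgroup is named "foo" would get the
 * name "kmalloc-128(42:foo)".
 */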

int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
			     struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
		INIT_WORK(&s->memcg_params->destroy,
			  kmem_cache_destroy_work_func);
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

void memcg_free_cache_params(struct kmem_cache *s)
{
	kfree(s->memcg_params);
}

void memcg_register_cache(struct kmem_cache *s)
{
	struct kmem_cache *root;
	struct mem_cgroup *memcg;
	int id;

	if (is_root_cache(s))
		return;

	/*
	 * Holding the slab_mutex assures nobody will touch the memcg_caches
	 * array while we are modifying it.
	 */
	lockdep_assert_held(&slab_mutex);

	root = s->memcg_params->root_cache;
	memcg = s->memcg_params->memcg;
	id = memcg_cache_id(memcg);

	css_get(&memcg->css);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();

	/*
	 * Initialize the pointer to this cache in its parent's memcg_params
	 * before adding it to the memcg_slab_caches list, otherwise we can
	 * fail to convert memcg_params_to_cache() while traversing the list.
	 */
	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
	root->memcg_params->memcg_caches[id] = s;

	mutex_lock(&memcg->slab_caches_mutex);
	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
	mutex_unlock(&memcg->slab_caches_mutex);
}

void memcg_unregister_cache(struct kmem_cache *s)
{
	struct kmem_cache *root;
	struct mem_cgroup *memcg;
	int id;

	if (is_root_cache(s))
		return;

	/*
	 * Holding the slab_mutex assures nobody will touch the memcg_caches
	 * array while we are modifying it.
	 */
	lockdep_assert_held(&slab_mutex);

	root = s->memcg_params->root_cache;
	memcg = s->memcg_params->memcg;
	id = memcg_cache_id(memcg);

	mutex_lock(&memcg->slab_caches_mutex);
	list_del(&s->memcg_params->list);
	mutex_unlock(&memcg->slab_caches_mutex);

	/*
	 * Clear the pointer to this cache in its parent's memcg_params only
	 * after removing it from the memcg_slab_caches list, otherwise we can
	 * fail to convert memcg_params_to_cache() while traversing the list.
	 */
	VM_BUG_ON(!root->memcg_params->memcg_caches[id]);
	root->memcg_params->memcg_caches[id] = NULL;

	css_put(&memcg->css);
}

/*
 * During the creation of a new cache, we need to disable our accounting
 * mechanism altogether. This is true even if we are not creating, but rather
 * just enqueuing new caches to be created.
 *
 * This is because that process will trigger allocations; some visible, like
 * explicit kmallocs to auxiliary data structures, name strings and internal
 * cache structures; some well concealed, like INIT_WORK() that can allocate
 * objects during debug.
 *
 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
 * to it. This may not be a bounded recursion: since the first cache creation
 * failed to complete (waiting on the allocation), we'll just try to create the
 * cache again, failing at the same point.
 *
 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
 * inside the following two functions.
 */
static inline void memcg_stop_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account++;
}

static inline void memcg_resume_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account--;
}
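
/*
 * A minimal sketch of the intended pairing (hypothetical caller; the
 * cache-creation paths below use it in exactly this shape):
 *
 *	memcg_stop_kmem_account();
 *	... allocate, or enqueue the cache-creation work ...
 *	memcg_resume_kmem_account();
 */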

static void kmem_cache_destroy_work_func(struct work_struct *w)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *p;

	p = container_of(w, struct memcg_cache_params, destroy);

	cachep = memcg_params_to_cache(p);

	/*
	 * If we get down to 0 after shrink, we could delete right away.
	 * However, memcg_release_pages() already puts us back in the workqueue
	 * in that case. If we proceed deleting, we'll get a dangling
	 * reference, and removing the object from the workqueue in that case
	 * is unnecessary complication. We are not a fast path.
	 *
	 * Note that this case is fundamentally different from racing with
	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
	 * kmem_cache_shrink, not only would we be reinserting a dead cache
	 * into the queue, but doing so from inside the worker racing to
	 * destroy it.
	 *
	 * So if we aren't down to zero, we'll just schedule a worker and try
	 * again.
	 */
	if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
		kmem_cache_shrink(cachep);
	else
		kmem_cache_destroy(cachep);
}

void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
{
	if (!cachep->memcg_params->dead)
		return;

	/*
	 * There are many ways in which we can get here.
	 *
	 * We can get to a memory-pressure situation while the delayed work is
	 * still pending to run. The vmscan shrinkers can then release all
	 * cache memory and get us to destruction. If this is the case, we'll
	 * be executed twice, which is a bug (the second time will execute over
	 * bogus data). In this case, cancelling the work should be fine.
	 *
	 * But we can also get here from the worker itself, if
	 * kmem_cache_shrink is enough to shake all the remaining objects and
	 * get the page count to 0. In this case, we'll deadlock if we try to
	 * cancel the work (the worker runs with an internal lock held, which
	 * is the same lock we would hold for cancel_work_sync().)
	 *
	 * Since we can't possibly know who got us here, just refrain from
	 * running if there is already work pending.
	 */
	if (work_pending(&cachep->memcg_params->destroy))
		return;
	/*
	 * We have to defer the actual destroying to a workqueue, because
	 * we might currently be in a context that cannot sleep.
	 */
	schedule_work(&cachep->memcg_params->destroy);
}
3323
7cf27982
GC
3324void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3325{
3326 struct kmem_cache *c;
3327 int i;
3328
3329 if (!s->memcg_params)
3330 return;
3331 if (!s->memcg_params->is_root_cache)
3332 return;
3333
3334 /*
3335 * If the cache is being destroyed, we trust that there is no one else
3336 * requesting objects from it. Even if there are, the sanity checks in
3337 * kmem_cache_destroy should catch this case.
3338 *
3339 * Still, we don't want anyone else freeing memcg_caches under our
3340 * noses, which can happen if a new memcg comes to life. As usual,
d6441637
VD
3341 * we'll take the activate_kmem_mutex to protect ourselves against
3342 * this.
7cf27982 3343 */
d6441637 3344 mutex_lock(&activate_kmem_mutex);
7a67d7ab
QH
3345 for_each_memcg_cache_index(i) {
3346 c = cache_from_memcg_idx(s, i);
7cf27982
GC
3347 if (!c)
3348 continue;
3349
3350 /*
3351 * We will now manually delete the caches, so to avoid races
3352 * we need to cancel all pending destruction workers and
3353 * proceed with destruction ourselves.
3354 *
3355 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3356 * and that could spawn the workers again: it is likely that
3357 * the cache still has active pages at this very moment.
3358 * This would lead us back to mem_cgroup_destroy_cache.
3359 *
3360 * But that will not execute at all if the "dead" flag is not
3361 * set, so flip it down to guarantee we are in control.
3362 */
3363 c->memcg_params->dead = false;
22933152 3364 cancel_work_sync(&c->memcg_params->destroy);
7cf27982
GC
3365 kmem_cache_destroy(c);
3366 }
d6441637 3367 mutex_unlock(&activate_kmem_mutex);
7cf27982
GC
3368}
3369
1f458cbf
GC
3370static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3371{
3372 struct kmem_cache *cachep;
3373 struct memcg_cache_params *params;
3374
3375 if (!memcg_kmem_is_active(memcg))
3376 return;
3377
3378 mutex_lock(&memcg->slab_caches_mutex);
3379 list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3380 cachep = memcg_params_to_cache(params);
3381 cachep->memcg_params->dead = true;
1f458cbf
GC
3382 schedule_work(&cachep->memcg_params->destroy);
3383 }
3384 mutex_unlock(&memcg->slab_caches_mutex);
3385}
3386
5722d094
VD
3387struct create_work {
3388 struct mem_cgroup *memcg;
3389 struct kmem_cache *cachep;
3390 struct work_struct work;
3391};
3392
d7f25f8a
GC
3393static void memcg_create_cache_work_func(struct work_struct *w)
3394{
5722d094
VD
3395 struct create_work *cw = container_of(w, struct create_work, work);
3396 struct mem_cgroup *memcg = cw->memcg;
3397 struct kmem_cache *cachep = cw->cachep;
3398 struct kmem_cache *new;
d7f25f8a 3399
5722d094
VD
3400 new = kmem_cache_create_memcg(memcg, cachep->name,
3401 cachep->object_size, cachep->align,
3402 cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
3403 if (new)
3404 new->allocflags |= __GFP_KMEMCG;
3405 css_put(&memcg->css);
d7f25f8a
GC
3406 kfree(cw);
3407}
3408
3409/*
3410 * Enqueue the creation of a per-memcg kmem_cache.
d7f25f8a 3411 */
0e9d92f2
GC
3412static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3413 struct kmem_cache *cachep)
d7f25f8a
GC
3414{
3415 struct create_work *cw;
3416
3417 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
ca0dde97
LZ
3418 if (cw == NULL) {
3419 css_put(&memcg->css);
d7f25f8a
GC
3420 return;
3421 }
3422
3423 cw->memcg = memcg;
3424 cw->cachep = cachep;
3425
3426 INIT_WORK(&cw->work, memcg_create_cache_work_func);
3427 schedule_work(&cw->work);
3428}
3429
0e9d92f2
GC
3430static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3431 struct kmem_cache *cachep)
3432{
3433 /*
3434 * We need to stop accounting when we kmalloc, because if the
3435 * corresponding kmalloc cache is not yet created, the first allocation
3436 * in __memcg_create_cache_enqueue will recurse.
3437 *
3438 * However, it is better to enclose the whole function. Depending on
3439 * the debugging options enabled, INIT_WORK(), for instance, can
3440 * trigger an allocation. This, too, will make us recurse. Because at
3441 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3442 * the safest choice is to do it like this, wrapping the whole function.
3443 */
3444 memcg_stop_kmem_account();
3445 __memcg_create_cache_enqueue(memcg, cachep);
3446 memcg_resume_kmem_account();
3447}
d7f25f8a
GC
3448/*
3449 * Return the kmem_cache we're supposed to use for a slab allocation.
3450 * We try to use the current memcg's version of the cache.
3451 *
3452 * If the cache does not exist yet, i.e. we are the first user of it,
3453 * we either create it immediately, if possible, or create it asynchronously
3454 * in a workqueue.
3455 * In the latter case, we will let the current allocation go through with
3456 * the original cache.
3457 *
3458 * Can't be called in interrupt context or from kernel threads.
3459 * This function needs to be called with rcu_read_lock() held.
3460 */
3461struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3462 gfp_t gfp)
3463{
3464 struct mem_cgroup *memcg;
959c8963 3465 struct kmem_cache *memcg_cachep;
d7f25f8a
GC
3466
3467 VM_BUG_ON(!cachep->memcg_params);
3468 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3469
0e9d92f2
GC
3470 if (!current->mm || current->memcg_kmem_skip_account)
3471 return cachep;
3472
d7f25f8a
GC
3473 rcu_read_lock();
3474 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
d7f25f8a
GC
3475
3476 if (!memcg_can_account_kmem(memcg))
ca0dde97 3477 goto out;
d7f25f8a 3478
959c8963
VD
3479 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3480 if (likely(memcg_cachep)) {
3481 cachep = memcg_cachep;
ca0dde97 3482 goto out;
d7f25f8a
GC
3483 }
3484
ca0dde97
LZ
3485 /* The corresponding put will be done in the workqueue. */
3486 if (!css_tryget(&memcg->css))
3487 goto out;
3488 rcu_read_unlock();
3489
3490 /*
3491 * If we are in a safe context (can wait, and not in interrupt
3492 * context), we could be predictable and return right away.
3493 * This would guarantee that the allocation being performed
3494 * already belongs in the new cache.
3495 *
3496 * However, there are some clashes that can arise from locking.
3497 * For instance, because we acquire the slab_mutex while doing
3498 * kmem_cache_dup, no further allocation could happen
3499 * with the slab_mutex held.
3500 *
3501 * Also, because cache creation issues get_online_cpus(), this
3502 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3503 * that ends up reversed during cpu hotplug. (cpuset allocates
3504 * a bunch of GFP_KERNEL memory during cpuup.) Due to all that,
3505 * it is better to defer everything.
3506 */
3507 memcg_create_cache_enqueue(memcg, cachep);
3508 return cachep;
3509out:
3510 rcu_read_unlock();
3511 return cachep;
d7f25f8a
GC
3512}
3513EXPORT_SYMBOL(__memcg_kmem_get_cache);
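
/*
 * Illustrative sketch, not part of the original file: how a slab allocation
 * path is expected to relay the allocation to the per-memcg cache via the
 * memcg_kmem_get_cache() wrapper declared in memcontrol.h.
 * example_slab_alloc() is a made-up name used only for this sketch.
 */
static void *example_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	/* Pick the current memcg's copy of the cache, or the root cache. */
	cachep = memcg_kmem_get_cache(cachep, flags);
	return kmem_cache_alloc(cachep, flags);
}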
3514
7ae1e1d0
GC
3515/*
3516 * We need to verify if the allocation against current->mm->owner's memcg is
3517 * possible for the given order. But the page is not allocated yet, so we'll
3518 * need a further commit step to do the final arrangements.
3519 *
3520 * It is possible for the task to switch cgroups in the meantime, so at
3521 * commit time, we can't rely on task conversion any longer. We'll then use
3522 * the handle argument to return to the caller which cgroup we should commit
3523 * against. We could also return the memcg directly and avoid the pointer
3524 * passing, but a boolean return value gives better semantics considering
3525 * the compiled-out case as well.
3526 *
3527 * Returning true means the allocation is possible.
3528 */
3529bool
3530__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3531{
3532 struct mem_cgroup *memcg;
3533 int ret;
3534
3535 *_memcg = NULL;
6d42c232
GC
3536
3537 /*
3538 * Disabling accounting is only relevant for some specific memcg
3539 * internal allocations. Therefore we would initially not have such a
3540 * check here, since direct calls to the page allocator that are marked
3541 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3542 * concerned with cache allocations, and by having this test at
3543 * memcg_kmem_get_cache, we are already able to relay the allocation to
3544 * the root cache and bypass the memcg cache altogether.
3545 *
3546 * There is one exception, though: the SLUB allocator does not create
3547 * large order caches, but rather services large kmallocs directly from
3548 * the page allocator. Therefore, the following sequence when backed by
3549 * the SLUB allocator:
3550 *
f894ffa8
AM
3551 * memcg_stop_kmem_account();
3552 * kmalloc(<large_number>)
3553 * memcg_resume_kmem_account();
6d42c232
GC
3554 *
3555 * would effectively ignore the fact that we should skip accounting,
3556 * since it will drive us directly to this function without passing
3557 * through the cache selector memcg_kmem_get_cache. Such large
3558 * allocations are extremely rare but can happen, for instance, for the
3559 * cache arrays. We bring this test here.
3560 */
3561 if (!current->mm || current->memcg_kmem_skip_account)
3562 return true;
3563
df381975 3564 memcg = get_mem_cgroup_from_mm(current->mm);
7ae1e1d0
GC
3565
3566 if (!memcg_can_account_kmem(memcg)) {
3567 css_put(&memcg->css);
3568 return true;
3569 }
3570
7ae1e1d0
GC
3571 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3572 if (!ret)
3573 *_memcg = memcg;
7ae1e1d0
GC
3574
3575 css_put(&memcg->css);
3576 return (ret == 0);
3577}
3578
3579void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3580 int order)
3581{
3582 struct page_cgroup *pc;
3583
3584 VM_BUG_ON(mem_cgroup_is_root(memcg));
3585
3586 /* The page allocation failed. Revert */
3587 if (!page) {
3588 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
7ae1e1d0
GC
3589 return;
3590 }
3591
3592 pc = lookup_page_cgroup(page);
3593 lock_page_cgroup(pc);
3594 pc->mem_cgroup = memcg;
3595 SetPageCgroupUsed(pc);
3596 unlock_page_cgroup(pc);
3597}
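
/*
 * Illustrative sketch, not part of the original file: how a __GFP_KMEMCG
 * page allocation is expected to pair the charge and commit steps above,
 * going through the memcg_kmem_newpage_charge()/memcg_kmem_commit_charge()
 * wrappers from memcontrol.h. example_alloc_pages_kmemcg() is a made-up
 * name used only for this sketch.
 */
static struct page *example_alloc_pages_kmemcg(gfp_t gfp, unsigned int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* Charge first; no page exists yet, so only the memcg is resolved. */
	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;
	page = alloc_pages(gfp, order);
	/* Commit binds the charge to the page, or reverts it if !page. */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;
}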
3598
3599void __memcg_kmem_uncharge_pages(struct page *page, int order)
3600{
3601 struct mem_cgroup *memcg = NULL;
3602 struct page_cgroup *pc;
3603
3604
3605 pc = lookup_page_cgroup(page);
3606 /*
3607 * Fast unlocked path. The flag might theoretically have changed, so we
3608 * have to check again after locking.
3609 */
3610 if (!PageCgroupUsed(pc))
3611 return;
3612
3613 lock_page_cgroup(pc);
3614 if (PageCgroupUsed(pc)) {
3615 memcg = pc->mem_cgroup;
3616 ClearPageCgroupUsed(pc);
3617 }
3618 unlock_page_cgroup(pc);
3619
3620 /*
3621 * We trust that the page is a valid (accounted) allocation only if there
3622 * is a memcg associated with it.
3623 */
3624 if (!memcg)
3625 return;
3626
309381fe 3627 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
7ae1e1d0 3628 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
7ae1e1d0 3629}
1f458cbf
GC
3630#else
3631static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3632{
3633}
7ae1e1d0
GC
3634#endif /* CONFIG_MEMCG_KMEM */
3635
ca3e0214
KH
3636#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3637
a0db00fc 3638#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
ca3e0214
KH
3639/*
3640 * Because tail pages are not marked as "used", set it. We're under
e94c8a9c
KH
3641 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3642 * charge/uncharge will never happen and move_account() is done under
3643 * compound_lock(), so we don't have to take care of races.
ca3e0214 3644 */
e94c8a9c 3645void mem_cgroup_split_huge_fixup(struct page *head)
ca3e0214
KH
3646{
3647 struct page_cgroup *head_pc = lookup_page_cgroup(head);
e94c8a9c 3648 struct page_cgroup *pc;
b070e65c 3649 struct mem_cgroup *memcg;
e94c8a9c 3650 int i;
ca3e0214 3651
3d37c4a9
KH
3652 if (mem_cgroup_disabled())
3653 return;
b070e65c
DR
3654
3655 memcg = head_pc->mem_cgroup;
e94c8a9c
KH
3656 for (i = 1; i < HPAGE_PMD_NR; i++) {
3657 pc = head_pc + i;
b070e65c 3658 pc->mem_cgroup = memcg;
e94c8a9c 3659 smp_wmb();/* see __commit_charge() */
e94c8a9c
KH
3660 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3661 }
b070e65c
DR
3662 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3663 HPAGE_PMD_NR);
ca3e0214 3664}
12d27107 3665#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
ca3e0214 3666
f817ed48 3667/**
de3638d9 3668 * mem_cgroup_move_account - move account of the page
5564e88b 3669 * @page: the page
7ec99d62 3670 * @nr_pages: number of regular pages (>1 for huge pages)
f817ed48
KH
3671 * @pc: page_cgroup of the page.
3672 * @from: mem_cgroup which the page is moved from.
3673 * @to: mem_cgroup which the page is moved to. @from != @to.
3674 *
3675 * The caller must confirm the following.
08e552c6 3676 * - page is not on LRU (isolate_page() is useful.)
7ec99d62 3677 * - compound_lock is held when nr_pages > 1
f817ed48 3678 *
2f3479b1
KH
3679 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3680 * from old cgroup.
f817ed48 3681 */
7ec99d62
JW
3682static int mem_cgroup_move_account(struct page *page,
3683 unsigned int nr_pages,
3684 struct page_cgroup *pc,
3685 struct mem_cgroup *from,
2f3479b1 3686 struct mem_cgroup *to)
f817ed48 3687{
de3638d9
JW
3688 unsigned long flags;
3689 int ret;
b2402857 3690 bool anon = PageAnon(page);
987eba66 3691
f817ed48 3692 VM_BUG_ON(from == to);
309381fe 3693 VM_BUG_ON_PAGE(PageLRU(page), page);
de3638d9
JW
3694 /*
3695 * The page is isolated from LRU. So, collapse function
3696 * will not handle this page. But page splitting can happen.
3697 * Do this check under compound_page_lock(). The caller should
3698 * hold it.
3699 */
3700 ret = -EBUSY;
7ec99d62 3701 if (nr_pages > 1 && !PageTransHuge(page))
de3638d9
JW
3702 goto out;
3703
3704 lock_page_cgroup(pc);
3705
3706 ret = -EINVAL;
3707 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3708 goto unlock;
3709
312734c0 3710 move_lock_mem_cgroup(from, &flags);
f817ed48 3711
59d1d256
JW
3712 if (!anon && page_mapped(page)) {
3713 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3714 nr_pages);
3715 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3716 nr_pages);
3717 }
3ea67d06 3718
59d1d256
JW
3719 if (PageWriteback(page)) {
3720 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3721 nr_pages);
3722 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3723 nr_pages);
3724 }
3ea67d06 3725
b070e65c 3726 mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
d69b042f 3727
854ffa8d 3728 /* caller should have done css_get */
08e552c6 3729 pc->mem_cgroup = to;
b070e65c 3730 mem_cgroup_charge_statistics(to, page, anon, nr_pages);
312734c0 3731 move_unlock_mem_cgroup(from, &flags);
de3638d9
JW
3732 ret = 0;
3733unlock:
57f9fd7d 3734 unlock_page_cgroup(pc);
d2265e6f
KH
3735 /*
3736 * check events
3737 */
5564e88b
JW
3738 memcg_check_events(to, page);
3739 memcg_check_events(from, page);
de3638d9 3740out:
f817ed48
KH
3741 return ret;
3742}
3743
2ef37d3f
MH
3744/**
3745 * mem_cgroup_move_parent - moves page to the parent group
3746 * @page: the page to move
3747 * @pc: page_cgroup of the page
3748 * @child: page's cgroup
3749 *
3750 * move charges to its parent or the root cgroup if the group has no
3751 * parent (aka use_hierarchy==0).
3752 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3753 * mem_cgroup_move_account fails) the failure is always temporary and
3754 * it signals a race with a page removal/uncharge or migration. In the
3755 * first case the page is on the way out and it will vanish from the LRU
3756 * on the next attempt and the call should be retried later.
3757 * Isolation from the LRU fails only if page has been isolated from
3758 * the LRU since we looked at it and that usually means either global
3759 * reclaim or migration going on. The page will either get back to the
3760 * LRU or vanish.
3761 * Finally, mem_cgroup_move_account fails only if the page got uncharged
3762 * (!PageCgroupUsed) or moved to a different group. The page will
3763 * disappear in the next attempt.
f817ed48 3764 */
5564e88b
JW
3765static int mem_cgroup_move_parent(struct page *page,
3766 struct page_cgroup *pc,
6068bf01 3767 struct mem_cgroup *child)
f817ed48 3768{
f817ed48 3769 struct mem_cgroup *parent;
7ec99d62 3770 unsigned int nr_pages;
4be4489f 3771 unsigned long uninitialized_var(flags);
f817ed48
KH
3772 int ret;
3773
d8423011 3774 VM_BUG_ON(mem_cgroup_is_root(child));
f817ed48 3775
57f9fd7d
DN
3776 ret = -EBUSY;
3777 if (!get_page_unless_zero(page))
3778 goto out;
3779 if (isolate_lru_page(page))
3780 goto put;
52dbb905 3781
7ec99d62 3782 nr_pages = hpage_nr_pages(page);
08e552c6 3783
cc926f78
KH
3784 parent = parent_mem_cgroup(child);
3785 /*
3786 * If no parent, move charges to root cgroup.
3787 */
3788 if (!parent)
3789 parent = root_mem_cgroup;
f817ed48 3790
2ef37d3f 3791 if (nr_pages > 1) {
309381fe 3792 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
987eba66 3793 flags = compound_lock_irqsave(page);
2ef37d3f 3794 }
987eba66 3795
cc926f78 3796 ret = mem_cgroup_move_account(page, nr_pages,
2f3479b1 3797 pc, child, parent);
cc926f78
KH
3798 if (!ret)
3799 __mem_cgroup_cancel_local_charge(child, nr_pages);
8dba474f 3800
7ec99d62 3801 if (nr_pages > 1)
987eba66 3802 compound_unlock_irqrestore(page, flags);
08e552c6 3803 putback_lru_page(page);
57f9fd7d 3804put:
40d58138 3805 put_page(page);
57f9fd7d 3806out:
f817ed48
KH
3807 return ret;
3808}
3809
d715ae08 3810int mem_cgroup_charge_anon(struct page *page,
1bec6b33 3811 struct mm_struct *mm, gfp_t gfp_mask)
7a81b88c 3812{
7ec99d62 3813 unsigned int nr_pages = 1;
6d1fdc48 3814 struct mem_cgroup *memcg;
8493ae43 3815 bool oom = true;
ec168510 3816
1bec6b33
JW
3817 if (mem_cgroup_disabled())
3818 return 0;
3819
3820 VM_BUG_ON_PAGE(page_mapped(page), page);
3821 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3822 VM_BUG_ON(!mm);
3823
37c2ac78 3824 if (PageTransHuge(page)) {
7ec99d62 3825 nr_pages <<= compound_order(page);
309381fe 3826 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
8493ae43
JW
3827 /*
3828 * Never OOM-kill a process for a huge page. The
3829 * fault handler will fall back to regular pages.
3830 */
3831 oom = false;
37c2ac78 3832 }
7a81b88c 3833
6d1fdc48
JW
3834 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3835 if (!memcg)
3836 return -ENOMEM;
1bec6b33
JW
3837 __mem_cgroup_commit_charge(memcg, page, nr_pages,
3838 MEM_CGROUP_CHARGE_TYPE_ANON, false);
8a9f3ccd 3839 return 0;
8a9f3ccd
BS
3840}
3841
54595fe2
KH
3842/*
3843 * During swap-in (try_charge -> commit or cancel), the page is locked.
3844 * And when try_charge() successfully returns, one refcnt to memcg without
21ae2956 3845 * struct page_cgroup is acquired. This refcnt will be consumed by
54595fe2
KH
3846 * "commit()" or removed by "cancel()"
3847 */
0435a2fd
JW
3848static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3849 struct page *page,
3850 gfp_t mask,
3851 struct mem_cgroup **memcgp)
8c7c6e34 3852{
6d1fdc48 3853 struct mem_cgroup *memcg = NULL;
90deb788 3854 struct page_cgroup *pc;
54595fe2 3855 int ret;
8c7c6e34 3856
90deb788
JW
3857 pc = lookup_page_cgroup(page);
3858 /*
3859 * Every swap fault against a single page tries to charge the
3860 * page, bail as early as possible. shmem_unuse() encounters
3861 * already charged pages, too. The USED bit is protected by
3862 * the page lock, which serializes swap cache removal, which
3863 * in turn serializes uncharging.
3864 */
3865 if (PageCgroupUsed(pc))
6d1fdc48
JW
3866 goto out;
3867 if (do_swap_account)
3868 memcg = try_get_mem_cgroup_from_page(page);
c0ff4b85 3869 if (!memcg)
6d1fdc48
JW
3870 memcg = get_mem_cgroup_from_mm(mm);
3871 ret = mem_cgroup_try_charge(memcg, mask, 1, true);
c0ff4b85 3872 css_put(&memcg->css);
38c5d72f 3873 if (ret == -EINTR)
6d1fdc48
JW
3874 memcg = root_mem_cgroup;
3875 else if (ret)
3876 return ret;
3877out:
3878 *memcgp = memcg;
3879 return 0;
8c7c6e34
KH
3880}
3881
0435a2fd
JW
3882int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3883 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3884{
6d1fdc48
JW
3885 if (mem_cgroup_disabled()) {
3886 *memcgp = NULL;
0435a2fd 3887 return 0;
6d1fdc48 3888 }
bdf4f4d2
JW
3889 /*
3890 * A racing thread's fault, or swapoff, may have already
3891 * updated the pte, and even removed page from swap cache: in
3892 * those cases unuse_pte()'s pte_same() test will fail; but
3893 * there's also a KSM case which does need to charge the page.
3894 */
3895 if (!PageSwapCache(page)) {
6d1fdc48 3896 struct mem_cgroup *memcg;
bdf4f4d2 3897
6d1fdc48
JW
3898 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3899 if (!memcg)
3900 return -ENOMEM;
3901 *memcgp = memcg;
3902 return 0;
bdf4f4d2 3903 }
0435a2fd
JW
3904 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3905}
3906
827a03d2
JW
3907void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3908{
3909 if (mem_cgroup_disabled())
3910 return;
3911 if (!memcg)
3912 return;
3913 __mem_cgroup_cancel_charge(memcg, 1);
3914}
3915
83aae4c7 3916static void
72835c86 3917__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
83aae4c7 3918 enum charge_type ctype)
7a81b88c 3919{
f8d66542 3920 if (mem_cgroup_disabled())
7a81b88c 3921 return;
72835c86 3922 if (!memcg)
7a81b88c 3923 return;
5a6475a4 3924
ce587e65 3925 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
8c7c6e34
KH
3926 /*
3927 * Now swap is on-memory. This means this page may be
3928 * counted both as mem and swap....double count.
03f3c433
KH
3929 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3930 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3931 * may call delete_from_swap_cache() before reach here.
8c7c6e34 3932 */
03f3c433 3933 if (do_swap_account && PageSwapCache(page)) {
8c7c6e34 3934 swp_entry_t ent = {.val = page_private(page)};
86493009 3935 mem_cgroup_uncharge_swap(ent);
8c7c6e34 3936 }
7a81b88c
KH
3937}
3938
72835c86
JW
3939void mem_cgroup_commit_charge_swapin(struct page *page,
3940 struct mem_cgroup *memcg)
83aae4c7 3941{
72835c86 3942 __mem_cgroup_commit_charge_swapin(page, memcg,
41326c17 3943 MEM_CGROUP_CHARGE_TYPE_ANON);
83aae4c7
DN
3944}
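
/*
 * Illustrative sketch, not part of the original file: the try_charge ->
 * commit-or-cancel protocol above as a swap fault handler is expected to
 * use it. example_swapin_charge() and the "mapped" outcome are assumptions
 * made only for this sketch.
 */
static int example_swapin_charge(struct mm_struct *mm, struct page *page,
				 gfp_t gfp, bool mapped)
{
	struct mem_cgroup *memcg;
	int ret;

	ret = mem_cgroup_try_charge_swapin(mm, page, gfp, &memcg);
	if (ret)
		return ret;
	if (!mapped) {
		/* The pte was not set up: drop the charge taken above. */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EAGAIN;
	}
	/* The pte is in place: commit the charge to this page. */
	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}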
3945
d715ae08 3946int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
827a03d2 3947 gfp_t gfp_mask)
7a81b88c 3948{
827a03d2 3949 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
6d1fdc48 3950 struct mem_cgroup *memcg;
827a03d2
JW
3951 int ret;
3952
f8d66542 3953 if (mem_cgroup_disabled())
827a03d2
JW
3954 return 0;
3955 if (PageCompound(page))
3956 return 0;
3957
6d1fdc48 3958 if (PageSwapCache(page)) { /* shmem */
0435a2fd
JW
3959 ret = __mem_cgroup_try_charge_swapin(mm, page,
3960 gfp_mask, &memcg);
6d1fdc48
JW
3961 if (ret)
3962 return ret;
3963 __mem_cgroup_commit_charge_swapin(page, memcg, type);
3964 return 0;
827a03d2 3965 }
6d1fdc48
JW
3966
3967 /*
3968 * Page cache insertions can happen without an actual mm
3969 * context, e.g. during disk probing on boot.
3970 */
3971 if (unlikely(!mm))
3972 memcg = root_mem_cgroup;
3973 else {
3974 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3975 if (!memcg)
3976 return -ENOMEM;
3977 }
3978 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3979 return 0;
7a81b88c
KH
3980}
3981
c0ff4b85 3982static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
7ec99d62
JW
3983 unsigned int nr_pages,
3984 const enum charge_type ctype)
569b846d
KH
3985{
3986 struct memcg_batch_info *batch = NULL;
3987 bool uncharge_memsw = true;
7ec99d62 3988
569b846d
KH
3989 /* If swapout, usage of swap doesn't decrease */
3990 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3991 uncharge_memsw = false;
569b846d
KH
3992
3993 batch = &current->memcg_batch;
3994 /*
3995 * Usually, we do css_get() when we remember the memcg pointer.
3996 * But in this case, we keep res->usage until end of a series of
3997 * uncharges. Then, it's ok to ignore memcg's refcnt.
3998 */
3999 if (!batch->memcg)
c0ff4b85 4000 batch->memcg = memcg;
3c11ecf4
KH
4001 /*
4002 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
25985edc 4003 * In those cases, all pages freed continuously can be expected to be in
3c11ecf4
KH
4004 * the same cgroup and we have a chance to coalesce uncharges.
4005 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
4006 * because we want to do uncharge as soon as possible.
4007 */
4008
4009 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4010 goto direct_uncharge;
4011
7ec99d62 4012 if (nr_pages > 1)
ec168510
AA
4013 goto direct_uncharge;
4014
569b846d
KH
4015 /*
4016 * In the typical case, batch->memcg == mem. This means we can
4017 * merge a series of uncharges to an uncharge of res_counter.
4018 * If not, we uncharge res_counter one by one.
4019 */
c0ff4b85 4020 if (batch->memcg != memcg)
569b846d
KH
4021 goto direct_uncharge;
4022 /* remember freed charge and uncharge it later */
7ffd4ca7 4023 batch->nr_pages++;
569b846d 4024 if (uncharge_memsw)
7ffd4ca7 4025 batch->memsw_nr_pages++;
569b846d
KH
4026 return;
4027direct_uncharge:
c0ff4b85 4028 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
569b846d 4029 if (uncharge_memsw)
c0ff4b85
R
4030 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4031 if (unlikely(batch->memcg != memcg))
4032 memcg_oom_recover(memcg);
569b846d 4033}
7a81b88c 4034
8a9f3ccd 4035/*
69029cd5 4036 * uncharge if !page_mapped(page)
8a9f3ccd 4037 */
8c7c6e34 4038static struct mem_cgroup *
0030f535
JW
4039__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4040 bool end_migration)
8a9f3ccd 4041{
c0ff4b85 4042 struct mem_cgroup *memcg = NULL;
7ec99d62
JW
4043 unsigned int nr_pages = 1;
4044 struct page_cgroup *pc;
b2402857 4045 bool anon;
8a9f3ccd 4046
f8d66542 4047 if (mem_cgroup_disabled())
8c7c6e34 4048 return NULL;
4077960e 4049
37c2ac78 4050 if (PageTransHuge(page)) {
7ec99d62 4051 nr_pages <<= compound_order(page);
309381fe 4052 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
37c2ac78 4053 }
8697d331 4054 /*
3c541e14 4055 * Check if our page_cgroup is valid
8697d331 4056 */
52d4b9ac 4057 pc = lookup_page_cgroup(page);
cfa44946 4058 if (unlikely(!PageCgroupUsed(pc)))
8c7c6e34 4059 return NULL;
b9c565d5 4060
52d4b9ac 4061 lock_page_cgroup(pc);
d13d1443 4062
c0ff4b85 4063 memcg = pc->mem_cgroup;
8c7c6e34 4064
d13d1443
KH
4065 if (!PageCgroupUsed(pc))
4066 goto unlock_out;
4067
b2402857
KH
4068 anon = PageAnon(page);
4069
d13d1443 4070 switch (ctype) {
41326c17 4071 case MEM_CGROUP_CHARGE_TYPE_ANON:
2ff76f11
KH
4072 /*
4073 * Generally PageAnon tells if it's the anon statistics to be
4074 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4075 * used before page reached the stage of being marked PageAnon.
4076 */
b2402857
KH
4077 anon = true;
4078 /* fallthrough */
8a9478ca 4079 case MEM_CGROUP_CHARGE_TYPE_DROP:
ac39cf8c 4080 /* See mem_cgroup_prepare_migration() */
0030f535
JW
4081 if (page_mapped(page))
4082 goto unlock_out;
4083 /*
4084 * Pages under migration may not be uncharged. But
4085 * end_migration() /must/ be the one uncharging the
4086 * unused post-migration page and so it has to call
4087 * here with the migration bit still set. See the
4088 * res_counter handling below.
4089 */
4090 if (!end_migration && PageCgroupMigration(pc))
d13d1443
KH
4091 goto unlock_out;
4092 break;
4093 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4094 if (!PageAnon(page)) { /* Shared memory */
4095 if (page->mapping && !page_is_file_cache(page))
4096 goto unlock_out;
4097 } else if (page_mapped(page)) /* Anon */
4098 goto unlock_out;
4099 break;
4100 default:
4101 break;
52d4b9ac 4102 }
d13d1443 4103
b070e65c 4104 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
04046e1a 4105
52d4b9ac 4106 ClearPageCgroupUsed(pc);
544122e5
KH
4107 /*
4108 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4109 * freed from LRU. This is safe because uncharged page is expected not
4110 * to be reused (freed soon). Exception is SwapCache, it's handled by
4111 * special functions.
4112 */
b9c565d5 4113
52d4b9ac 4114 unlock_page_cgroup(pc);
f75ca962 4115 /*
c0ff4b85 4116 * even after unlock, we have memcg->res.usage here and this memcg
4050377b 4117 * will never be freed, so it's safe to call css_get().
f75ca962 4118 */
c0ff4b85 4119 memcg_check_events(memcg, page);
f75ca962 4120 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
c0ff4b85 4121 mem_cgroup_swap_statistics(memcg, true);
4050377b 4122 css_get(&memcg->css);
f75ca962 4123 }
0030f535
JW
4124 /*
4125 * Migration does not charge the res_counter for the
4126 * replacement page, so leave it alone when phasing out the
4127 * page that is unused after the migration.
4128 */
4129 if (!end_migration && !mem_cgroup_is_root(memcg))
c0ff4b85 4130 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
6d12e2d8 4131
c0ff4b85 4132 return memcg;
d13d1443
KH
4133
4134unlock_out:
4135 unlock_page_cgroup(pc);
8c7c6e34 4136 return NULL;
3c541e14
BS
4137}
4138
69029cd5
KH
4139void mem_cgroup_uncharge_page(struct page *page)
4140{
52d4b9ac
KH
4141 /* early check. */
4142 if (page_mapped(page))
4143 return;
309381fe 4144 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
28ccddf7
JW
4145 /*
4146 * If the page is in swap cache, uncharge should be deferred
4147 * to the swap path, which also properly accounts swap usage
4148 * and handles memcg lifetime.
4149 *
4150 * Note that this check is not stable and reclaim may add the
4151 * page to swap cache at any time after this. However, if the
4152 * page is not in swap cache by the time page->mapcount hits
4153 * 0, there won't be any page table references to the swap
4154 * slot, and reclaim will free it and not actually write the
4155 * page to disk.
4156 */
0c59b89c
JW
4157 if (PageSwapCache(page))
4158 return;
0030f535 4159 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
69029cd5
KH
4160}
4161
4162void mem_cgroup_uncharge_cache_page(struct page *page)
4163{
309381fe
SL
4164 VM_BUG_ON_PAGE(page_mapped(page), page);
4165 VM_BUG_ON_PAGE(page->mapping, page);
0030f535 4166 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
69029cd5
KH
4167}
4168
569b846d
KH
4169/*
4170 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
4171 * In those cases, pages are freed continuously and we can expect them to
4172 * be in the same memcg. Each of these calls itself limits the number of
4173 * pages freed at once, and then uncharge_start/end() is called properly.
4174 * This may be called multiple times in a context.
4175 */
4176
4177void mem_cgroup_uncharge_start(void)
4178{
4179 current->memcg_batch.do_batch++;
4180 /* We can do nest. */
4181 if (current->memcg_batch.do_batch == 1) {
4182 current->memcg_batch.memcg = NULL;
7ffd4ca7
JW
4183 current->memcg_batch.nr_pages = 0;
4184 current->memcg_batch.memsw_nr_pages = 0;
569b846d
KH
4185 }
4186}
4187
4188void mem_cgroup_uncharge_end(void)
4189{
4190 struct memcg_batch_info *batch = &current->memcg_batch;
4191
4192 if (!batch->do_batch)
4193 return;
4194
4195 batch->do_batch--;
4196 if (batch->do_batch) /* If stacked, do nothing. */
4197 return;
4198
4199 if (!batch->memcg)
4200 return;
4201 /*
4202 * This "batch->memcg" is valid without any css_get/put etc...
4203 * because we hide charges behind us.
4204 */
7ffd4ca7
JW
4205 if (batch->nr_pages)
4206 res_counter_uncharge(&batch->memcg->res,
4207 batch->nr_pages * PAGE_SIZE);
4208 if (batch->memsw_nr_pages)
4209 res_counter_uncharge(&batch->memcg->memsw,
4210 batch->memsw_nr_pages * PAGE_SIZE);
3c11ecf4 4211 memcg_oom_recover(batch->memcg);
569b846d
KH
4212 /* forget this pointer (for sanity check) */
4213 batch->memcg = NULL;
4214}
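
/*
 * Illustrative sketch, not part of the original file: a caller freeing a
 * burst of page cache pages brackets the loop so the uncharges above can
 * be coalesced into one res_counter operation per batch.
 * example_uncharge_pages() is a made-up name used only for this sketch.
 */
static void example_uncharge_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();
}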
4215
e767e056 4216#ifdef CONFIG_SWAP
8c7c6e34 4217/*
e767e056 4218 * called after __delete_from_swap_cache() and drop "page" account.
8c7c6e34
KH
4219 * memcg information is recorded to swap_cgroup of "ent"
4220 */
8a9478ca
KH
4221void
4222mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
8c7c6e34
KH
4223{
4224 struct mem_cgroup *memcg;
8a9478ca
KH
4225 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4226
4227 if (!swapout) /* this was a swap cache but the swap is unused ! */
4228 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4229
0030f535 4230 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
8c7c6e34 4231
f75ca962
KH
4232 /*
4233 * record memcg information, if swapout && memcg != NULL,
4050377b 4234 * css_get() was called in uncharge().
f75ca962
KH
4235 */
4236 if (do_swap_account && swapout && memcg)
34c00c31 4237 swap_cgroup_record(ent, mem_cgroup_id(memcg));
8c7c6e34 4238}
e767e056 4239#endif
8c7c6e34 4240
c255a458 4241#ifdef CONFIG_MEMCG_SWAP
8c7c6e34
KH
4242/*
4243 * called from swap_entry_free(). remove record in swap_cgroup and
4244 * uncharge "memsw" account.
4245 */
4246void mem_cgroup_uncharge_swap(swp_entry_t ent)
d13d1443 4247{
8c7c6e34 4248 struct mem_cgroup *memcg;
a3b2d692 4249 unsigned short id;
8c7c6e34
KH
4250
4251 if (!do_swap_account)
4252 return;
4253
a3b2d692
KH
4254 id = swap_cgroup_record(ent, 0);
4255 rcu_read_lock();
4256 memcg = mem_cgroup_lookup(id);
8c7c6e34 4257 if (memcg) {
a3b2d692
KH
4258 /*
4259 * We uncharge this because swap is freed.
4260 * This memcg can be an obsolete one. We avoid calling css_tryget
4261 */
0c3e73e8 4262 if (!mem_cgroup_is_root(memcg))
4e649152 4263 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 4264 mem_cgroup_swap_statistics(memcg, false);
4050377b 4265 css_put(&memcg->css);
8c7c6e34 4266 }
a3b2d692 4267 rcu_read_unlock();
d13d1443 4268}
02491447
DN
4269
4270/**
4271 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4272 * @entry: swap entry to be moved
4273 * @from: mem_cgroup which the entry is moved from
4274 * @to: mem_cgroup which the entry is moved to
4275 *
4276 * It succeeds only when the swap_cgroup's record for this entry is the same
4277 * as the mem_cgroup's id of @from.
4278 *
4279 * Returns 0 on success, -EINVAL on failure.
4280 *
4281 * The caller must have charged to @to, IOW, called res_counter_charge() about
4282 * both res and memsw, and called css_get().
4283 */
4284static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 4285 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
4286{
4287 unsigned short old_id, new_id;
4288
34c00c31
LZ
4289 old_id = mem_cgroup_id(from);
4290 new_id = mem_cgroup_id(to);
02491447
DN
4291
4292 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 4293 mem_cgroup_swap_statistics(from, false);
483c30b5 4294 mem_cgroup_swap_statistics(to, true);
02491447 4295 /*
483c30b5
DN
4296 * This function is only called from task migration context now.
4297 * It postpones res_counter and refcount handling till the end
4298 * of task migration(mem_cgroup_clear_mc()) for performance
4050377b
LZ
4299 * improvement. But we cannot postpone css_get(to) because if
4300 * the process that has been moved to @to does swap-in, the
4301 * refcount of @to might be decreased to 0.
4302 *
4303 * We are in attach() phase, so the cgroup is guaranteed to be
4304 * alive, so we can just call css_get().
02491447 4305 */
4050377b 4306 css_get(&to->css);
02491447
DN
4307 return 0;
4308 }
4309 return -EINVAL;
4310}
4311#else
4312static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 4313 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
4314{
4315 return -EINVAL;
4316}
8c7c6e34 4317#endif
d13d1443 4318
ae41be37 4319/*
01b1ae63
KH
4320 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4321 * page belongs to.
ae41be37 4322 */
0030f535
JW
4323void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4324 struct mem_cgroup **memcgp)
ae41be37 4325{
c0ff4b85 4326 struct mem_cgroup *memcg = NULL;
b32967ff 4327 unsigned int nr_pages = 1;
7ec99d62 4328 struct page_cgroup *pc;
ac39cf8c 4329 enum charge_type ctype;
8869b8f6 4330
72835c86 4331 *memcgp = NULL;
56039efa 4332
f8d66542 4333 if (mem_cgroup_disabled())
0030f535 4334 return;
4077960e 4335
b32967ff
MG
4336 if (PageTransHuge(page))
4337 nr_pages <<= compound_order(page);
4338
52d4b9ac
KH
4339 pc = lookup_page_cgroup(page);
4340 lock_page_cgroup(pc);
4341 if (PageCgroupUsed(pc)) {
c0ff4b85
R
4342 memcg = pc->mem_cgroup;
4343 css_get(&memcg->css);
ac39cf8c 4344 /*
4345 * At migrating an anonymous page, its mapcount goes down
4346 * to 0 and uncharge() will be called. But, even if it's fully
4347 * unmapped, migration may fail and this page has to be
4348 * charged again. We set MIGRATION flag here and delay uncharge
4349 * until end_migration() is called
4350 *
4351 * Corner Case Thinking
4352 * A)
4353 * When the old page was mapped as Anon and it's unmap-and-freed
4354 * while migration was ongoing.
4355 * If unmap finds the old page, uncharge() of it will be delayed
4356 * until end_migration(). If unmap finds a new page, it's
4357 * uncharged when it make mapcount to be 1->0. If unmap code
4358 * finds swap_migration_entry, the new page will not be mapped
4359 * and end_migration() will find it(mapcount==0).
4360 *
4361 * B)
4362 * When the old page was mapped but migration fails, the kernel
4363 * remaps it. A charge for it is kept by MIGRATION flag even
4364 * if mapcount goes down to 0. We can do remap successfully
4365 * without charging it again.
4366 *
4367 * C)
4368 * The "old" page is under lock_page() until the end of
4369 * migration, so, the old page itself will not be swapped-out.
4370 * If the new page is swapped out before end_migration, our
4371 * hook to usual swap-out path will catch the event.
4372 */
4373 if (PageAnon(page))
4374 SetPageCgroupMigration(pc);
e8589cc1 4375 }
52d4b9ac 4376 unlock_page_cgroup(pc);
ac39cf8c 4377 /*
4378 * If the page is not charged at this point,
4379 * we return here.
4380 */
c0ff4b85 4381 if (!memcg)
0030f535 4382 return;
01b1ae63 4383
72835c86 4384 *memcgp = memcg;
ac39cf8c 4385 /*
4386 * We charge new page before it's used/mapped. So, even if unlock_page()
4387 * is called before end_migration, we can catch all events on this new
4388 * page. In the case new page is migrated but not remapped, new page's
4389 * mapcount will be finally 0 and we call uncharge in end_migration().
4390 */
ac39cf8c 4391 if (PageAnon(page))
41326c17 4392 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
ac39cf8c 4393 else
62ba7442 4394 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
0030f535
JW
4395 /*
4396 * The page is committed to the memcg, but it's not actually
4397 * charged to the res_counter since we plan on replacing the
4398 * old one and only one page is going to be left afterwards.
4399 */
b32967ff 4400 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
ae41be37 4401}
8869b8f6 4402
69029cd5 4403/* remove redundant charge if migration failed*/
c0ff4b85 4404void mem_cgroup_end_migration(struct mem_cgroup *memcg,
50de1dd9 4405 struct page *oldpage, struct page *newpage, bool migration_ok)
ae41be37 4406{
ac39cf8c 4407 struct page *used, *unused;
01b1ae63 4408 struct page_cgroup *pc;
b2402857 4409 bool anon;
01b1ae63 4410
c0ff4b85 4411 if (!memcg)
01b1ae63 4412 return;
b25ed609 4413
50de1dd9 4414 if (!migration_ok) {
ac39cf8c 4415 used = oldpage;
4416 unused = newpage;
01b1ae63 4417 } else {
ac39cf8c 4418 used = newpage;
01b1ae63
KH
4419 unused = oldpage;
4420 }
0030f535 4421 anon = PageAnon(used);
7d188958
JW
4422 __mem_cgroup_uncharge_common(unused,
4423 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4424 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4425 true);
0030f535 4426 css_put(&memcg->css);
69029cd5 4427 /*
ac39cf8c 4428 * We disallowed uncharge of pages under migration because mapcount
4429 * of the page goes down to zero, temporarily.
4430 * Clear the flag and check whether the page should be charged.
01b1ae63 4431 */
ac39cf8c 4432 pc = lookup_page_cgroup(oldpage);
4433 lock_page_cgroup(pc);
4434 ClearPageCgroupMigration(pc);
4435 unlock_page_cgroup(pc);
ac39cf8c 4436
01b1ae63 4437 /*
ac39cf8c 4438 * If a page is a file cache, radix-tree replacement is very atomic
4439 * and we can skip this check. When it was an Anon page, its mapcount
4440 * goes down to 0. But because we added the MIGRATION flag, it's not
4441 * uncharged yet. There are several cases, but the page->mapcount check
4442 * and USED bit check in mem_cgroup_uncharge_page() will do enough
4443 * check. (see prepare_charge() also)
69029cd5 4444 */
b2402857 4445 if (anon)
ac39cf8c 4446 mem_cgroup_uncharge_page(used);
ae41be37 4447}
78fb7466 4448
ab936cbc
KH
4449/*
4450 * At replace page cache, newpage is not under any memcg but it's on
4451 * LRU. So, this function doesn't touch res_counter but handles LRU
4452 * in correct way. Both pages are locked so we cannot race with uncharge.
4453 */
4454void mem_cgroup_replace_page_cache(struct page *oldpage,
4455 struct page *newpage)
4456{
bde05d1c 4457 struct mem_cgroup *memcg = NULL;
ab936cbc 4458 struct page_cgroup *pc;
ab936cbc 4459 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
ab936cbc
KH
4460
4461 if (mem_cgroup_disabled())
4462 return;
4463
4464 pc = lookup_page_cgroup(oldpage);
4465 /* fix accounting on old pages */
4466 lock_page_cgroup(pc);
bde05d1c
HD
4467 if (PageCgroupUsed(pc)) {
4468 memcg = pc->mem_cgroup;
b070e65c 4469 mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
bde05d1c
HD
4470 ClearPageCgroupUsed(pc);
4471 }
ab936cbc
KH
4472 unlock_page_cgroup(pc);
4473
bde05d1c
HD
4474 /*
4475 * When called from shmem_replace_page(), in some cases the
4476 * oldpage has already been charged, and in some cases not.
4477 */
4478 if (!memcg)
4479 return;
ab936cbc
KH
4480 /*
4481 * Even if newpage->mapping was NULL before starting replacement,
4482 * the newpage may be on LRU(or pagevec for LRU) already. We lock
4483 * LRU while we overwrite pc->mem_cgroup.
4484 */
ce587e65 4485 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
ab936cbc
KH
4486}
4487
f212ad7c
DN
4488#ifdef CONFIG_DEBUG_VM
4489static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4490{
4491 struct page_cgroup *pc;
4492
4493 pc = lookup_page_cgroup(page);
cfa44946
JW
4494 /*
4495 * Can be NULL while feeding pages into the page allocator for
4496 * the first time, i.e. during boot or memory hotplug;
4497 * or when mem_cgroup_disabled().
4498 */
f212ad7c
DN
4499 if (likely(pc) && PageCgroupUsed(pc))
4500 return pc;
4501 return NULL;
4502}
4503
4504bool mem_cgroup_bad_page_check(struct page *page)
4505{
4506 if (mem_cgroup_disabled())
4507 return false;
4508
4509 return lookup_page_cgroup_used(page) != NULL;
4510}
4511
4512void mem_cgroup_print_bad_page(struct page *page)
4513{
4514 struct page_cgroup *pc;
4515
4516 pc = lookup_page_cgroup_used(page);
4517 if (pc) {
d045197f
AM
4518 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4519 pc, pc->flags, pc->mem_cgroup);
f212ad7c
DN
4520 }
4521}
4522#endif
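
/*
 * Illustrative sketch, not part of the original file: how a CONFIG_DEBUG_VM
 * page sanity check might consult the two helpers above when a page is
 * freed. example_page_is_sane() is a made-up name used only for this sketch.
 */
#ifdef CONFIG_DEBUG_VM
static bool example_page_is_sane(struct page *page)
{
	if (mem_cgroup_bad_page_check(page)) {
		/* The page still carries a used page_cgroup: report it. */
		mem_cgroup_print_bad_page(page);
		return false;
	}
	return true;
}
#endif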
4523
d38d2a75 4524static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
8c7c6e34 4525 unsigned long long val)
628f4235 4526{
81d39c20 4527 int retry_count;
3c11ecf4 4528 u64 memswlimit, memlimit;
628f4235 4529 int ret = 0;
81d39c20
KH
4530 int children = mem_cgroup_count_children(memcg);
4531 u64 curusage, oldusage;
3c11ecf4 4532 int enlarge;
81d39c20
KH
4533
4534 /*
4535 * For keeping hierarchical_reclaim simple, how long we should retry
4536 * depends on the callers. We set our retry-count to be a function
4537 * of the # of children which we should visit in this loop.
4538 */
4539 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4540
4541 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
628f4235 4542
3c11ecf4 4543 enlarge = 0;
8c7c6e34 4544 while (retry_count) {
628f4235
KH
4545 if (signal_pending(current)) {
4546 ret = -EINTR;
4547 break;
4548 }
8c7c6e34
KH
4549 /*
4550 * Rather than hide all this in some function, do it here in an
4551 * open-coded manner so you can see what it really does.
aaad153e 4552 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
8c7c6e34
KH
4553 */
4554 mutex_lock(&set_limit_mutex);
4555 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4556 if (memswlimit < val) {
4557 ret = -EINVAL;
4558 mutex_unlock(&set_limit_mutex);
628f4235
KH
4559 break;
4560 }
3c11ecf4
KH
4561
4562 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4563 if (memlimit < val)
4564 enlarge = 1;
4565
8c7c6e34 4566 ret = res_counter_set_limit(&memcg->res, val);
22a668d7
KH
4567 if (!ret) {
4568 if (memswlimit == val)
4569 memcg->memsw_is_minimum = true;
4570 else
4571 memcg->memsw_is_minimum = false;
4572 }
8c7c6e34
KH
4573 mutex_unlock(&set_limit_mutex);
4574
4575 if (!ret)
4576 break;
4577
5660048c
JW
4578 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4579 MEM_CGROUP_RECLAIM_SHRINK);
81d39c20
KH
4580 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4581 /* Usage is reduced ? */
f894ffa8 4582 if (curusage >= oldusage)
81d39c20
KH
4583 retry_count--;
4584 else
4585 oldusage = curusage;
8c7c6e34 4586 }
3c11ecf4
KH
4587 if (!ret && enlarge)
4588 memcg_oom_recover(memcg);
14797e23 4589
8c7c6e34
KH
4590 return ret;
4591}
4592
338c8431
LZ
4593static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4594 unsigned long long val)
8c7c6e34 4595{
81d39c20 4596 int retry_count;
3c11ecf4 4597 u64 memlimit, memswlimit, oldusage, curusage;
81d39c20
KH
4598 int children = mem_cgroup_count_children(memcg);
4599 int ret = -EBUSY;
3c11ecf4 4600 int enlarge = 0;
8c7c6e34 4601
81d39c20 4602 /* see mem_cgroup_resize_res_limit */
f894ffa8 4603 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
81d39c20 4604 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
8c7c6e34
KH
4605 while (retry_count) {
4606 if (signal_pending(current)) {
4607 ret = -EINTR;
4608 break;
4609 }
4610 /*
4611 * Rather than hide all this in some function, do it here in an
4612 * open-coded manner so you can see what it really does.
aaad153e 4613 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
8c7c6e34
KH
4614 */
4615 mutex_lock(&set_limit_mutex);
4616 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4617 if (memlimit > val) {
4618 ret = -EINVAL;
4619 mutex_unlock(&set_limit_mutex);
4620 break;
4621 }
3c11ecf4
KH
4622 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4623 if (memswlimit < val)
4624 enlarge = 1;
8c7c6e34 4625 ret = res_counter_set_limit(&memcg->memsw, val);
22a668d7
KH
4626 if (!ret) {
4627 if (memlimit == val)
4628 memcg->memsw_is_minimum = true;
4629 else
4630 memcg->memsw_is_minimum = false;
4631 }
8c7c6e34
KH
4632 mutex_unlock(&set_limit_mutex);
4633
4634 if (!ret)
4635 break;
4636
5660048c
JW
4637 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4638 MEM_CGROUP_RECLAIM_NOSWAP |
4639 MEM_CGROUP_RECLAIM_SHRINK);
8c7c6e34 4640 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
81d39c20 4641 /* Usage is reduced ? */
8c7c6e34 4642 if (curusage >= oldusage)
628f4235 4643 retry_count--;
81d39c20
KH
4644 else
4645 oldusage = curusage;
628f4235 4646 }
3c11ecf4
KH
4647 if (!ret && enlarge)
4648 memcg_oom_recover(memcg);
628f4235
KH
4649 return ret;
4650}
4651
0608f43d
AM
4652unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4653 gfp_t gfp_mask,
4654 unsigned long *total_scanned)
4655{
4656 unsigned long nr_reclaimed = 0;
4657 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4658 unsigned long reclaimed;
4659 int loop = 0;
4660 struct mem_cgroup_tree_per_zone *mctz;
4661 unsigned long long excess;
4662 unsigned long nr_scanned;
4663
4664 if (order > 0)
4665 return 0;
4666
4667 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4668 /*
4669 * This loop can run a while, especially if mem_cgroups continuously
4670 * keep exceeding their soft limit and putting the system under
4671 * pressure.
4672 */
4673 do {
4674 if (next_mz)
4675 mz = next_mz;
4676 else
4677 mz = mem_cgroup_largest_soft_limit_node(mctz);
4678 if (!mz)
4679 break;
4680
4681 nr_scanned = 0;
4682 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4683 gfp_mask, &nr_scanned);
4684 nr_reclaimed += reclaimed;
4685 *total_scanned += nr_scanned;
4686 spin_lock(&mctz->lock);
4687
4688 /*
4689 * If we failed to reclaim anything from this memory cgroup
4690 * it is time to move on to the next cgroup
4691 */
4692 next_mz = NULL;
4693 if (!reclaimed) {
4694 do {
4695 /*
4696 * Loop until we find yet another one.
4697 *
4698 * By the time we get the soft_limit lock
4699 * again, someone might have added the
4700 * group back on the RB tree. Iterate to
4701 * make sure we get a different mem.
4702 * mem_cgroup_largest_soft_limit_node returns
4703 * NULL if no other cgroup is present on
4704 * the tree
4705 */
4706 next_mz =
4707 __mem_cgroup_largest_soft_limit_node(mctz);
4708 if (next_mz == mz)
4709 css_put(&next_mz->memcg->css);
4710 else /* next_mz == NULL or other memcg */
4711 break;
4712 } while (1);
4713 }
4714 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4715 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4716 /*
4717 * One school of thought says that we should not add
4718 * back the node to the tree if reclaim returns 0.
4719 * But our reclaim could return 0, simply because due
4720 * to priority we are exposing a smaller subset of
4721 * memory to reclaim from. Consider this as a longer
4722 * term TODO.
4723 */
4724 /* If excess == 0, no tree ops */
4725 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4726 spin_unlock(&mctz->lock);
4727 css_put(&mz->memcg->css);
4728 loop++;
4729 /*
4730 * Could not reclaim anything and there are no more
4731 * mem cgroups to try or we seem to be looping without
4732 * reclaiming anything.
4733 */
4734 if (!nr_reclaimed &&
4735 (next_mz == NULL ||
4736 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4737 break;
4738 } while (!nr_reclaimed);
4739 if (next_mz)
4740 css_put(&next_mz->memcg->css);
4741 return nr_reclaimed;
4742}
4743
2ef37d3f
MH
4744/**
4745 * mem_cgroup_force_empty_list - clears LRU of a group
4746 * @memcg: group to clear
4747 * @node: NUMA node
4748 * @zid: zone id
4749 * @lru: lru to clear
4750 *
3c935d18 4751 * Traverse a specified page_cgroup list and try to drop them all. This doesn't
2ef37d3f
MH
4752 * reclaim the pages themselves - pages are moved to the parent (or root)
4753 * group.
cc847582 4754 */
2ef37d3f 4755static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
08e552c6 4756 int node, int zid, enum lru_list lru)
cc847582 4757{
bea8c150 4758 struct lruvec *lruvec;
2ef37d3f 4759 unsigned long flags;
072c56c1 4760 struct list_head *list;
925b7673
JW
4761 struct page *busy;
4762 struct zone *zone;
072c56c1 4763
08e552c6 4764 zone = &NODE_DATA(node)->node_zones[zid];
bea8c150
HD
4765 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4766 list = &lruvec->lists[lru];
cc847582 4767
f817ed48 4768 busy = NULL;
2ef37d3f 4769 do {
925b7673 4770 struct page_cgroup *pc;
5564e88b
JW
4771 struct page *page;
4772
08e552c6 4773 spin_lock_irqsave(&zone->lru_lock, flags);
f817ed48 4774 if (list_empty(list)) {
08e552c6 4775 spin_unlock_irqrestore(&zone->lru_lock, flags);
52d4b9ac 4776 break;
f817ed48 4777 }
925b7673
JW
4778 page = list_entry(list->prev, struct page, lru);
4779 if (busy == page) {
4780 list_move(&page->lru, list);
648bcc77 4781 busy = NULL;
08e552c6 4782 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48
KH
4783 continue;
4784 }
08e552c6 4785 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48 4786
925b7673 4787 pc = lookup_page_cgroup(page);
5564e88b 4788
3c935d18 4789 if (mem_cgroup_move_parent(page, pc, memcg)) {
f817ed48 4790 /* found lock contention or "pc" is obsolete. */
925b7673 4791 busy = page;
f817ed48
KH
4792 cond_resched();
4793 } else
4794 busy = NULL;
2ef37d3f 4795 } while (!list_empty(list));
cc847582
KH
4796}
4797
4798/*
c26251f9
MH
4799 * Make the mem_cgroup's charge 0 if there is no task, by moving
4800 * all the charges and pages to the parent.
cc847582 4801 * This enables deleting this mem_cgroup.
c26251f9
MH
4802 *
4803 * Caller is responsible for holding css reference on the memcg.
cc847582 4804 */
ab5196c2 4805static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
cc847582 4806{
c26251f9 4807 int node, zid;
bea207c8 4808 u64 usage;
f817ed48 4809
fce66477 4810 do {
52d4b9ac
KH
4811 /* This is for making all *used* pages to be on LRU. */
4812 lru_add_drain_all();
c0ff4b85 4813 drain_all_stock_sync(memcg);
c0ff4b85 4814 mem_cgroup_start_move(memcg);
31aaea4a 4815 for_each_node_state(node, N_MEMORY) {
2ef37d3f 4816 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
f156ab93
HD
4817 enum lru_list lru;
4818 for_each_lru(lru) {
2ef37d3f 4819 mem_cgroup_force_empty_list(memcg,
f156ab93 4820 node, zid, lru);
f817ed48 4821 }
1ecaab2b 4822 }
f817ed48 4823 }
c0ff4b85
R
4824 mem_cgroup_end_move(memcg);
4825 memcg_oom_recover(memcg);
52d4b9ac 4826 cond_resched();
f817ed48 4827
2ef37d3f 4828 /*
bea207c8
GC
4829 * Kernel memory may not necessarily be trackable to a specific
4830 * process. So they are not migrated, and therefore we can't
4831 * expect their value to drop to 0 here.
4832 * Having res filled up with kmem only is enough.
4833 *
2ef37d3f
MH
4834 * This is a safety check because mem_cgroup_force_empty_list
4835 * could have raced with mem_cgroup_replace_page_cache callers
4836 * so the lru seemed empty but the page could have been added
4837 * right after the check. RES_USAGE should be safe as we always
4838 * charge before adding to the LRU.
4839 */
bea207c8
GC
4840 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4841 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4842 } while (usage > 0);
c26251f9
MH
4843}
4844
b5f99b53
GC
4845static inline bool memcg_has_children(struct mem_cgroup *memcg)
4846{
696ac172
JW
4847 lockdep_assert_held(&memcg_create_mutex);
4848 /*
4849 * The lock does not prevent addition or deletion to the list
4850 * of children, but it prevents a new child from being
4851 * initialized based on this parent in css_online(), so it's
4852 * enough to decide whether hierarchically inherited
4853 * attributes can still be changed or not.
4854 */
4855 return memcg->use_hierarchy &&
4856 !list_empty(&memcg->css.cgroup->children);
b5f99b53
GC
4857}
4858
c26251f9
MH
4859/*
4860 * Reclaims as many pages from the given memcg as possible and moves
4861 * the rest to the parent.
4862 *
4863 * Caller is responsible for holding css reference for memcg.
4864 */
4865static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4866{
4867 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4868 struct cgroup *cgrp = memcg->css.cgroup;
f817ed48 4869
c1e862c1 4870 /* returns EBUSY if there is a task or if we come here twice. */
07bc356e 4871 if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
c26251f9
MH
4872 return -EBUSY;
4873
c1e862c1
KH
4874 /* we call try-to-free pages to make this cgroup empty */
4875 lru_add_drain_all();
f817ed48 4876 /* try to free all pages in this cgroup */
569530fb 4877 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
f817ed48 4878 int progress;
c1e862c1 4879
c26251f9
MH
4880 if (signal_pending(current))
4881 return -EINTR;
4882
c0ff4b85 4883 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
185efc0f 4884 false);
c1e862c1 4885 if (!progress) {
f817ed48 4886 nr_retries--;
c1e862c1 4887 /* maybe some writeback is necessary */
8aa7e847 4888 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 4889 }
f817ed48
KH
4890
4891 }
08e552c6 4892 lru_add_drain();
ab5196c2
MH
4893 mem_cgroup_reparent_charges(memcg);
4894
4895 return 0;
cc847582
KH
4896}
4897
182446d0
TH
4898static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4899 unsigned int event)
c1e862c1 4900{
182446d0 4901 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
c26251f9 4902
d8423011
MH
4903 if (mem_cgroup_is_root(memcg))
4904 return -EINVAL;
c33bd835 4905 return mem_cgroup_force_empty(memcg);
c1e862c1
KH
4906}
4907
182446d0
TH
4908static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4909 struct cftype *cft)
18f59ea7 4910{
182446d0 4911 return mem_cgroup_from_css(css)->use_hierarchy;
18f59ea7
BS
4912}
4913
182446d0
TH
4914static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4915 struct cftype *cft, u64 val)
18f59ea7
BS
4916{
4917 int retval = 0;
182446d0 4918 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
63876986 4919 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
18f59ea7 4920
0999821b 4921 mutex_lock(&memcg_create_mutex);
567fb435
GC
4922
4923 if (memcg->use_hierarchy == val)
4924 goto out;
4925
18f59ea7 4926 /*
af901ca1 4927 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
4928 * in the child subtrees. If it is unset, then the change can
4929 * occur, provided the current cgroup has no children.
4930 *
 4931 * For the root cgroup, parent_memcg is NULL; we allow the value to be
4932 * set if there are no children.
4933 */
c0ff4b85 4934 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
18f59ea7 4935 (val == 1 || val == 0)) {
696ac172 4936 if (list_empty(&memcg->css.cgroup->children))
c0ff4b85 4937 memcg->use_hierarchy = val;
18f59ea7
BS
4938 else
4939 retval = -EBUSY;
4940 } else
4941 retval = -EINVAL;
567fb435
GC
4942
4943out:
0999821b 4944 mutex_unlock(&memcg_create_mutex);
18f59ea7
BS
4945
4946 return retval;
4947}
4948
0c3e73e8 4949
c0ff4b85 4950static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
7a159cc9 4951 enum mem_cgroup_stat_index idx)
0c3e73e8 4952{
7d74b06f 4953 struct mem_cgroup *iter;
7a159cc9 4954 long val = 0;
0c3e73e8 4955
7a159cc9 4956 /* Per-cpu values can be negative, use a signed accumulator */
c0ff4b85 4957 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f
KH
4958 val += mem_cgroup_read_stat(iter, idx);
4959
4960 if (val < 0) /* race ? */
4961 val = 0;
4962 return val;
0c3e73e8
BS
4963}
4964
c0ff4b85 4965static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
104f3928 4966{
7d74b06f 4967 u64 val;
104f3928 4968
c0ff4b85 4969 if (!mem_cgroup_is_root(memcg)) {
104f3928 4970 if (!swap)
65c64ce8 4971 return res_counter_read_u64(&memcg->res, RES_USAGE);
104f3928 4972 else
65c64ce8 4973 return res_counter_read_u64(&memcg->memsw, RES_USAGE);
104f3928
KS
4974 }
4975
b070e65c
DR
4976 /*
4977 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4978 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4979 */
c0ff4b85
R
4980 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4981 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
104f3928 4982
7d74b06f 4983 if (swap)
bff6bb83 4984 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
104f3928
KS
4985
4986 return val << PAGE_SHIFT;
4987}
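A worked example of the double-accounting note above, assuming 4 KiB pages: a single 2 MiB transparent hugepage contributes 512 pages to MEM_CGROUP_STAT_RSS and another 512 to MEM_CGROUP_STAT_RSS_HUGE, so summing only CACHE and RSS counts it once and the function reports (0 + 512) << PAGE_SHIFT = 512 * 4096 = 2 MiB rather than 4 MiB.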
4988
791badbd
TH
4989static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4990 struct cftype *cft)
8cdea7c0 4991{
182446d0 4992 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
104f3928 4993 u64 val;
791badbd 4994 int name;
86ae53e1 4995 enum res_type type;
8c7c6e34
KH
4996
4997 type = MEMFILE_TYPE(cft->private);
4998 name = MEMFILE_ATTR(cft->private);
af36f906 4999
8c7c6e34
KH
5000 switch (type) {
5001 case _MEM:
104f3928 5002 if (name == RES_USAGE)
c0ff4b85 5003 val = mem_cgroup_usage(memcg, false);
104f3928 5004 else
c0ff4b85 5005 val = res_counter_read_u64(&memcg->res, name);
8c7c6e34
KH
5006 break;
5007 case _MEMSWAP:
104f3928 5008 if (name == RES_USAGE)
c0ff4b85 5009 val = mem_cgroup_usage(memcg, true);
104f3928 5010 else
c0ff4b85 5011 val = res_counter_read_u64(&memcg->memsw, name);
8c7c6e34 5012 break;
510fc4e1
GC
5013 case _KMEM:
5014 val = res_counter_read_u64(&memcg->kmem, name);
5015 break;
8c7c6e34
KH
5016 default:
5017 BUG();
8c7c6e34 5018 }
af36f906 5019
791badbd 5020 return val;
8cdea7c0 5021}
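The MEMFILE_TYPE()/MEMFILE_ATTR() pair above unpacks cft->private values that the cftype tables later in this file build with MEMFILE_PRIVATE(_MEM, RES_USAGE) and friends. The real macro definitions live earlier in the file and are not shown here; the standalone sketch below only illustrates the general shift-and-mask idea with hypothetical names, not the kernel's actual definitions.

#include <assert.h>

/* Hypothetical packing: high bits carry the resource type, low bits the attribute. */
#define PACK(type, attr)	(((type) << 16) | (attr))
#define UNPACK_TYPE(val)	(((val) >> 16) & 0xffff)
#define UNPACK_ATTR(val)	((val) & 0xffff)

enum { T_MEM, T_MEMSWAP, T_KMEM };			/* stand-ins for _MEM/_MEMSWAP/_KMEM */
enum { A_USAGE, A_LIMIT, A_MAX_USAGE, A_FAILCNT };	/* stand-ins for the RES_* attributes */

int main(void)
{
	int private = PACK(T_KMEM, A_LIMIT);	/* one integer describes both halves */

	assert(UNPACK_TYPE(private) == T_KMEM);
	assert(UNPACK_ATTR(private) == A_LIMIT);
	return 0;
}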
510fc4e1 5022
510fc4e1 5023#ifdef CONFIG_MEMCG_KMEM
d6441637
VD
5024/* should be called with activate_kmem_mutex held */
5025static int __memcg_activate_kmem(struct mem_cgroup *memcg,
5026 unsigned long long limit)
5027{
5028 int err = 0;
5029 int memcg_id;
5030
5031 if (memcg_kmem_is_active(memcg))
5032 return 0;
5033
5034 /*
5035 * We are going to allocate memory for data shared by all memory
5036 * cgroups so let's stop accounting here.
5037 */
5038 memcg_stop_kmem_account();
5039
510fc4e1
GC
5040 /*
5041 * For simplicity, we won't allow this to be disabled. It also can't
 5042 * be changed if the cgroup has children already, or if tasks have
5043 * already joined.
5044 *
5045 * If tasks join before we set the limit, a person looking at
5046 * kmem.usage_in_bytes will have no way to determine when it took
5047 * place, which makes the value quite meaningless.
5048 *
5049 * After it first became limited, changes in the value of the limit are
5050 * of course permitted.
510fc4e1 5051 */
0999821b 5052 mutex_lock(&memcg_create_mutex);
07bc356e 5053 if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
d6441637
VD
5054 err = -EBUSY;
5055 mutex_unlock(&memcg_create_mutex);
5056 if (err)
5057 goto out;
510fc4e1 5058
d6441637
VD
5059 memcg_id = ida_simple_get(&kmem_limited_groups,
5060 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
5061 if (memcg_id < 0) {
5062 err = memcg_id;
5063 goto out;
5064 }
5065
5066 /*
5067 * Make sure we have enough space for this cgroup in each root cache's
5068 * memcg_params.
5069 */
5070 err = memcg_update_all_caches(memcg_id + 1);
5071 if (err)
5072 goto out_rmid;
5073
5074 memcg->kmemcg_id = memcg_id;
5075 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
5076 mutex_init(&memcg->slab_caches_mutex);
5077
5078 /*
5079 * We couldn't have accounted to this cgroup, because it hasn't got the
5080 * active bit set yet, so this should succeed.
5081 */
5082 err = res_counter_set_limit(&memcg->kmem, limit);
5083 VM_BUG_ON(err);
5084
5085 static_key_slow_inc(&memcg_kmem_enabled_key);
5086 /*
5087 * Setting the active bit after enabling static branching will
5088 * guarantee no one starts accounting before all call sites are
5089 * patched.
5090 */
5091 memcg_kmem_set_active(memcg);
510fc4e1 5092out:
d6441637
VD
5093 memcg_resume_kmem_account();
5094 return err;
5095
5096out_rmid:
5097 ida_simple_remove(&kmem_limited_groups, memcg_id);
5098 goto out;
5099}
5100
5101static int memcg_activate_kmem(struct mem_cgroup *memcg,
5102 unsigned long long limit)
5103{
5104 int ret;
5105
5106 mutex_lock(&activate_kmem_mutex);
5107 ret = __memcg_activate_kmem(memcg, limit);
5108 mutex_unlock(&activate_kmem_mutex);
5109 return ret;
5110}
5111
5112static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5113 unsigned long long val)
5114{
5115 int ret;
5116
5117 if (!memcg_kmem_is_active(memcg))
5118 ret = memcg_activate_kmem(memcg, val);
5119 else
5120 ret = res_counter_set_limit(&memcg->kmem, val);
510fc4e1
GC
5121 return ret;
5122}
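Seen from userspace, memcg_update_kmem_limit() is what runs when a value is written to memory.kmem.limit_in_bytes (wired up in the cftype table below); per the comments in __memcg_activate_kmem(), the first such write is also what switches kernel-memory accounting on, and it is refused with -EBUSY once the group already has tasks or children. A minimal sketch of that first write, assuming a hypothetical cgroup at /sys/fs/cgroup/memory/mygrp:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical v1 memory cgroup path; adjust to the local mount point. */
	const char *path = "/sys/fs/cgroup/memory/mygrp/memory.kmem.limit_in_bytes";
	const char *limit = "67108864\n";	/* 64 MiB; the first write activates kmem accounting */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, limit, strlen(limit)) < 0)
		perror("write");	/* EBUSY if the group already has tasks or children */
	close(fd);
	return 0;
}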
5123
55007d84 5124static int memcg_propagate_kmem(struct mem_cgroup *memcg)
510fc4e1 5125{
55007d84 5126 int ret = 0;
510fc4e1 5127 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
55007d84 5128
d6441637
VD
5129 if (!parent)
5130 return 0;
55007d84 5131
d6441637 5132 mutex_lock(&activate_kmem_mutex);
55007d84 5133 /*
d6441637
VD
5134 * If the parent cgroup is not kmem-active now, it cannot be activated
5135 * after this point, because it has at least one child already.
55007d84 5136 */
d6441637
VD
5137 if (memcg_kmem_is_active(parent))
5138 ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5139 mutex_unlock(&activate_kmem_mutex);
55007d84 5140 return ret;
510fc4e1 5141}
d6441637
VD
5142#else
5143static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5144 unsigned long long val)
5145{
5146 return -EINVAL;
5147}
6d043990 5148#endif /* CONFIG_MEMCG_KMEM */
510fc4e1 5149
628f4235
KH
5150/*
5151 * The user of this function is...
5152 * RES_LIMIT.
5153 */
182446d0 5154static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
4d3bb511 5155 char *buffer)
8cdea7c0 5156{
182446d0 5157 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
86ae53e1
GC
5158 enum res_type type;
5159 int name;
628f4235
KH
5160 unsigned long long val;
5161 int ret;
5162
8c7c6e34
KH
5163 type = MEMFILE_TYPE(cft->private);
5164 name = MEMFILE_ATTR(cft->private);
af36f906 5165
8c7c6e34 5166 switch (name) {
628f4235 5167 case RES_LIMIT:
4b3bde4c
BS
5168 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5169 ret = -EINVAL;
5170 break;
5171 }
628f4235
KH
5172 /* This function does all necessary parse...reuse it */
5173 ret = res_counter_memparse_write_strategy(buffer, &val);
8c7c6e34
KH
5174 if (ret)
5175 break;
5176 if (type == _MEM)
628f4235 5177 ret = mem_cgroup_resize_limit(memcg, val);
510fc4e1 5178 else if (type == _MEMSWAP)
8c7c6e34 5179 ret = mem_cgroup_resize_memsw_limit(memcg, val);
510fc4e1 5180 else if (type == _KMEM)
d6441637 5181 ret = memcg_update_kmem_limit(memcg, val);
510fc4e1
GC
5182 else
5183 return -EINVAL;
628f4235 5184 break;
296c81d8
BS
5185 case RES_SOFT_LIMIT:
5186 ret = res_counter_memparse_write_strategy(buffer, &val);
5187 if (ret)
5188 break;
5189 /*
5190 * For memsw, soft limits are hard to implement in terms
 5191 * of semantics; for now, we only support soft limits for
 5192 * memory control without swap.
5193 */
5194 if (type == _MEM)
5195 ret = res_counter_set_soft_limit(&memcg->res, val);
5196 else
5197 ret = -EINVAL;
5198 break;
628f4235
KH
5199 default:
5200 ret = -EINVAL; /* should be BUG() ? */
5201 break;
5202 }
5203 return ret;
8cdea7c0
BS
5204}
5205
fee7b548
KH
5206static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5207 unsigned long long *mem_limit, unsigned long long *memsw_limit)
5208{
fee7b548
KH
5209 unsigned long long min_limit, min_memsw_limit, tmp;
5210
5211 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5212 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
fee7b548
KH
5213 if (!memcg->use_hierarchy)
5214 goto out;
5215
63876986
TH
5216 while (css_parent(&memcg->css)) {
5217 memcg = mem_cgroup_from_css(css_parent(&memcg->css));
fee7b548
KH
5218 if (!memcg->use_hierarchy)
5219 break;
5220 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5221 min_limit = min(min_limit, tmp);
5222 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5223 min_memsw_limit = min(min_memsw_limit, tmp);
5224 }
5225out:
5226 *mem_limit = min_limit;
5227 *memsw_limit = min_memsw_limit;
fee7b548
KH
5228}
5229
182446d0 5230static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
c84872e1 5231{
182446d0 5232 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
86ae53e1
GC
5233 int name;
5234 enum res_type type;
c84872e1 5235
8c7c6e34
KH
5236 type = MEMFILE_TYPE(event);
5237 name = MEMFILE_ATTR(event);
af36f906 5238
8c7c6e34 5239 switch (name) {
29f2a4da 5240 case RES_MAX_USAGE:
8c7c6e34 5241 if (type == _MEM)
c0ff4b85 5242 res_counter_reset_max(&memcg->res);
510fc4e1 5243 else if (type == _MEMSWAP)
c0ff4b85 5244 res_counter_reset_max(&memcg->memsw);
510fc4e1
GC
5245 else if (type == _KMEM)
5246 res_counter_reset_max(&memcg->kmem);
5247 else
5248 return -EINVAL;
29f2a4da
PE
5249 break;
5250 case RES_FAILCNT:
8c7c6e34 5251 if (type == _MEM)
c0ff4b85 5252 res_counter_reset_failcnt(&memcg->res);
510fc4e1 5253 else if (type == _MEMSWAP)
c0ff4b85 5254 res_counter_reset_failcnt(&memcg->memsw);
510fc4e1
GC
5255 else if (type == _KMEM)
5256 res_counter_reset_failcnt(&memcg->kmem);
5257 else
5258 return -EINVAL;
29f2a4da
PE
5259 break;
5260 }
f64c3f54 5261
85cc59db 5262 return 0;
c84872e1
PE
5263}
5264
182446d0 5265static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
5266 struct cftype *cft)
5267{
182446d0 5268 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
5269}
5270
02491447 5271#ifdef CONFIG_MMU
182446d0 5272static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
5273 struct cftype *cft, u64 val)
5274{
182446d0 5275 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0
DN
5276
5277 if (val >= (1 << NR_MOVE_TYPE))
5278 return -EINVAL;
ee5e8472 5279
7dc74be0 5280 /*
ee5e8472
GC
5281 * No kind of locking is needed in here, because ->can_attach() will
5282 * check this value once in the beginning of the process, and then carry
5283 * on with stale data. This means that changes to this value will only
5284 * affect task migrations starting after the change.
7dc74be0 5285 */
c0ff4b85 5286 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
5287 return 0;
5288}
02491447 5289#else
182446d0 5290static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
5291 struct cftype *cft, u64 val)
5292{
5293 return -ENOSYS;
5294}
5295#endif
7dc74be0 5296
406eb0c9 5297#ifdef CONFIG_NUMA
2da8ca82 5298static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 5299{
25485de6
GT
5300 struct numa_stat {
5301 const char *name;
5302 unsigned int lru_mask;
5303 };
5304
5305 static const struct numa_stat stats[] = {
5306 { "total", LRU_ALL },
5307 { "file", LRU_ALL_FILE },
5308 { "anon", LRU_ALL_ANON },
5309 { "unevictable", BIT(LRU_UNEVICTABLE) },
5310 };
5311 const struct numa_stat *stat;
406eb0c9 5312 int nid;
25485de6 5313 unsigned long nr;
2da8ca82 5314 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
406eb0c9 5315
25485de6
GT
5316 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5317 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5318 seq_printf(m, "%s=%lu", stat->name, nr);
5319 for_each_node_state(nid, N_MEMORY) {
5320 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5321 stat->lru_mask);
5322 seq_printf(m, " N%d=%lu", nid, nr);
5323 }
5324 seq_putc(m, '\n');
406eb0c9 5325 }
406eb0c9 5326
071aee13
YH
5327 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5328 struct mem_cgroup *iter;
5329
5330 nr = 0;
5331 for_each_mem_cgroup_tree(iter, memcg)
5332 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5333 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5334 for_each_node_state(nid, N_MEMORY) {
5335 nr = 0;
5336 for_each_mem_cgroup_tree(iter, memcg)
5337 nr += mem_cgroup_node_nr_lru_pages(
5338 iter, nid, stat->lru_mask);
5339 seq_printf(m, " N%d=%lu", nid, nr);
5340 }
5341 seq_putc(m, '\n');
406eb0c9 5342 }
406eb0c9 5343
406eb0c9
YH
5344 return 0;
5345}
5346#endif /* CONFIG_NUMA */
5347
af7c4b0e
JW
5348static inline void mem_cgroup_lru_names_not_uptodate(void)
5349{
5350 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5351}
5352
2da8ca82 5353static int memcg_stat_show(struct seq_file *m, void *v)
d2ceb9b7 5354{
2da8ca82 5355 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
af7c4b0e
JW
5356 struct mem_cgroup *mi;
5357 unsigned int i;
406eb0c9 5358
af7c4b0e 5359 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
bff6bb83 5360 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 5361 continue;
af7c4b0e
JW
5362 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5363 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
1dd3a273 5364 }
7b854121 5365
af7c4b0e
JW
5366 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5367 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5368 mem_cgroup_read_events(memcg, i));
5369
5370 for (i = 0; i < NR_LRU_LISTS; i++)
5371 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5372 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5373
14067bb3 5374 /* Hierarchical information */
fee7b548
KH
5375 {
5376 unsigned long long limit, memsw_limit;
d79154bb 5377 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
78ccf5b5 5378 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
fee7b548 5379 if (do_swap_account)
78ccf5b5
JW
5380 seq_printf(m, "hierarchical_memsw_limit %llu\n",
5381 memsw_limit);
fee7b548 5382 }
7f016ee8 5383
af7c4b0e
JW
5384 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5385 long long val = 0;
5386
bff6bb83 5387 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 5388 continue;
af7c4b0e
JW
5389 for_each_mem_cgroup_tree(mi, memcg)
5390 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5391 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5392 }
5393
5394 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5395 unsigned long long val = 0;
5396
5397 for_each_mem_cgroup_tree(mi, memcg)
5398 val += mem_cgroup_read_events(mi, i);
5399 seq_printf(m, "total_%s %llu\n",
5400 mem_cgroup_events_names[i], val);
5401 }
5402
5403 for (i = 0; i < NR_LRU_LISTS; i++) {
5404 unsigned long long val = 0;
5405
5406 for_each_mem_cgroup_tree(mi, memcg)
5407 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5408 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
1dd3a273 5409 }
14067bb3 5410
7f016ee8 5411#ifdef CONFIG_DEBUG_VM
7f016ee8
KM
5412 {
5413 int nid, zid;
5414 struct mem_cgroup_per_zone *mz;
89abfab1 5415 struct zone_reclaim_stat *rstat;
7f016ee8
KM
5416 unsigned long recent_rotated[2] = {0, 0};
5417 unsigned long recent_scanned[2] = {0, 0};
5418
5419 for_each_online_node(nid)
5420 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
d79154bb 5421 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
89abfab1 5422 rstat = &mz->lruvec.reclaim_stat;
7f016ee8 5423
89abfab1
HD
5424 recent_rotated[0] += rstat->recent_rotated[0];
5425 recent_rotated[1] += rstat->recent_rotated[1];
5426 recent_scanned[0] += rstat->recent_scanned[0];
5427 recent_scanned[1] += rstat->recent_scanned[1];
7f016ee8 5428 }
78ccf5b5
JW
5429 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5430 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5431 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5432 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
7f016ee8
KM
5433 }
5434#endif
5435
d2ceb9b7
KH
5436 return 0;
5437}
5438
182446d0
TH
5439static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5440 struct cftype *cft)
a7885eb8 5441{
182446d0 5442 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 5443
1f4c025b 5444 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
5445}
5446
182446d0
TH
5447static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5448 struct cftype *cft, u64 val)
a7885eb8 5449{
182446d0 5450 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
63876986 5451 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
a7885eb8 5452
63876986 5453 if (val > 100 || !parent)
a7885eb8
KM
5454 return -EINVAL;
5455
0999821b 5456 mutex_lock(&memcg_create_mutex);
068b38c1 5457
a7885eb8 5458 /* If under hierarchy, only empty-root can set this value */
b5f99b53 5459 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
0999821b 5460 mutex_unlock(&memcg_create_mutex);
a7885eb8 5461 return -EINVAL;
068b38c1 5462 }
a7885eb8 5463
a7885eb8 5464 memcg->swappiness = val;
a7885eb8 5465
0999821b 5466 mutex_unlock(&memcg_create_mutex);
068b38c1 5467
a7885eb8
KM
5468 return 0;
5469}
5470
2e72b634
KS
5471static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5472{
5473 struct mem_cgroup_threshold_ary *t;
5474 u64 usage;
5475 int i;
5476
5477 rcu_read_lock();
5478 if (!swap)
2c488db2 5479 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 5480 else
2c488db2 5481 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
5482
5483 if (!t)
5484 goto unlock;
5485
5486 usage = mem_cgroup_usage(memcg, swap);
5487
5488 /*
748dad36 5489 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
 5490 * If that is not the case, a threshold was crossed after the last
5491 * call of __mem_cgroup_threshold().
5492 */
5407a562 5493 i = t->current_threshold;
2e72b634
KS
5494
5495 /*
5496 * Iterate backward over array of thresholds starting from
5497 * current_threshold and check if a threshold is crossed.
5498 * If none of thresholds below usage is crossed, we read
5499 * only one element of the array here.
5500 */
5501 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5502 eventfd_signal(t->entries[i].eventfd, 1);
5503
5504 /* i = current_threshold + 1 */
5505 i++;
5506
5507 /*
5508 * Iterate forward over array of thresholds starting from
5509 * current_threshold+1 and check if a threshold is crossed.
5510 * If none of thresholds above usage is crossed, we read
5511 * only one element of the array here.
5512 */
5513 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5514 eventfd_signal(t->entries[i].eventfd, 1);
5515
5516 /* Update current_threshold */
5407a562 5517 t->current_threshold = i - 1;
2e72b634
KS
5518unlock:
5519 rcu_read_unlock();
5520}
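The loops above only visit the entries around current_threshold: the backward scan signals every threshold that usage has fallen below, the forward scan signals every threshold that usage has risen to or past, and current_threshold ends up at the last entry that is still <= usage (or -1). A small standalone sketch of the same walk over a sorted array, with eventfd_signal() replaced by a hypothetical callback so it can run in isolation:

#include <stdio.h>

struct threshold { unsigned long long value; };

/* Stand-in for eventfd_signal(): just report which threshold fired. */
static void signal_threshold(const struct threshold *t)
{
	printf("crossed threshold %llu\n", t->value);
}

/* Same walk as above: scan back over entries now above usage, then forward
 * over entries now at or below it, and leave *cur naming the last entry
 * that is <= usage (or -1 if there is none). */
static void check_thresholds(const struct threshold *t, int size, int *cur,
			     unsigned long long usage)
{
	int i = *cur;

	for (; i >= 0 && t[i].value > usage; i--)
		signal_threshold(&t[i]);
	for (i++; i < size && t[i].value <= usage; i++)
		signal_threshold(&t[i]);
	*cur = i - 1;
}

int main(void)
{
	struct threshold t[] = { { 4 << 20 }, { 8 << 20 }, { 16 << 20 } };
	int cur = 0;					/* usage was between 4M and 8M */

	check_thresholds(t, 3, &cur, 10 << 20);		/* rises to 10M: fires the 8M entry */
	check_thresholds(t, 3, &cur, 2 << 20);		/* drops to 2M: fires 8M, then 4M */
	return 0;
}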
5521
5522static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5523{
ad4ca5f4
KS
5524 while (memcg) {
5525 __mem_cgroup_threshold(memcg, false);
5526 if (do_swap_account)
5527 __mem_cgroup_threshold(memcg, true);
5528
5529 memcg = parent_mem_cgroup(memcg);
5530 }
2e72b634
KS
5531}
5532
5533static int compare_thresholds(const void *a, const void *b)
5534{
5535 const struct mem_cgroup_threshold *_a = a;
5536 const struct mem_cgroup_threshold *_b = b;
5537
2bff24a3
GT
5538 if (_a->threshold > _b->threshold)
5539 return 1;
5540
5541 if (_a->threshold < _b->threshold)
5542 return -1;
5543
5544 return 0;
2e72b634
KS
5545}
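One reason to prefer the explicit -1/0/1 comparisons above over the common "return a - b" comparator idiom: the thresholds are 64-bit, and a difference truncated to the comparator's int return type can report the wrong ordering for large values, leaving the array mis-sorted. A tiny standalone illustration of the pitfall (the out-of-range conversion is implementation-defined; the behaviour shown is what a typical LP64 toolchain produces):

#include <stdio.h>

/* The tempting "return a - b" comparator: the u64 difference is truncated to int. */
static int cmp_by_subtraction(unsigned long long a, unsigned long long b)
{
	return a - b;
}

int main(void)
{
	unsigned long long a = 1ULL << 32, b = 0;	/* 4 GiB vs 0 */

	/* Typically prints 0, i.e. "equal", even though a > b. */
	printf("subtraction comparator: %d\n", cmp_by_subtraction(a, b));
	/* Explicit comparison always gets the sign right. */
	printf("explicit comparator:    %d\n", a > b ? 1 : (a < b ? -1 : 0));
	return 0;
}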
5546
c0ff4b85 5547static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
5548{
5549 struct mem_cgroup_eventfd_list *ev;
5550
c0ff4b85 5551 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27
KH
5552 eventfd_signal(ev->eventfd, 1);
5553 return 0;
5554}
5555
c0ff4b85 5556static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 5557{
7d74b06f
KH
5558 struct mem_cgroup *iter;
5559
c0ff4b85 5560 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 5561 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
5562}
5563
59b6f873 5564static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 5565 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 5566{
2c488db2
KS
5567 struct mem_cgroup_thresholds *thresholds;
5568 struct mem_cgroup_threshold_ary *new;
2e72b634 5569 u64 threshold, usage;
2c488db2 5570 int i, size, ret;
2e72b634
KS
5571
5572 ret = res_counter_memparse_write_strategy(args, &threshold);
5573 if (ret)
5574 return ret;
5575
5576 mutex_lock(&memcg->thresholds_lock);
2c488db2 5577
2e72b634 5578 if (type == _MEM)
2c488db2 5579 thresholds = &memcg->thresholds;
2e72b634 5580 else if (type == _MEMSWAP)
2c488db2 5581 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
5582 else
5583 BUG();
5584
5585 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5586
5587 /* Check if a threshold crossed before adding a new one */
2c488db2 5588 if (thresholds->primary)
2e72b634
KS
5589 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5590
2c488db2 5591 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
5592
5593 /* Allocate memory for new array of thresholds */
2c488db2 5594 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 5595 GFP_KERNEL);
2c488db2 5596 if (!new) {
2e72b634
KS
5597 ret = -ENOMEM;
5598 goto unlock;
5599 }
2c488db2 5600 new->size = size;
2e72b634
KS
5601
5602 /* Copy thresholds (if any) to new array */
2c488db2
KS
5603 if (thresholds->primary) {
5604 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 5605 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
5606 }
5607
2e72b634 5608 /* Add new threshold */
2c488db2
KS
5609 new->entries[size - 1].eventfd = eventfd;
5610 new->entries[size - 1].threshold = threshold;
2e72b634
KS
5611
5612 /* Sort thresholds. Registering of new threshold isn't time-critical */
2c488db2 5613 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
5614 compare_thresholds, NULL);
5615
5616 /* Find current threshold */
2c488db2 5617 new->current_threshold = -1;
2e72b634 5618 for (i = 0; i < size; i++) {
748dad36 5619 if (new->entries[i].threshold <= usage) {
2e72b634 5620 /*
2c488db2
KS
5621 * new->current_threshold will not be used until
5622 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
5623 * it here.
5624 */
2c488db2 5625 ++new->current_threshold;
748dad36
SZ
5626 } else
5627 break;
2e72b634
KS
5628 }
5629
2c488db2
KS
5630 /* Free old spare buffer and save old primary buffer as spare */
5631 kfree(thresholds->spare);
5632 thresholds->spare = thresholds->primary;
5633
5634 rcu_assign_pointer(thresholds->primary, new);
2e72b634 5635
907860ed 5636 /* To be sure that nobody uses thresholds */
2e72b634
KS
5637 synchronize_rcu();
5638
2e72b634
KS
5639unlock:
5640 mutex_unlock(&memcg->thresholds_lock);
5641
5642 return ret;
5643}
5644
59b6f873 5645static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
5646 struct eventfd_ctx *eventfd, const char *args)
5647{
59b6f873 5648 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
5649}
5650
59b6f873 5651static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
5652 struct eventfd_ctx *eventfd, const char *args)
5653{
59b6f873 5654 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
5655}
5656
59b6f873 5657static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 5658 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 5659{
2c488db2
KS
5660 struct mem_cgroup_thresholds *thresholds;
5661 struct mem_cgroup_threshold_ary *new;
2e72b634 5662 u64 usage;
2c488db2 5663 int i, j, size;
2e72b634
KS
5664
5665 mutex_lock(&memcg->thresholds_lock);
5666 if (type == _MEM)
2c488db2 5667 thresholds = &memcg->thresholds;
2e72b634 5668 else if (type == _MEMSWAP)
2c488db2 5669 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
5670 else
5671 BUG();
5672
371528ca
AV
5673 if (!thresholds->primary)
5674 goto unlock;
5675
2e72b634
KS
5676 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5677
5678 /* Check if a threshold crossed before removing */
5679 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5680
5681 /* Calculate new number of threshold */
2c488db2
KS
5682 size = 0;
5683 for (i = 0; i < thresholds->primary->size; i++) {
5684 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
5685 size++;
5686 }
5687
2c488db2 5688 new = thresholds->spare;
907860ed 5689
2e72b634
KS
5690 /* Set thresholds array to NULL if we don't have thresholds */
5691 if (!size) {
2c488db2
KS
5692 kfree(new);
5693 new = NULL;
907860ed 5694 goto swap_buffers;
2e72b634
KS
5695 }
5696
2c488db2 5697 new->size = size;
2e72b634
KS
5698
5699 /* Copy thresholds and find current threshold */
2c488db2
KS
5700 new->current_threshold = -1;
5701 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5702 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
5703 continue;
5704
2c488db2 5705 new->entries[j] = thresholds->primary->entries[i];
748dad36 5706 if (new->entries[j].threshold <= usage) {
2e72b634 5707 /*
2c488db2 5708 * new->current_threshold will not be used
2e72b634
KS
5709 * until rcu_assign_pointer(), so it's safe to increment
5710 * it here.
5711 */
2c488db2 5712 ++new->current_threshold;
2e72b634
KS
5713 }
5714 j++;
5715 }
5716
907860ed 5717swap_buffers:
2c488db2
KS
5718 /* Swap primary and spare array */
5719 thresholds->spare = thresholds->primary;
8c757763
SZ
5720 /* If all events are unregistered, free the spare array */
5721 if (!new) {
5722 kfree(thresholds->spare);
5723 thresholds->spare = NULL;
5724 }
5725
2c488db2 5726 rcu_assign_pointer(thresholds->primary, new);
2e72b634 5727
907860ed 5728 /* To be sure that nobody uses thresholds */
2e72b634 5729 synchronize_rcu();
371528ca 5730unlock:
2e72b634 5731 mutex_unlock(&memcg->thresholds_lock);
2e72b634 5732}
c1e862c1 5733
59b6f873 5734static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
5735 struct eventfd_ctx *eventfd)
5736{
59b6f873 5737 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
5738}
5739
59b6f873 5740static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
5741 struct eventfd_ctx *eventfd)
5742{
59b6f873 5743 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
5744}
5745
59b6f873 5746static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 5747 struct eventfd_ctx *eventfd, const char *args)
9490ff27 5748{
9490ff27 5749 struct mem_cgroup_eventfd_list *event;
9490ff27 5750
9490ff27
KH
5751 event = kmalloc(sizeof(*event), GFP_KERNEL);
5752 if (!event)
5753 return -ENOMEM;
5754
1af8efe9 5755 spin_lock(&memcg_oom_lock);
9490ff27
KH
5756
5757 event->eventfd = eventfd;
5758 list_add(&event->list, &memcg->oom_notify);
5759
5760 /* already in OOM ? */
79dfdacc 5761 if (atomic_read(&memcg->under_oom))
9490ff27 5762 eventfd_signal(eventfd, 1);
1af8efe9 5763 spin_unlock(&memcg_oom_lock);
9490ff27
KH
5764
5765 return 0;
5766}
5767
59b6f873 5768static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 5769 struct eventfd_ctx *eventfd)
9490ff27 5770{
9490ff27 5771 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 5772
1af8efe9 5773 spin_lock(&memcg_oom_lock);
9490ff27 5774
c0ff4b85 5775 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
5776 if (ev->eventfd == eventfd) {
5777 list_del(&ev->list);
5778 kfree(ev);
5779 }
5780 }
5781
1af8efe9 5782 spin_unlock(&memcg_oom_lock);
9490ff27
KH
5783}
5784
2da8ca82 5785static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 5786{
2da8ca82 5787 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3c11ecf4 5788
791badbd
TH
5789 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5790 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
3c11ecf4
KH
5791 return 0;
5792}
5793
182446d0 5794static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
5795 struct cftype *cft, u64 val)
5796{
182446d0 5797 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
63876986 5798 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
3c11ecf4
KH
5799
5800 /* cannot set to root cgroup and only 0 and 1 are allowed */
63876986 5801 if (!parent || !((val == 0) || (val == 1)))
3c11ecf4
KH
5802 return -EINVAL;
5803
0999821b 5804 mutex_lock(&memcg_create_mutex);
3c11ecf4 5805 /* oom-kill-disable is a flag for subhierarchy. */
b5f99b53 5806 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
0999821b 5807 mutex_unlock(&memcg_create_mutex);
3c11ecf4
KH
5808 return -EINVAL;
5809 }
c0ff4b85 5810 memcg->oom_kill_disable = val;
4d845ebf 5811 if (!val)
c0ff4b85 5812 memcg_oom_recover(memcg);
0999821b 5813 mutex_unlock(&memcg_create_mutex);
3c11ecf4
KH
5814 return 0;
5815}
5816
c255a458 5817#ifdef CONFIG_MEMCG_KMEM
cbe128e3 5818static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa 5819{
55007d84
GC
5820 int ret;
5821
2633d7a0 5822 memcg->kmemcg_id = -1;
55007d84
GC
5823 ret = memcg_propagate_kmem(memcg);
5824 if (ret)
5825 return ret;
2633d7a0 5826
1d62e436 5827 return mem_cgroup_sockets_init(memcg, ss);
573b400d 5828}
e5671dfa 5829
10d5ebf4 5830static void memcg_destroy_kmem(struct mem_cgroup *memcg)
d1a4c0b3 5831{
1d62e436 5832 mem_cgroup_sockets_destroy(memcg);
10d5ebf4
LZ
5833}
5834
5835static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5836{
5837 if (!memcg_kmem_is_active(memcg))
5838 return;
5839
5840 /*
5841 * kmem charges can outlive the cgroup. In the case of slab
 5842 * pages, for instance, a page may contain objects from various
 5843 * processes. As we do not take a reference for every
 5844 * such allocation, we have to be careful when doing uncharge
5845 * (see memcg_uncharge_kmem) and here during offlining.
5846 *
 5847 * The idea is that only the _last_ uncharge which sees
5848 * the dead memcg will drop the last reference. An additional
5849 * reference is taken here before the group is marked dead
5850 * which is then paired with css_put during uncharge resp. here.
5851 *
5852 * Although this might sound strange as this path is called from
 5853 * css_offline() when the reference might have dropped down to 0
 5854 * and shouldn't be incremented anymore (css_tryget would fail),
 5855 * we do not have other options because of the kmem allocations'
5856 * lifetime.
5857 */
5858 css_get(&memcg->css);
7de37682
GC
5859
5860 memcg_kmem_mark_dead(memcg);
5861
5862 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5863 return;
5864
7de37682 5865 if (memcg_kmem_test_and_clear_dead(memcg))
10d5ebf4 5866 css_put(&memcg->css);
d1a4c0b3 5867}
e5671dfa 5868#else
cbe128e3 5869static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa
GC
5870{
5871 return 0;
5872}
d1a4c0b3 5873
10d5ebf4
LZ
5874static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5875{
5876}
5877
5878static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
d1a4c0b3
GC
5879{
5880}
e5671dfa
GC
5881#endif
5882
3bc942f3
TH
5883/*
5884 * DO NOT USE IN NEW FILES.
5885 *
5886 * "cgroup.event_control" implementation.
5887 *
5888 * This is way over-engineered. It tries to support fully configurable
 5889 * events for each user. Such a level of flexibility is completely
 5890 * unnecessary, especially in light of the planned unified hierarchy.
5891 *
5892 * Please deprecate this and replace with something simpler if at all
5893 * possible.
5894 */
5895
79bd9814
TH
5896/*
5897 * Unregister event and free resources.
5898 *
5899 * Gets called from workqueue.
5900 */
3bc942f3 5901static void memcg_event_remove(struct work_struct *work)
79bd9814 5902{
3bc942f3
TH
5903 struct mem_cgroup_event *event =
5904 container_of(work, struct mem_cgroup_event, remove);
59b6f873 5905 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
5906
5907 remove_wait_queue(event->wqh, &event->wait);
5908
59b6f873 5909 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
5910
5911 /* Notify userspace the event is going away. */
5912 eventfd_signal(event->eventfd, 1);
5913
5914 eventfd_ctx_put(event->eventfd);
5915 kfree(event);
59b6f873 5916 css_put(&memcg->css);
79bd9814
TH
5917}
5918
5919/*
5920 * Gets called on POLLHUP on eventfd when user closes it.
5921 *
5922 * Called with wqh->lock held and interrupts disabled.
5923 */
3bc942f3
TH
5924static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5925 int sync, void *key)
79bd9814 5926{
3bc942f3
TH
5927 struct mem_cgroup_event *event =
5928 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 5929 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
5930 unsigned long flags = (unsigned long)key;
5931
5932 if (flags & POLLHUP) {
5933 /*
5934 * If the event has been detached at cgroup removal, we
 5935 * can simply return, knowing the other side will clean up
5936 * for us.
5937 *
5938 * We can't race against event freeing since the other
5939 * side will require wqh->lock via remove_wait_queue(),
5940 * which we hold.
5941 */
fba94807 5942 spin_lock(&memcg->event_list_lock);
79bd9814
TH
5943 if (!list_empty(&event->list)) {
5944 list_del_init(&event->list);
5945 /*
 5946 * We are in atomic context, but memcg_event_remove()
5947 * may sleep, so we have to call it in workqueue.
5948 */
5949 schedule_work(&event->remove);
5950 }
fba94807 5951 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
5952 }
5953
5954 return 0;
5955}
5956
3bc942f3 5957static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
5958 wait_queue_head_t *wqh, poll_table *pt)
5959{
3bc942f3
TH
5960 struct mem_cgroup_event *event =
5961 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
5962
5963 event->wqh = wqh;
5964 add_wait_queue(wqh, &event->wait);
5965}
5966
5967/*
3bc942f3
TH
5968 * DO NOT USE IN NEW FILES.
5969 *
79bd9814
TH
5970 * Parse input and register new cgroup event handler.
5971 *
5972 * Input must be in format '<event_fd> <control_fd> <args>'.
5973 * Interpretation of args is defined by control file implementation.
5974 */
3bc942f3 5975static int memcg_write_event_control(struct cgroup_subsys_state *css,
4d3bb511 5976 struct cftype *cft, char *buffer)
79bd9814 5977{
fba94807 5978 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5979 struct mem_cgroup_event *event;
79bd9814
TH
5980 struct cgroup_subsys_state *cfile_css;
5981 unsigned int efd, cfd;
5982 struct fd efile;
5983 struct fd cfile;
fba94807 5984 const char *name;
79bd9814
TH
5985 char *endp;
5986 int ret;
5987
5988 efd = simple_strtoul(buffer, &endp, 10);
5989 if (*endp != ' ')
5990 return -EINVAL;
5991 buffer = endp + 1;
5992
5993 cfd = simple_strtoul(buffer, &endp, 10);
5994 if ((*endp != ' ') && (*endp != '\0'))
5995 return -EINVAL;
5996 buffer = endp + 1;
5997
5998 event = kzalloc(sizeof(*event), GFP_KERNEL);
5999 if (!event)
6000 return -ENOMEM;
6001
59b6f873 6002 event->memcg = memcg;
79bd9814 6003 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
6004 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
6005 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
6006 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
6007
6008 efile = fdget(efd);
6009 if (!efile.file) {
6010 ret = -EBADF;
6011 goto out_kfree;
6012 }
6013
6014 event->eventfd = eventfd_ctx_fileget(efile.file);
6015 if (IS_ERR(event->eventfd)) {
6016 ret = PTR_ERR(event->eventfd);
6017 goto out_put_efile;
6018 }
6019
6020 cfile = fdget(cfd);
6021 if (!cfile.file) {
6022 ret = -EBADF;
6023 goto out_put_eventfd;
6024 }
6025
 6026 /* the process needs read permission on the control file */
6027 /* AV: shouldn't we check that it's been opened for read instead? */
6028 ret = inode_permission(file_inode(cfile.file), MAY_READ);
6029 if (ret < 0)
6030 goto out_put_cfile;
6031
fba94807
TH
6032 /*
6033 * Determine the event callbacks and set them in @event. This used
6034 * to be done via struct cftype but cgroup core no longer knows
6035 * about these events. The following is crude but the whole thing
6036 * is for compatibility anyway.
3bc942f3
TH
6037 *
6038 * DO NOT ADD NEW FILES.
fba94807
TH
6039 */
6040 name = cfile.file->f_dentry->d_name.name;
6041
6042 if (!strcmp(name, "memory.usage_in_bytes")) {
6043 event->register_event = mem_cgroup_usage_register_event;
6044 event->unregister_event = mem_cgroup_usage_unregister_event;
6045 } else if (!strcmp(name, "memory.oom_control")) {
6046 event->register_event = mem_cgroup_oom_register_event;
6047 event->unregister_event = mem_cgroup_oom_unregister_event;
6048 } else if (!strcmp(name, "memory.pressure_level")) {
6049 event->register_event = vmpressure_register_event;
6050 event->unregister_event = vmpressure_unregister_event;
6051 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
6052 event->register_event = memsw_cgroup_usage_register_event;
6053 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
6054 } else {
6055 ret = -EINVAL;
6056 goto out_put_cfile;
6057 }
6058
79bd9814 6059 /*
b5557c4c
TH
6060 * Verify @cfile should belong to @css. Also, remaining events are
6061 * automatically removed on cgroup destruction but the removal is
6062 * asynchronous, so take an extra ref on @css.
79bd9814 6063 */
5a17f543
TH
6064 cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
6065 &memory_cgrp_subsys);
79bd9814 6066 ret = -EINVAL;
5a17f543 6067 if (IS_ERR(cfile_css))
79bd9814 6068 goto out_put_cfile;
5a17f543
TH
6069 if (cfile_css != css) {
6070 css_put(cfile_css);
79bd9814 6071 goto out_put_cfile;
5a17f543 6072 }
79bd9814 6073
59b6f873 6074 ret = event->register_event(memcg, event->eventfd, buffer);
79bd9814
TH
6075 if (ret)
6076 goto out_put_css;
6077
6078 efile.file->f_op->poll(efile.file, &event->pt);
6079
fba94807
TH
6080 spin_lock(&memcg->event_list_lock);
6081 list_add(&event->list, &memcg->event_list);
6082 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
6083
6084 fdput(cfile);
6085 fdput(efile);
6086
6087 return 0;
6088
6089out_put_css:
b5557c4c 6090 css_put(css);
79bd9814
TH
6091out_put_cfile:
6092 fdput(cfile);
6093out_put_eventfd:
6094 eventfd_ctx_put(event->eventfd);
6095out_put_efile:
6096 fdput(efile);
6097out_kfree:
6098 kfree(event);
6099
6100 return ret;
6101}
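Driving the parser above from userspace ties the whole legacy (cgroup v1) mechanism together: the application creates an eventfd, opens the control file it wants to watch (one of the names dispatched on above), and writes "<event_fd> <control_fd> <args>" into cgroup.event_control. A minimal sketch arming a usage threshold, assuming a hypothetical memory cgroup at /sys/fs/cgroup/memory/mygrp:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/mygrp";	/* hypothetical path */
	char path[256], cmd[64];
	uint64_t count;
	int efd, cfd, ecfd;

	efd = eventfd(0, 0);					/* fd the kernel will signal */

	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", dir);
	cfd = open(path, O_RDONLY);				/* control file being watched */

	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ecfd = open(path, O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0) {
		perror("setup");
		return 1;
	}

	/* "<event_fd> <control_fd> <args>": here args is a threshold of 8 MiB. */
	snprintf(cmd, sizeof(cmd), "%d %d 8388608", efd, cfd);
	if (write(ecfd, cmd, strlen(cmd)) < 0) {
		perror("cgroup.event_control");
		return 1;
	}

	read(efd, &count, sizeof(count));			/* blocks until the threshold is crossed */
	printf("threshold crossed, eventfd count %llu\n", (unsigned long long)count);
	return 0;
}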
6102
8cdea7c0
BS
6103static struct cftype mem_cgroup_files[] = {
6104 {
0eea1030 6105 .name = "usage_in_bytes",
8c7c6e34 6106 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 6107 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 6108 },
c84872e1
PE
6109 {
6110 .name = "max_usage_in_bytes",
8c7c6e34 6111 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4da 6112 .trigger = mem_cgroup_reset,
791badbd 6113 .read_u64 = mem_cgroup_read_u64,
c84872e1 6114 },
8cdea7c0 6115 {
0eea1030 6116 .name = "limit_in_bytes",
8c7c6e34 6117 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa 6118 .write_string = mem_cgroup_write,
791badbd 6119 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 6120 },
296c81d8
BS
6121 {
6122 .name = "soft_limit_in_bytes",
6123 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
6124 .write_string = mem_cgroup_write,
791badbd 6125 .read_u64 = mem_cgroup_read_u64,
296c81d8 6126 },
8cdea7c0
BS
6127 {
6128 .name = "failcnt",
8c7c6e34 6129 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4da 6130 .trigger = mem_cgroup_reset,
791badbd 6131 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 6132 },
d2ceb9b7
KH
6133 {
6134 .name = "stat",
2da8ca82 6135 .seq_show = memcg_stat_show,
d2ceb9b7 6136 },
c1e862c1
KH
6137 {
6138 .name = "force_empty",
6139 .trigger = mem_cgroup_force_empty_write,
6140 },
18f59ea7
BS
6141 {
6142 .name = "use_hierarchy",
f00baae7 6143 .flags = CFTYPE_INSANE,
18f59ea7
BS
6144 .write_u64 = mem_cgroup_hierarchy_write,
6145 .read_u64 = mem_cgroup_hierarchy_read,
6146 },
79bd9814 6147 {
3bc942f3
TH
6148 .name = "cgroup.event_control", /* XXX: for compat */
6149 .write_string = memcg_write_event_control,
79bd9814
TH
6150 .flags = CFTYPE_NO_PREFIX,
6151 .mode = S_IWUGO,
6152 },
a7885eb8
KM
6153 {
6154 .name = "swappiness",
6155 .read_u64 = mem_cgroup_swappiness_read,
6156 .write_u64 = mem_cgroup_swappiness_write,
6157 },
7dc74be0
DN
6158 {
6159 .name = "move_charge_at_immigrate",
6160 .read_u64 = mem_cgroup_move_charge_read,
6161 .write_u64 = mem_cgroup_move_charge_write,
6162 },
9490ff27
KH
6163 {
6164 .name = "oom_control",
2da8ca82 6165 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 6166 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
6167 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6168 },
70ddf637
AV
6169 {
6170 .name = "pressure_level",
70ddf637 6171 },
406eb0c9
YH
6172#ifdef CONFIG_NUMA
6173 {
6174 .name = "numa_stat",
2da8ca82 6175 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
6176 },
6177#endif
510fc4e1
GC
6178#ifdef CONFIG_MEMCG_KMEM
6179 {
6180 .name = "kmem.limit_in_bytes",
6181 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6182 .write_string = mem_cgroup_write,
791badbd 6183 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
6184 },
6185 {
6186 .name = "kmem.usage_in_bytes",
6187 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 6188 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
6189 },
6190 {
6191 .name = "kmem.failcnt",
6192 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6193 .trigger = mem_cgroup_reset,
791badbd 6194 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
6195 },
6196 {
6197 .name = "kmem.max_usage_in_bytes",
6198 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6199 .trigger = mem_cgroup_reset,
791badbd 6200 .read_u64 = mem_cgroup_read_u64,
510fc4e1 6201 },
749c5415
GC
6202#ifdef CONFIG_SLABINFO
6203 {
6204 .name = "kmem.slabinfo",
2da8ca82 6205 .seq_show = mem_cgroup_slabinfo_read,
749c5415
GC
6206 },
6207#endif
8c7c6e34 6208#endif
6bc10349 6209 { }, /* terminate */
af36f906 6210};
8c7c6e34 6211
2d11085e
MH
6212#ifdef CONFIG_MEMCG_SWAP
6213static struct cftype memsw_cgroup_files[] = {
6214 {
6215 .name = "memsw.usage_in_bytes",
6216 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
791badbd 6217 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
6218 },
6219 {
6220 .name = "memsw.max_usage_in_bytes",
6221 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6222 .trigger = mem_cgroup_reset,
791badbd 6223 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
6224 },
6225 {
6226 .name = "memsw.limit_in_bytes",
6227 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6228 .write_string = mem_cgroup_write,
791badbd 6229 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
6230 },
6231 {
6232 .name = "memsw.failcnt",
6233 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6234 .trigger = mem_cgroup_reset,
791badbd 6235 .read_u64 = mem_cgroup_read_u64,
2d11085e
MH
6236 },
6237 { }, /* terminate */
6238};
6239#endif
c0ff4b85 6240static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
6241{
6242 struct mem_cgroup_per_node *pn;
1ecaab2b 6243 struct mem_cgroup_per_zone *mz;
41e3355d 6244 int zone, tmp = node;
1ecaab2b
KH
6245 /*
6246 * This routine is called against possible nodes.
 6247 * But it is a BUG to call kmalloc() against an offline node.
 6248 *
 6249 * TODO: this routine can waste a lot of memory for nodes which will
 6250 * never be onlined. It would be better to use a memory hotplug callback
6251 * function.
6252 */
41e3355d
KH
6253 if (!node_state(node, N_NORMAL_MEMORY))
6254 tmp = -1;
17295c88 6255 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
6256 if (!pn)
6257 return 1;
1ecaab2b 6258
1ecaab2b
KH
6259 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6260 mz = &pn->zoneinfo[zone];
bea8c150 6261 lruvec_init(&mz->lruvec);
bb4cc1a8
AM
6262 mz->usage_in_excess = 0;
6263 mz->on_tree = false;
d79154bb 6264 mz->memcg = memcg;
1ecaab2b 6265 }
54f72fe0 6266 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
6267 return 0;
6268}
6269
c0ff4b85 6270static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
1ecaab2b 6271{
54f72fe0 6272 kfree(memcg->nodeinfo[node]);
1ecaab2b
KH
6273}
6274
33327948
KH
6275static struct mem_cgroup *mem_cgroup_alloc(void)
6276{
d79154bb 6277 struct mem_cgroup *memcg;
8ff69e2c 6278 size_t size;
33327948 6279
8ff69e2c
VD
6280 size = sizeof(struct mem_cgroup);
6281 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
33327948 6282
8ff69e2c 6283 memcg = kzalloc(size, GFP_KERNEL);
d79154bb 6284 if (!memcg)
e7bbcdf3
DC
6285 return NULL;
6286
d79154bb
HD
6287 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6288 if (!memcg->stat)
d2e61b8d 6289 goto out_free;
d79154bb
HD
6290 spin_lock_init(&memcg->pcp_counter_lock);
6291 return memcg;
d2e61b8d
DC
6292
6293out_free:
8ff69e2c 6294 kfree(memcg);
d2e61b8d 6295 return NULL;
33327948
KH
6296}
6297
59927fb9 6298/*
c8b2a36f
GC
 6299 * When destroying a mem_cgroup, references from swap_cgroup can remain.
6300 * (scanning all at force_empty is too costly...)
6301 *
6302 * Instead of clearing all references at force_empty, we remember
 6303 * the number of references from swap_cgroup and free the mem_cgroup when
6304 * it goes down to 0.
6305 *
6306 * Removal of cgroup itself succeeds regardless of refs from swap.
59927fb9 6307 */
c8b2a36f
GC
6308
6309static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 6310{
c8b2a36f 6311 int node;
59927fb9 6312
bb4cc1a8 6313 mem_cgroup_remove_from_trees(memcg);
c8b2a36f
GC
6314
6315 for_each_node(node)
6316 free_mem_cgroup_per_zone_info(memcg, node);
6317
6318 free_percpu(memcg->stat);
6319
3f134619
GC
6320 /*
6321 * We need to make sure that (at least for now), the jump label
6322 * destruction code runs outside of the cgroup lock. This is because
6323 * get_online_cpus(), which is called from the static_branch update,
6324 * can't be called inside the cgroup_lock. cpusets are the ones
6325 * enforcing this dependency, so if they ever change, we might as well.
6326 *
6327 * schedule_work() will guarantee this happens. Be careful if you need
6328 * to move this code around, and make sure it is outside
6329 * the cgroup_lock.
6330 */
a8964b9b 6331 disarm_static_keys(memcg);
8ff69e2c 6332 kfree(memcg);
59927fb9 6333}
3afe36b1 6334
7bcc1bb1
DN
6335/*
6336 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
6337 */
e1aab161 6338struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
7bcc1bb1 6339{
c0ff4b85 6340 if (!memcg->res.parent)
7bcc1bb1 6341 return NULL;
c0ff4b85 6342 return mem_cgroup_from_res_counter(memcg->res.parent, res);
7bcc1bb1 6343}
e1aab161 6344EXPORT_SYMBOL(parent_mem_cgroup);
33327948 6345
bb4cc1a8
AM
6346static void __init mem_cgroup_soft_limit_tree_init(void)
6347{
6348 struct mem_cgroup_tree_per_node *rtpn;
6349 struct mem_cgroup_tree_per_zone *rtpz;
6350 int tmp, node, zone;
6351
6352 for_each_node(node) {
6353 tmp = node;
6354 if (!node_state(node, N_NORMAL_MEMORY))
6355 tmp = -1;
6356 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6357 BUG_ON(!rtpn);
6358
6359 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6360
6361 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6362 rtpz = &rtpn->rb_tree_per_zone[zone];
6363 rtpz->rb_root = RB_ROOT;
6364 spin_lock_init(&rtpz->lock);
6365 }
6366 }
6367}
6368
0eb253e2 6369static struct cgroup_subsys_state * __ref
eb95419b 6370mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8cdea7c0 6371{
d142e3e6 6372 struct mem_cgroup *memcg;
04046e1a 6373 long error = -ENOMEM;
6d12e2d8 6374 int node;
8cdea7c0 6375
c0ff4b85
R
6376 memcg = mem_cgroup_alloc();
6377 if (!memcg)
04046e1a 6378 return ERR_PTR(error);
78fb7466 6379
3ed28fa1 6380 for_each_node(node)
c0ff4b85 6381 if (alloc_mem_cgroup_per_zone_info(memcg, node))
6d12e2d8 6382 goto free_out;
f64c3f54 6383
c077719b 6384 /* root ? */
eb95419b 6385 if (parent_css == NULL) {
a41c58a6 6386 root_mem_cgroup = memcg;
d142e3e6
GC
6387 res_counter_init(&memcg->res, NULL);
6388 res_counter_init(&memcg->memsw, NULL);
6389 res_counter_init(&memcg->kmem, NULL);
18f59ea7 6390 }
28dbc4b6 6391
d142e3e6
GC
6392 memcg->last_scanned_node = MAX_NUMNODES;
6393 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
6394 memcg->move_charge_at_immigrate = 0;
6395 mutex_init(&memcg->thresholds_lock);
6396 spin_lock_init(&memcg->move_lock);
70ddf637 6397 vmpressure_init(&memcg->vmpressure);
fba94807
TH
6398 INIT_LIST_HEAD(&memcg->event_list);
6399 spin_lock_init(&memcg->event_list_lock);
d142e3e6
GC
6400
6401 return &memcg->css;
6402
6403free_out:
6404 __mem_cgroup_free(memcg);
6405 return ERR_PTR(error);
6406}
6407
6408static int
eb95419b 6409mem_cgroup_css_online(struct cgroup_subsys_state *css)
d142e3e6 6410{
eb95419b
TH
6411 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6412 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
d142e3e6 6413
4219b2da
LZ
6414 if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6415 return -ENOSPC;
6416
63876986 6417 if (!parent)
d142e3e6
GC
6418 return 0;
6419
0999821b 6420 mutex_lock(&memcg_create_mutex);
d142e3e6
GC
6421
6422 memcg->use_hierarchy = parent->use_hierarchy;
6423 memcg->oom_kill_disable = parent->oom_kill_disable;
6424 memcg->swappiness = mem_cgroup_swappiness(parent);
6425
6426 if (parent->use_hierarchy) {
c0ff4b85
R
6427 res_counter_init(&memcg->res, &parent->res);
6428 res_counter_init(&memcg->memsw, &parent->memsw);
510fc4e1 6429 res_counter_init(&memcg->kmem, &parent->kmem);
55007d84 6430
7bcc1bb1 6431 /*
8d76a979
LZ
6432 * No need to take a reference to the parent because cgroup
6433 * core guarantees its existence.
7bcc1bb1 6434 */
18f59ea7 6435 } else {
c0ff4b85
R
6436 res_counter_init(&memcg->res, NULL);
6437 res_counter_init(&memcg->memsw, NULL);
510fc4e1 6438 res_counter_init(&memcg->kmem, NULL);
8c7f6edb
TH
6439 /*
 6440 * Deeper hierarchy with use_hierarchy == false doesn't make
 6441 * much sense, so let the cgroup subsystem know about this
6442 * unfortunate state in our controller.
6443 */
d142e3e6 6444 if (parent != root_mem_cgroup)
073219e9 6445 memory_cgrp_subsys.broken_hierarchy = true;
18f59ea7 6446 }
0999821b 6447 mutex_unlock(&memcg_create_mutex);
d6441637 6448
073219e9 6449 return memcg_init_kmem(memcg, &memory_cgrp_subsys);
8cdea7c0
BS
6450}
6451
5f578161
MH
6452/*
6453 * Announce all parents that a group from their hierarchy is gone.
6454 */
6455static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6456{
6457 struct mem_cgroup *parent = memcg;
6458
6459 while ((parent = parent_mem_cgroup(parent)))
519ebea3 6460 mem_cgroup_iter_invalidate(parent);
5f578161
MH
6461
6462 /*
6463 * if the root memcg is not hierarchical we have to check it
 6464 * explicitly.
6465 */
6466 if (!root_mem_cgroup->use_hierarchy)
519ebea3 6467 mem_cgroup_iter_invalidate(root_mem_cgroup);
5f578161
MH
6468}
6469
eb95419b 6470static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 6471{
eb95419b 6472 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 6473 struct mem_cgroup_event *event, *tmp;
4fb1a86f 6474 struct cgroup_subsys_state *iter;
79bd9814
TH
6475
6476 /*
6477 * Unregister events and notify userspace.
6478 * Notify userspace about cgroup removing only after rmdir of cgroup
6479 * directory to avoid race between userspace and kernelspace.
6480 */
fba94807
TH
6481 spin_lock(&memcg->event_list_lock);
6482 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
6483 list_del_init(&event->list);
6484 schedule_work(&event->remove);
6485 }
fba94807 6486 spin_unlock(&memcg->event_list_lock);
ec64f515 6487
10d5ebf4
LZ
6488 kmem_cgroup_css_offline(memcg);
6489
5f578161 6490 mem_cgroup_invalidate_reclaim_iterators(memcg);
4fb1a86f
FB
6491
6492 /*
6493 * This requires that offlining is serialized. Right now that is
6494 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6495 */
6496 css_for_each_descendant_post(iter, css)
6497 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6498
1f458cbf 6499 mem_cgroup_destroy_all_caches(memcg);
33cb876e 6500 vmpressure_cleanup(&memcg->vmpressure);
df878fb0
KH
6501}
6502
eb95419b 6503static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 6504{
eb95419b 6505 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
96f1c58d
JW
6506 /*
6507 * XXX: css_offline() would be where we should reparent all
6508 * memory to prepare the cgroup for destruction. However,
6509 * memcg does not do css_tryget() and res_counter charging
6510 * under the same RCU lock region, which means that charging
6511 * could race with offlining. Offlining only happens to
6512 * cgroups with no tasks in them but charges can show up
6513 * without any tasks from the swapin path when the target
6514 * memcg is looked up from the swapout record and not from the
6515 * current task as it usually is. A race like this can leak
6516 * charges and put pages with stale cgroup pointers into
6517 * circulation:
6518 *
6519 * #0 #1
6520 * lookup_swap_cgroup_id()
6521 * rcu_read_lock()
6522 * mem_cgroup_lookup()
6523 * css_tryget()
6524 * rcu_read_unlock()
6525 * disable css_tryget()
6526 * call_rcu()
6527 * offline_css()
6528 * reparent_charges()
6529 * res_counter_charge()
6530 * css_put()
6531 * css_free()
6532 * pc->mem_cgroup = dead memcg
6533 * add page to lru
6534 *
6535 * The bulk of the charges are still moved in offline_css() to
6536 * avoid pinning a lot of pages in case a long-term reference
6537 * like a swapout record is deferring the css_free() to long
6538 * after offlining. But this makes sure we catch any charges
6539 * made after offlining:
6540 */
6541 mem_cgroup_reparent_charges(memcg);
c268e994 6542
10d5ebf4 6543 memcg_destroy_kmem(memcg);
465939a1 6544 __mem_cgroup_free(memcg);
8cdea7c0
BS
6545}
6546
02491447 6547#ifdef CONFIG_MMU
7dc74be0 6548/* Handlers for move charge at task migration. */
854ffa8d
DN
6549#define PRECHARGE_COUNT_AT_ONCE 256
6550static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 6551{
854ffa8d
DN
6552 int ret = 0;
6553 int batch_count = PRECHARGE_COUNT_AT_ONCE;
c0ff4b85 6554 struct mem_cgroup *memcg = mc.to;
4ffef5fe 6555
c0ff4b85 6556 if (mem_cgroup_is_root(memcg)) {
854ffa8d
DN
6557 mc.precharge += count;
6558 /* we don't need css_get for root */
6559 return ret;
6560 }
6561 /* try to charge at once */
6562 if (count > 1) {
6563 struct res_counter *dummy;
6564 /*
c0ff4b85 6565 * "memcg" cannot be under rmdir() because we've already checked
854ffa8d
DN
6566 * by cgroup_lock_live_cgroup() that it is not removed and we
6567 * are still under the same cgroup_mutex. So we can postpone
6568 * css_get().
6569 */
c0ff4b85 6570 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
854ffa8d 6571 goto one_by_one;
c0ff4b85 6572 if (do_swap_account && res_counter_charge(&memcg->memsw,
854ffa8d 6573 PAGE_SIZE * count, &dummy)) {
c0ff4b85 6574 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
854ffa8d
DN
6575 goto one_by_one;
6576 }
6577 mc.precharge += count;
854ffa8d
DN
6578 return ret;
6579 }
6580one_by_one:
6581 /* fall back to one by one charge */
6582 while (count--) {
6583 if (signal_pending(current)) {
6584 ret = -EINTR;
6585 break;
6586 }
6587 if (!batch_count--) {
6588 batch_count = PRECHARGE_COUNT_AT_ONCE;
6589 cond_resched();
6590 }
6d1fdc48 6591 ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
38c5d72f 6592 if (ret)
854ffa8d 6593 /* mem_cgroup_clear_mc() will do uncharge later */
38c5d72f 6594 return ret;
854ffa8d
DN
6595 mc.precharge++;
6596 }
4ffef5fe
DN
6597 return ret;
6598}
6599
6600/**
8d32ff84 6601 * get_mctgt_type - get target type of moving charge
4ffef5fe
DN
6602 * @vma: the vma the pte to be checked belongs
6603 * @addr: the address corresponding to the pte to be checked
6604 * @ptent: the pte to be checked
02491447 6605 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4ffef5fe
DN
6606 *
6607 * Returns
6608 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
6609 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6610 * move charge. If @target is not NULL, the page is stored in target->page
6611 * with an extra refcount taken (callers should handle it).
02491447
DN
6612 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6613 * target for charge migration. If @target is not NULL, the entry is stored
6614 * in target->ent.
4ffef5fe
DN
6615 *
6616 * Called with pte lock held.
6617 */
4ffef5fe
DN
6618union mc_target {
6619 struct page *page;
02491447 6620 swp_entry_t ent;
4ffef5fe
DN
6621};
6622
4ffef5fe 6623enum mc_target_type {
8d32ff84 6624 MC_TARGET_NONE = 0,
4ffef5fe 6625 MC_TARGET_PAGE,
02491447 6626 MC_TARGET_SWAP,
4ffef5fe
DN
6627};
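/*
 * Illustrative only: a caller of get_mctgt_type() is expected to branch
 * on the returned type roughly as below (the real page-table walker,
 * mem_cgroup_move_charge_pte_range(), follows this shape).
 */
#if 0
	union mc_target target;

	switch (get_mctgt_type(vma, addr, ptent, &target)) {
	case MC_TARGET_PAGE:
		/* target.page carries an extra reference; put_page() when done */
		break;
	case MC_TARGET_SWAP:
		/* target.ent holds the swap entry to account for */
		break;
	default:
		/* MC_TARGET_NONE: nothing to move at this pte */
		break;
	}
#endif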
6628
90254a65
DN
6629static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6630 unsigned long addr, pte_t ptent)
4ffef5fe 6631{
90254a65 6632 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 6633
90254a65
DN
6634 if (!page || !page_mapped(page))
6635 return NULL;
6636 if (PageAnon(page)) {
6637 /* we don't move shared anon */
4b91355e 6638 if (!move_anon())
90254a65 6639 return NULL;
87946a72
DN
6640 } else if (!move_file())
6641 /* we ignore mapcount for file pages */
90254a65
DN
6642 return NULL;
6643 if (!get_page_unless_zero(page))
6644 return NULL;
6645
6646 return page;
6647}
6648
4b91355e 6649#ifdef CONFIG_SWAP
90254a65
DN
6650static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6651 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6652{
90254a65
DN
6653 struct page *page = NULL;
6654 swp_entry_t ent = pte_to_swp_entry(ptent);
6655
6656 if (!move_anon() || non_swap_entry(ent))
6657 return NULL;
4b91355e
KH
6658 /*
6659 * Because lookup_swap_cache() updates some statistics counters,
6660 * we call find_get_page() on the swap address space directly.
6661 */
33806f06 6662 page = find_get_page(swap_address_space(ent), ent.val);
90254a65
DN
6663 if (do_swap_account)
6664 entry->val = ent.val;
6665
6666 return page;
6667}
4b91355e
KH
6668#else
6669static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6670 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6671{
6672 return NULL;
6673}
6674#endif
90254a65 6675
87946a72
DN
6676static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6677 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6678{
6679 struct page *page = NULL;
87946a72
DN
6680 struct address_space *mapping;
6681 pgoff_t pgoff;
6682
6683 if (!vma->vm_file) /* anonymous vma */
6684 return NULL;
6685 if (!move_file())
6686 return NULL;
6687
87946a72
DN
6688 mapping = vma->vm_file->f_mapping;
6689 if (pte_none(ptent))
6690 pgoff = linear_page_index(vma, addr);
6691 else /* pte_file(ptent) is true */
6692 pgoff = pte_to_pgoff(ptent);
6693
6694 /* the page is moved even if it's not in this task's RSS (not faulted in yet). */
aa3b1895
HD
6695 page = find_get_page(mapping, pgoff);
6696
6697#ifdef CONFIG_SWAP
6698 /* shmem/tmpfs may report page out on swap: account for that too. */
6699 if (radix_tree_exceptional_entry(page)) {
6700 swp_entry_t swap = radix_to_swp_entry(page);
87946a72 6701 if (do_swap_account)
aa3b1895 6702 *entry = swap;
33806f06 6703 page = find_get_page(swap_address_space(swap), swap.val);
87946a72 6704 }
aa3b1895 6705#endif
87946a72
DN
6706 return page;
6707}
6708
8d32ff84 6709static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
6710 unsigned long addr, pte_t ptent, union mc_target *target)
6711{
6712 struct page *page = NULL;
6713 struct page_cgroup *pc;
8d32ff84 6714 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
6715 swp_entry_t ent = { .val = 0 };
6716
6717 if (pte_present(ptent))
6718 page = mc_handle_present_pte(vma, addr, ptent);
6719 else if (is_swap_pte(ptent))
6720 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
87946a72
DN
6721 else if (pte_none(ptent) || pte_file(ptent))
6722 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
6723
6724 if (!page && !ent.val)
8d32ff84 6725 return ret;
02491447
DN
6726 if (page) {
6727 pc = lookup_page_cgroup(page);
6728 /*
6729 * Do only a loose check without the page_cgroup lock;
6730 * mem_cgroup_move_account() checks whether the pc is valid
6731 * under the lock.
6732 */
6733 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6734 ret = MC_TARGET_PAGE;
6735 if (target)
6736 target->page = page;
6737 }
6738 if (!ret || !target)
6739 put_page(page);
6740 }
90254a65
DN
6741 /* There is a swap entry and the page doesn't exist or isn't charged */
6742 if (ent.val && !ret &&
34c00c31 6743 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
6744 ret = MC_TARGET_SWAP;
6745 if (target)
6746 target->ent = ent;
4ffef5fe 6747 }
4ffef5fe
DN
6748 return ret;
6749}
6750
12724850
NH
6751#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6752/*
6753 * We don't consider swapping or file-mapped pages because THP does not
6754 * support them for now.
6755 * The caller should make sure that pmd_trans_huge(pmd) is true.
6756 */
6757static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6758 unsigned long addr, pmd_t pmd, union mc_target *target)
6759{
6760 struct page *page = NULL;
6761 struct page_cgroup *pc;
6762 enum mc_target_type ret = MC_TARGET_NONE;
6763
6764 page = pmd_page(pmd);
309381fe 6765 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
12724850
NH
6766 if (!move_anon())
6767 return ret;
6768 pc = lookup_page_cgroup(page);
6769 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6770 ret = MC_TARGET_PAGE;
6771 if (target) {
6772 get_page(page);
6773 target->page = page;
6774 }
6775 }
6776 return ret;
6777}
6778#else
6779static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6780 unsigned long addr, pmd_t pmd, union mc_target *target)
6781{
6782 return MC_TARGET_NONE;
6783}
6784#endif
6785
4ffef5fe
DN
6786static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6787 unsigned long addr, unsigned long end,
6788 struct mm_walk *walk)
6789{
6790 struct vm_area_struct *vma = walk->private;
6791 pte_t *pte;
6792 spinlock_t *ptl;
6793
bf929152 6794 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
12724850
NH
6795 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6796 mc.precharge += HPAGE_PMD_NR;
bf929152 6797 spin_unlock(ptl);
1a5a9906 6798 return 0;
12724850 6799 }
03319327 6800
45f83cef
AA
6801 if (pmd_trans_unstable(pmd))
6802 return 0;
4ffef5fe
DN
6803 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6804 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 6805 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
6806 mc.precharge++; /* increment precharge temporarily */
6807 pte_unmap_unlock(pte - 1, ptl);
6808 cond_resched();
6809
7dc74be0
DN
6810 return 0;
6811}
6812
4ffef5fe
DN
6813static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6814{
6815 unsigned long precharge;
6816 struct vm_area_struct *vma;
6817
dfe076b0 6818 down_read(&mm->mmap_sem);
4ffef5fe
DN
6819 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6820 struct mm_walk mem_cgroup_count_precharge_walk = {
6821 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6822 .mm = mm,
6823 .private = vma,
6824 };
6825 if (is_vm_hugetlb_page(vma))
6826 continue;
4ffef5fe
DN
6827 walk_page_range(vma->vm_start, vma->vm_end,
6828 &mem_cgroup_count_precharge_walk);
6829 }
dfe076b0 6830 up_read(&mm->mmap_sem);
4ffef5fe
DN
6831
6832 precharge = mc.precharge;
6833 mc.precharge = 0;
6834
6835 return precharge;
6836}
6837
4ffef5fe
DN
6838static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6839{
dfe076b0
DN
6840 unsigned long precharge = mem_cgroup_count_precharge(mm);
6841
6842 VM_BUG_ON(mc.moving_task);
6843 mc.moving_task = current;
6844 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
6845}
6846
dfe076b0
DN
6847/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6848static void __mem_cgroup_clear_mc(void)
4ffef5fe 6849{
2bd9bb20
KH
6850 struct mem_cgroup *from = mc.from;
6851 struct mem_cgroup *to = mc.to;
4050377b 6852 int i;
2bd9bb20 6853
4ffef5fe 6854 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d
DN
6855 if (mc.precharge) {
6856 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
6857 mc.precharge = 0;
6858 }
6859 /*
6860 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6861 * we must uncharge here.
6862 */
6863 if (mc.moved_charge) {
6864 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6865 mc.moved_charge = 0;
4ffef5fe 6866 }
483c30b5
DN
6867 /* we must fixup refcnts and charges */
6868 if (mc.moved_swap) {
483c30b5
DN
6869 /* uncharge swap account from the old cgroup */
6870 if (!mem_cgroup_is_root(mc.from))
6871 res_counter_uncharge(&mc.from->memsw,
6872 PAGE_SIZE * mc.moved_swap);
4050377b
LZ
6873
6874 for (i = 0; i < mc.moved_swap; i++)
6875 css_put(&mc.from->css);
483c30b5
DN
6876
6877 if (!mem_cgroup_is_root(mc.to)) {
6878 /*
6879 * we charged both to->res and to->memsw, so we should
6880 * uncharge to->res.
6881 */
6882 res_counter_uncharge(&mc.to->res,
6883 PAGE_SIZE * mc.moved_swap);
483c30b5 6884 }
4050377b 6885 /* we've already done css_get(mc.to) */
483c30b5
DN
6886 mc.moved_swap = 0;
6887 }
dfe076b0
DN
6888 memcg_oom_recover(from);
6889 memcg_oom_recover(to);
6890 wake_up_all(&mc.waitq);
6891}
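/*
 * For reference, the counters cleared above track three kinds of state:
 *   mc.precharge    - charges taken on mc.to in advance but not yet
 *                     attached to any page
 *   mc.moved_charge - pages already re-accounted to mc.to whose original
 *                     charge on mc.from still had to be dropped
 *   mc.moved_swap   - swap entries moved to mc.to; the memsw charge on
 *                     mc.from and the extra res charge on mc.to are
 *                     fixed up here
 */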
6892
6893static void mem_cgroup_clear_mc(void)
6894{
6895 struct mem_cgroup *from = mc.from;
6896
6897 /*
6898 * we must clear moving_task before waking up waiters at the end of
6899 * task migration.
6900 */
6901 mc.moving_task = NULL;
6902 __mem_cgroup_clear_mc();
2bd9bb20 6903 spin_lock(&mc.lock);
4ffef5fe
DN
6904 mc.from = NULL;
6905 mc.to = NULL;
2bd9bb20 6906 spin_unlock(&mc.lock);
32047e2a 6907 mem_cgroup_end_move(from);
4ffef5fe
DN
6908}
6909
eb95419b 6910static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
761b3ef5 6911 struct cgroup_taskset *tset)
7dc74be0 6912{
2f7ee569 6913 struct task_struct *p = cgroup_taskset_first(tset);
7dc74be0 6914 int ret = 0;
eb95419b 6915 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
ee5e8472 6916 unsigned long move_charge_at_immigrate;
7dc74be0 6917
ee5e8472
GC
6918 /*
6919 * We are now committed to this value, whatever it is. Changes in this
6920 * tunable will only affect upcoming migrations, not the current one.
6921 * So we need to save it, and keep using the saved value.
6922 */
6923 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
6924 if (move_charge_at_immigrate) {
7dc74be0
DN
6925 struct mm_struct *mm;
6926 struct mem_cgroup *from = mem_cgroup_from_task(p);
6927
c0ff4b85 6928 VM_BUG_ON(from == memcg);
7dc74be0
DN
6929
6930 mm = get_task_mm(p);
6931 if (!mm)
6932 return 0;
7dc74be0 6933 /* We move charges only when we move the owner of the mm */
4ffef5fe
DN
6934 if (mm->owner == p) {
6935 VM_BUG_ON(mc.from);
6936 VM_BUG_ON(mc.to);
6937 VM_BUG_ON(mc.precharge);
854ffa8d 6938 VM_BUG_ON(mc.moved_charge);
483c30b5 6939 VM_BUG_ON(mc.moved_swap);
32047e2a 6940 mem_cgroup_start_move(from);
2bd9bb20 6941 spin_lock(&mc.lock);
4ffef5fe 6942 mc.from = from;
c0ff4b85 6943 mc.to = memcg;
ee5e8472 6944 mc.immigrate_flags = move_charge_at_immigrate;
2bd9bb20 6945 spin_unlock(&mc.lock);
dfe076b0 6946 /* We set mc.moving_task later */
4ffef5fe
DN
6947
6948 ret = mem_cgroup_precharge_mc(mm);
6949 if (ret)
6950 mem_cgroup_clear_mc();
dfe076b0
DN
6951 }
6952 mmput(mm);
7dc74be0
DN
6953 }
6954 return ret;
6955}
6956
eb95419b 6957static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
761b3ef5 6958 struct cgroup_taskset *tset)
7dc74be0 6959{
4ffef5fe 6960 mem_cgroup_clear_mc();
7dc74be0
DN
6961}
6962
4ffef5fe
DN
6963static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6964 unsigned long addr, unsigned long end,
6965 struct mm_walk *walk)
7dc74be0 6966{
4ffef5fe
DN
6967 int ret = 0;
6968 struct vm_area_struct *vma = walk->private;
6969 pte_t *pte;
6970 spinlock_t *ptl;
12724850
NH
6971 enum mc_target_type target_type;
6972 union mc_target target;
6973 struct page *page;
6974 struct page_cgroup *pc;
4ffef5fe 6975
12724850
NH
6976 /*
6977 * We don't take compound_lock() here, but there is no race with thp
6978 * splitting because:
6979 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6980 * under splitting, which means there's no concurrent thp split,
6981 * - if another thread runs into split_huge_page() just after we
6982 * entered this if-block, the thread must wait for page table lock
6983 * to be unlocked in __split_huge_page_splitting(), where the main
6984 * part of thp split is not executed yet.
6985 */
bf929152 6986 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
62ade86a 6987 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 6988 spin_unlock(ptl);
12724850
NH
6989 return 0;
6990 }
6991 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6992 if (target_type == MC_TARGET_PAGE) {
6993 page = target.page;
6994 if (!isolate_lru_page(page)) {
6995 pc = lookup_page_cgroup(page);
6996 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
2f3479b1 6997 pc, mc.from, mc.to)) {
12724850
NH
6998 mc.precharge -= HPAGE_PMD_NR;
6999 mc.moved_charge += HPAGE_PMD_NR;
7000 }
7001 putback_lru_page(page);
7002 }
7003 put_page(page);
7004 }
bf929152 7005 spin_unlock(ptl);
1a5a9906 7006 return 0;
12724850
NH
7007 }
7008
45f83cef
AA
7009 if (pmd_trans_unstable(pmd))
7010 return 0;
4ffef5fe
DN
7011retry:
7012 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
7013 for (; addr != end; addr += PAGE_SIZE) {
7014 pte_t ptent = *(pte++);
02491447 7015 swp_entry_t ent;
4ffef5fe
DN
7016
7017 if (!mc.precharge)
7018 break;
7019
8d32ff84 7020 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4ffef5fe
DN
7021 case MC_TARGET_PAGE:
7022 page = target.page;
7023 if (isolate_lru_page(page))
7024 goto put;
7025 pc = lookup_page_cgroup(page);
7ec99d62 7026 if (!mem_cgroup_move_account(page, 1, pc,
2f3479b1 7027 mc.from, mc.to)) {
4ffef5fe 7028 mc.precharge--;
854ffa8d
DN
7029 /* we uncharge from mc.from later. */
7030 mc.moved_charge++;
4ffef5fe
DN
7031 }
7032 putback_lru_page(page);
8d32ff84 7033put: /* get_mctgt_type() gets the page */
4ffef5fe
DN
7034 put_page(page);
7035 break;
02491447
DN
7036 case MC_TARGET_SWAP:
7037 ent = target.ent;
e91cbb42 7038 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 7039 mc.precharge--;
483c30b5
DN
7040 /* we fixup refcnts and charges later. */
7041 mc.moved_swap++;
7042 }
02491447 7043 break;
4ffef5fe
DN
7044 default:
7045 break;
7046 }
7047 }
7048 pte_unmap_unlock(pte - 1, ptl);
7049 cond_resched();
7050
7051 if (addr != end) {
7052 /*
7053 * We have consumed all the precharges we got in can_attach().
7054 * We try to charge one by one, but don't do any additional
7055 * charges to mc.to if we have already failed to charge once in the
7056 * attach() phase.
7057 */
854ffa8d 7058 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
7059 if (!ret)
7060 goto retry;
7061 }
7062
7063 return ret;
7064}
7065
7066static void mem_cgroup_move_charge(struct mm_struct *mm)
7067{
7068 struct vm_area_struct *vma;
7069
7070 lru_add_drain_all();
dfe076b0
DN
7071retry:
7072 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
7073 /*
7074 * Someone who is holding the mmap_sem might be waiting in
7075 * the waitq. So we cancel all extra charges, wake up all waiters,
7076 * and retry. Because we cancel precharges, we might not be able
7077 * to move enough charges, but moving charge is a best-effort
7078 * feature anyway, so it wouldn't be a big problem.
7079 */
7080 __mem_cgroup_clear_mc();
7081 cond_resched();
7082 goto retry;
7083 }
4ffef5fe
DN
7084 for (vma = mm->mmap; vma; vma = vma->vm_next) {
7085 int ret;
7086 struct mm_walk mem_cgroup_move_charge_walk = {
7087 .pmd_entry = mem_cgroup_move_charge_pte_range,
7088 .mm = mm,
7089 .private = vma,
7090 };
7091 if (is_vm_hugetlb_page(vma))
7092 continue;
4ffef5fe
DN
7093 ret = walk_page_range(vma->vm_start, vma->vm_end,
7094 &mem_cgroup_move_charge_walk);
7095 if (ret)
7096 /*
7097 * A non-zero return means we have consumed all precharges and
7098 * failed to do an additional charge. Just abandon here.
7099 */
7100 break;
7101 }
dfe076b0 7102 up_read(&mm->mmap_sem);
7dc74be0
DN
7103}
7104
eb95419b 7105static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
761b3ef5 7106 struct cgroup_taskset *tset)
67e465a7 7107{
2f7ee569 7108 struct task_struct *p = cgroup_taskset_first(tset);
a433658c 7109 struct mm_struct *mm = get_task_mm(p);
dfe076b0 7110
dfe076b0 7111 if (mm) {
a433658c
KM
7112 if (mc.to)
7113 mem_cgroup_move_charge(mm);
dfe076b0
DN
7114 mmput(mm);
7115 }
a433658c
KM
7116 if (mc.to)
7117 mem_cgroup_clear_mc();
67e465a7 7118}
5cfb80a7 7119#else /* !CONFIG_MMU */
eb95419b 7120static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
761b3ef5 7121 struct cgroup_taskset *tset)
5cfb80a7
DN
7122{
7123 return 0;
7124}
eb95419b 7125static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
761b3ef5 7126 struct cgroup_taskset *tset)
5cfb80a7
DN
7127{
7128}
eb95419b 7129static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
761b3ef5 7130 struct cgroup_taskset *tset)
5cfb80a7
DN
7131{
7132}
7133#endif
67e465a7 7134
f00baae7
TH
7135/*
7136 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
7137 * to verify the sane_behavior flag on each mount attempt.
7138 */
eb95419b 7139static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
f00baae7
TH
7140{
7141 /*
7142 * use_hierarchy is forced with sane_behavior. cgroup core
7143 * guarantees that @root doesn't have any children, so turning it
7144 * on for the root memcg is enough.
7145 */
eb95419b
TH
7146 if (cgroup_sane_behavior(root_css->cgroup))
7147 mem_cgroup_from_css(root_css)->use_hierarchy = true;
f00baae7
TH
7148}
7149
073219e9 7150struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 7151 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 7152 .css_online = mem_cgroup_css_online,
92fb9748
TH
7153 .css_offline = mem_cgroup_css_offline,
7154 .css_free = mem_cgroup_css_free,
7dc74be0
DN
7155 .can_attach = mem_cgroup_can_attach,
7156 .cancel_attach = mem_cgroup_cancel_attach,
67e465a7 7157 .attach = mem_cgroup_move_task,
f00baae7 7158 .bind = mem_cgroup_bind,
6bc10349 7159 .base_cftypes = mem_cgroup_files,
6d12e2d8 7160 .early_init = 0,
8cdea7c0 7161};
c077719b 7162
c255a458 7163#ifdef CONFIG_MEMCG_SWAP
a42c390c
MH
7164static int __init enable_swap_account(char *s)
7165{
a2c8990a 7166 if (!strcmp(s, "1"))
a42c390c 7167 really_do_swap_account = 1;
a2c8990a 7168 else if (!strcmp(s, "0"))
a42c390c
MH
7169 really_do_swap_account = 0;
7170 return 1;
7171}
a2c8990a 7172__setup("swapaccount=", enable_swap_account);
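/*
 * Usage: boot with "swapaccount=0" to disable swap accounting, or
 * "swapaccount=1" to force it on, when CONFIG_MEMCG_SWAP is built in.
 */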
c077719b 7173
2d11085e
MH
7174static void __init memsw_file_init(void)
7175{
073219e9 7176 WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
6acc8b02
MH
7177}
7178
7179static void __init enable_swap_cgroup(void)
7180{
7181 if (!mem_cgroup_disabled() && really_do_swap_account) {
7182 do_swap_account = 1;
7183 memsw_file_init();
7184 }
2d11085e 7185}
6acc8b02 7186
2d11085e 7187#else
6acc8b02 7188static void __init enable_swap_cgroup(void)
2d11085e
MH
7189{
7190}
c077719b 7191#endif
2d11085e
MH
7192
7193/*
1081312f
MH
7194 * subsys_initcall() for the memory controller.
7195 *
7196 * Some parts, like hotcpu_notifier(), have to be initialized from this context
7197 * because of lock dependencies (cgroup_lock -> cpu hotplug), but basically
7198 * everything that doesn't depend on a specific mem_cgroup structure should
7199 * be initialized from here.
2d11085e
MH
7200 */
7201static int __init mem_cgroup_init(void)
7202{
7203 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6acc8b02 7204 enable_swap_cgroup();
bb4cc1a8 7205 mem_cgroup_soft_limit_tree_init();
e4777496 7206 memcg_stock_init();
2d11085e
MH
7207 return 0;
7208}
7209subsys_initcall(mem_cgroup_init);