memcg: swap cgroup for remembering usage
mm/page_cgroup.c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>

static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
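/*
 * Example: with this flat layout, finding a page's page_cgroup is pure
 * arithmetic over one per-node array. If a node's node_start_pfn is
 * 0x10000, the entry for pfn 0x10234 lives at node_page_cgroup + 0x234.
 */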

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
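/*
 * Rough cost example (assuming a 64-bit kernel, where the three fields
 * of struct page_cgroup add up to 24 bytes): a node spanning 1M pages
 * (4GB with 4KB pages) needs a 24MB bootmem table.
 */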

void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_subsys.disabled)
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed\n");
	printk(KERN_CRIT "please try the 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}
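/*
 * Note that section->page_cgroup is not a plain array base: it is
 * stored with the section's start pfn already subtracted (see
 * "section->page_cgroup = base - pfn" in init_section_page_cgroup()
 * below), so adding the full pfn yields the right entry.
 */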

/* __alloc_bootmem...() is protected by !slab_is_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we have to
		 * re-initialize it.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* Is this bootmem? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}
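/*
 * Tables handed out by the boot allocator live in pages marked
 * PageReserved; those cannot go back through kfree()/vfree(), which is
 * why they are deliberately left in place above.
 */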

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;

	return ret;
}
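/*
 * Event flow, as wired up by hotplug_memory_notifier() in
 * page_cgroup_init() below: MEM_GOING_ONLINE allocates tables for the
 * incoming range (a failure here makes the notifier veto the online),
 * MEM_OFFLINE frees them, and the remaining transitions need no work.
 */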

#endif

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_subsys.disabled)
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try the 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
		" want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

/*
 * These 8 bytes per entry seem big... maybe we can reduce this once we
 * can use an "id" for the cgroup rather than a pointer.
 */
struct swap_cgroup {
	struct mem_cgroup *val;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
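/*
 * Worked example (assuming 4KB pages and 8-byte pointers, so
 * SC_PER_PAGE == 512 and SC_POS_MASK == 511): swp_offset 1000 maps to
 * map page idx = 1000 / 512 = 1 and slot pos = 1000 & 511 = 488.
 */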

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's
 * charge/uncharge operations against SwapCache. At swap_free(), it is
 * accessed directly from swap code.
 *
 * This means:
 * - there is no race on "exchange" when we are reached via SwapCache,
 *   because the SwapCache (and its swp_entry) is under lock.
 * - when called via swap_free(), there is no remaining user of the
 *   entry, so again there is no race.
 * Hence, no lock is needed around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	if (!do_swap_account)
		return 0;
	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @mem: mem_cgroup to be recorded
 *
 * Returns the old value on success, NULL on failure.
 * (Of course, the old value can itself be NULL.)
 */
struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	struct mem_cgroup *old;

	if (!do_swap_account)
		return NULL;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	old = sc->val;
	sc->val = mem;

	return old;
}

/**
 * lookup_swap_cgroup - lookup the mem_cgroup tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns a pointer to the mem_cgroup on success, NULL on failure.
 */
struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	struct mem_cgroup *ret;

	if (!do_swap_account)
		return NULL;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->val;
	return ret;
}
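/*
 * Minimal usage sketch (not part of the original file; "entry" and
 * "memcg" are hypothetical): record the owning group for a swap slot
 * and read it back.
 */
#if 0
static void swap_cgroup_usage_sketch(swp_entry_t entry,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	/* exchange: returns the previous owner (possibly NULL) */
	old = swap_cgroup_record(entry, memcg);
	/* lookup: should now observe memcg */
	WARN_ON(lookup_swap_cgroup(entry) != memcg);
	(void)old;
}
#endif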

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = max_pages / SC_PER_PAGE + 1;
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		vfree(array);
		mutex_unlock(&swap_cgroup_mutex);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	printk(KERN_INFO
		"swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
		" and %ld bytes to hold mem_cgroup pointers on swap\n",
		array_size, length * PAGE_SIZE);
	printk(KERN_INFO
		"swap_cgroup can be disabled by the noswapaccount boot option\n");

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by the noswapaccount boot option\n");
	return -ENOMEM;
}
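/*
 * Sizing example (assuming 4KB pages, so SC_PER_PAGE == 512): a 2GB
 * swap device has max_pages == 524288, giving length == 1025 map
 * pages, i.e. roughly 4MB for the entries plus an ~8KB pointer array.
 */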

void swap_cgroup_swapoff(int type)
{
	int i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */