/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        /*
         * This needs node 0 to be always present, even
         * on systems supporting sparse NUMA ids.
         */
        return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        /*
         * The lock protects the array of per-cgroup lists from relocation
         * (see memcg_update_list_lru_node).
         */
        lockdep_assert_held(&nlru->lock);
        if (nlru->memcg_lrus && idx >= 0)
                return nlru->memcg_lrus->lru[idx];

        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        struct mem_cgroup *memcg;

        if (!nlru->memcg_lrus)
                return &nlru->lru;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                return &nlru->lru;

        return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

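/*
 * Example usage (illustrative sketch only, not part of the original file):
 * a cache typically calls list_lru_add() when an object loses its last
 * reference and list_lru_del() when the object is reused.  The structure,
 * field and variable names below (my_object, obj->lru, my_lru) are
 * hypothetical.
 *
 *	struct my_object {
 *		struct list_head lru;		// must start out empty
 *	};
 *
 *	static struct list_lru my_lru;		// initialised elsewhere
 *
 *	static void my_object_unused(struct my_object *obj)
 *	{
 *		// object became reclaimable: park it on the per-node LRU
 *		list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_reused(struct my_object *obj)
 *	{
 *		// object is live again: take it back off the LRU
 *		list_lru_del(&my_lru, &obj->lru);
 *	}
 */
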
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
                                          int nid, int memcg_idx)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
        count = l->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        long count = 0;
        int memcg_idx;

        count += __list_lru_count_one(lru, nid, -1);
        if (list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx)
                        count += __list_lru_count_one(lru, nid, memcg_idx);
        }
        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

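/*
 * Example (illustrative sketch only): a shrinker's count_objects callback
 * could report the per-node total directly; my_lru and my_count are
 * hypothetical names.
 *
 *	static unsigned long my_count(struct shrinker *shrink,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_count_node(&my_lru, sc->nid);
 *	}
 *
 * A memcg-aware shrinker would use list_lru_count_one() with the memcg
 * carried in the shrink_control instead.
 */
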
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
                        isolated++;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                isolate, cb_arg, nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

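/*
 * Example isolate callback (illustrative sketch only): a shrinker scan
 * passes a callback of type list_lru_walk_cb to list_lru_walk_node() or
 * list_lru_walk_one().  It runs with the per-node lru lock held and must
 * keep the bookkeeping consistent via list_lru_isolate{,_move}().
 * my_object, my_object_busy() and my_lru are hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *freeable = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object, lru);
 *
 *		if (my_object_busy(obj))
 *			return LRU_ROTATE;	// keep it, rotate to list tail
 *
 *		// detach from the LRU and collect for later disposal
 *		list_lru_isolate_move(list, item, freeable);
 *		return LRU_REMOVED;
 *	}
 *
 *	unsigned long nr_to_walk = 128;
 *	LIST_HEAD(freeable);
 *	list_lru_walk_node(&my_lru, nid, my_isolate, &freeable, &nr_to_walk);
 */
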
static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        int size = memcg_nr_cache_ids;

        nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
                kfree(nlru->memcg_lrus);
                return -ENOMEM;
        }

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
        kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = nlru->memcg_lrus;
        new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kfree(new);
                return -ENOMEM;
        }

        memcpy(new, old, old_size * sizeof(void *));

        /*
         * The lock guarantees that we won't race with a reader
         * (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        nlru->memcg_lrus = new;
        spin_unlock_irq(&nlru->lock);

        kfree(old);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        /*
         * Do not bother shrinking the array back to the old size, because
         * we cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}

#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

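/*
 * Example initialisation (illustrative sketch only): callers normally use
 * the wrappers from <linux/list_lru.h> rather than __list_lru_init()
 * directly, and pair them with list_lru_destroy().  my_lru, my_cache_init
 * and my_cache_exit are hypothetical.
 *
 *	static struct list_lru my_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		int err;
 *
 *		err = list_lru_init_memcg(&my_lru);	// memcg-aware variant
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 *	static void __exit my_cache_exit(void)
 *	{
 *		list_lru_destroy(&my_lru);
 *	}
 */
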
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);