/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
13 bool list_lru_add(struct list_lru
*lru
, struct list_head
*item
)
15 int nid
= page_to_nid(virt_to_page(item
));
16 struct list_lru_node
*nlru
= &lru
->node
[nid
];
18 spin_lock(&nlru
->lock
);
19 WARN_ON_ONCE(nlru
->nr_items
< 0);
20 if (list_empty(item
)) {
21 list_add_tail(item
, &nlru
->list
);
23 spin_unlock(&nlru
->lock
);
26 spin_unlock(&nlru
->lock
);
29 EXPORT_SYMBOL_GPL(list_lru_add
);
31 bool list_lru_del(struct list_lru
*lru
, struct list_head
*item
)
33 int nid
= page_to_nid(virt_to_page(item
));
34 struct list_lru_node
*nlru
= &lru
->node
[nid
];
36 spin_lock(&nlru
->lock
);
37 if (!list_empty(item
)) {
40 WARN_ON_ONCE(nlru
->nr_items
< 0);
41 spin_unlock(&nlru
->lock
);
44 spin_unlock(&nlru
->lock
);
47 EXPORT_SYMBOL_GPL(list_lru_del
);
50 list_lru_count_node(struct list_lru
*lru
, int nid
)
52 unsigned long count
= 0;
53 struct list_lru_node
*nlru
= &lru
->node
[nid
];
55 spin_lock(&nlru
->lock
);
56 WARN_ON_ONCE(nlru
->nr_items
< 0);
57 count
+= nlru
->nr_items
;
58 spin_unlock(&nlru
->lock
);
62 EXPORT_SYMBOL_GPL(list_lru_count_node
);
65 list_lru_walk_node(struct list_lru
*lru
, int nid
, list_lru_walk_cb isolate
,
66 void *cb_arg
, unsigned long *nr_to_walk
)
69 struct list_lru_node
*nlru
= &lru
->node
[nid
];
70 struct list_head
*item
, *n
;
71 unsigned long isolated
= 0;
73 spin_lock(&nlru
->lock
);
75 list_for_each_safe(item
, n
, &nlru
->list
) {
79 * decrement nr_to_walk first so that we don't livelock if we
80 * get stuck on large numbesr of LRU_RETRY items
86 ret
= isolate(item
, &nlru
->lock
, cb_arg
);
88 case LRU_REMOVED_RETRY
:
89 assert_spin_locked(&nlru
->lock
);
92 WARN_ON_ONCE(nlru
->nr_items
< 0);
95 * If the lru lock has been dropped, our list
96 * traversal is now invalid and so we have to
97 * restart from scratch.
99 if (ret
== LRU_REMOVED_RETRY
)
103 list_move_tail(item
, &nlru
->list
);
109 * The lru lock has been dropped, our list traversal is
110 * now invalid and so we have to restart from scratch.
112 assert_spin_locked(&nlru
->lock
);
119 spin_unlock(&nlru
->lock
);
122 EXPORT_SYMBOL_GPL(list_lru_walk_node
);
124 int list_lru_init_key(struct list_lru
*lru
, struct lock_class_key
*key
)
127 size_t size
= sizeof(*lru
->node
) * nr_node_ids
;
129 lru
->node
= kzalloc(size
, GFP_KERNEL
);
133 for (i
= 0; i
< nr_node_ids
; i
++) {
134 spin_lock_init(&lru
->node
[i
].lock
);
136 lockdep_set_class(&lru
->node
[i
].lock
, key
);
137 INIT_LIST_HEAD(&lru
->node
[i
].list
);
138 lru
->node
[i
].nr_items
= 0;
142 EXPORT_SYMBOL_GPL(list_lru_init_key
);
144 void list_lru_destroy(struct list_lru
*lru
)
148 EXPORT_SYMBOL_GPL(list_lru_destroy
);