list_lru: get rid of ->active_nodes
mm/list_lru.c
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		nlru->nr_items--;
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
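
/*
 * Illustrative sketch, not part of this file: list_lru_count_node()
 * is deliberately per-node, so a caller that wants a global count
 * sums the per-node counts itself.  The list_lru_count() helper in
 * include/linux/list_lru.h does roughly this (a sketch, assuming
 * that wrapper's existence):
 *
 *	static inline unsigned long list_lru_count(struct list_lru *lru)
 *	{
 *		unsigned long count = 0;
 *		int nid;
 *
 *		for_each_node_state(nid, N_NORMAL_MEMORY)
 *			count += list_lru_count_node(lru, nid);
 *		return count;
 *	}
 */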

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			nlru->nr_items--;
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
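
/*
 * Illustrative sketch, not part of this file: a minimal isolate
 * callback a user of list_lru_walk_node() might supply.  struct
 * my_obj and my_isolate() are hypothetical names; real callers such
 * as the dcache shrinker do more work here.  The callback is invoked
 * under nlru->lock, so on LRU_REMOVED it must unlink the item itself;
 * the usual pattern is to move it to a private dispose list and free
 * it after the walk, outside the lru lock:
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  spinlock_t *lru_lock, void *cb_arg)
 *	{
 *		struct my_obj *obj = container_of(item, struct my_obj, lru);
 *		struct list_head *dispose = cb_arg;
 *
 *		if (atomic_read(&obj->refcount))
 *			return LRU_SKIP;
 *
 *		list_move(item, dispose);
 *		return LRU_REMOVED;
 *	}
 */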

int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);

void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
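
/*
 * Illustrative sketch, not part of this file: tying the API together
 * for a hypothetical cache.  All my_* names are invented; my_isolate()
 * is the callback sketched above, and list_lru_init() is assumed to be
 * the header wrapper that passes a NULL lockdep key to
 * list_lru_init_key().  Each object's lru member is assumed to have
 * been set up with INIT_LIST_HEAD() at allocation, and since
 * list_lru_add() derives the node from the item's page, objects should
 * come from the slab/page allocator rather than vmalloc():
 *
 *	struct my_obj {
 *		atomic_t refcount;
 *		struct list_head lru;
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		return list_lru_init(&my_lru);
 *	}
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static unsigned long my_shrink(int nid, unsigned long nr_to_walk)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_walk_node(&my_lru, nid, my_isolate,
 *					   &dispose, &nr_to_walk);
 *		while (!list_empty(&dispose)) {
 *			struct my_obj *obj = list_first_entry(&dispose,
 *							struct my_obj, lru);
 *
 *			list_del_init(&obj->lru);
 *			kfree(obj);
 *		}
 *		return freed;
 *	}
 */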