Merge tag 'mfd-for-linus-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/lee...
[deliverable/linux.git] / mm / mempool.c
1 /*
2 * linux/mm/mempool.c
3 *
4 * memory buffer pool support. Such pools are mostly used
5 * for guaranteed, deadlock-free memory allocations during
6 * extreme VM load.
7 *
8 * started by Ingo Molnar, Copyright (C) 2001
9 */
10
11 #include <linux/mm.h>
12 #include <linux/slab.h>
13 #include <linux/export.h>
14 #include <linux/mempool.h>
15 #include <linux/blkdev.h>
16 #include <linux/writeback.h>
17
18 static void add_element(mempool_t *pool, void *element)
19 {
20 BUG_ON(pool->curr_nr >= pool->min_nr);
21 pool->elements[pool->curr_nr++] = element;
22 }
23
24 static void *remove_element(mempool_t *pool)
25 {
26 BUG_ON(pool->curr_nr <= 0);
27 return pool->elements[--pool->curr_nr];
28 }
29
30 /**
31 * mempool_destroy - deallocate a memory pool
32 * @pool: pointer to the memory pool which was allocated via
33 * mempool_create().
34 *
35 * Free all reserved elements in @pool and @pool itself. This function
36 * only sleeps if the free_fn() function sleeps.
37 */
38 void mempool_destroy(mempool_t *pool)
39 {
40 while (pool->curr_nr) {
41 void *element = remove_element(pool);
42 pool->free(element, pool->pool_data);
43 }
44 kfree(pool->elements);
45 kfree(pool);
46 }
47 EXPORT_SYMBOL(mempool_destroy);
48
49 /**
50 * mempool_create - create a memory pool
51 * @min_nr: the minimum number of elements guaranteed to be
52 * allocated for this pool.
53 * @alloc_fn: user-defined element-allocation function.
54 * @free_fn: user-defined element-freeing function.
55 * @pool_data: optional private data available to the user-defined functions.
56 *
57 * this function creates and allocates a guaranteed size, preallocated
58 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
59 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
60 * functions might sleep - as long as the mempool_alloc() function is not called
61 * from IRQ contexts.
62 */
63 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
64 mempool_free_t *free_fn, void *pool_data)
65 {
66 return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
67 GFP_KERNEL, NUMA_NO_NODE);
68 }
69 EXPORT_SYMBOL(mempool_create);
70
71 mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
72 mempool_free_t *free_fn, void *pool_data,
73 gfp_t gfp_mask, int node_id)
74 {
75 mempool_t *pool;
76 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
77 if (!pool)
78 return NULL;
79 pool->elements = kmalloc_node(min_nr * sizeof(void *),
80 gfp_mask, node_id);
81 if (!pool->elements) {
82 kfree(pool);
83 return NULL;
84 }
85 spin_lock_init(&pool->lock);
86 pool->min_nr = min_nr;
87 pool->pool_data = pool_data;
88 init_waitqueue_head(&pool->wait);
89 pool->alloc = alloc_fn;
90 pool->free = free_fn;
91
92 /*
93 * First pre-allocate the guaranteed number of buffers.
94 */
95 while (pool->curr_nr < pool->min_nr) {
96 void *element;
97
98 element = pool->alloc(gfp_mask, pool->pool_data);
99 if (unlikely(!element)) {
100 mempool_destroy(pool);
101 return NULL;
102 }
103 add_element(pool, element);
104 }
105 return pool;
106 }
107 EXPORT_SYMBOL(mempool_create_node);
108
109 /**
110 * mempool_resize - resize an existing memory pool
111 * @pool: pointer to the memory pool which was allocated via
112 * mempool_create().
113 * @new_min_nr: the new minimum number of elements guaranteed to be
114 * allocated for this pool.
115 * @gfp_mask: the usual allocation bitmask.
116 *
117 * This function shrinks/grows the pool. In the case of growing,
118 * it cannot be guaranteed that the pool will be grown to the new
119 * size immediately, but new mempool_free() calls will refill it.
120 *
121 * Note, the caller must guarantee that no mempool_destroy is called
122 * while this function is running. mempool_alloc() & mempool_free()
123 * might be called (eg. from IRQ contexts) while this function executes.
124 */
125 int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
126 {
127 void *element;
128 void **new_elements;
129 unsigned long flags;
130
131 BUG_ON(new_min_nr <= 0);
132
133 spin_lock_irqsave(&pool->lock, flags);
134 if (new_min_nr <= pool->min_nr) {
135 while (new_min_nr < pool->curr_nr) {
136 element = remove_element(pool);
137 spin_unlock_irqrestore(&pool->lock, flags);
138 pool->free(element, pool->pool_data);
139 spin_lock_irqsave(&pool->lock, flags);
140 }
141 pool->min_nr = new_min_nr;
142 goto out_unlock;
143 }
144 spin_unlock_irqrestore(&pool->lock, flags);
145
146 /* Grow the pool */
147 new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
148 if (!new_elements)
149 return -ENOMEM;
150
151 spin_lock_irqsave(&pool->lock, flags);
152 if (unlikely(new_min_nr <= pool->min_nr)) {
153 /* Raced, other resize will do our work */
154 spin_unlock_irqrestore(&pool->lock, flags);
155 kfree(new_elements);
156 goto out;
157 }
158 memcpy(new_elements, pool->elements,
159 pool->curr_nr * sizeof(*new_elements));
160 kfree(pool->elements);
161 pool->elements = new_elements;
162 pool->min_nr = new_min_nr;
163
164 while (pool->curr_nr < pool->min_nr) {
165 spin_unlock_irqrestore(&pool->lock, flags);
166 element = pool->alloc(gfp_mask, pool->pool_data);
167 if (!element)
168 goto out;
169 spin_lock_irqsave(&pool->lock, flags);
170 if (pool->curr_nr < pool->min_nr) {
171 add_element(pool, element);
172 } else {
173 spin_unlock_irqrestore(&pool->lock, flags);
174 pool->free(element, pool->pool_data); /* Raced */
175 goto out;
176 }
177 }
178 out_unlock:
179 spin_unlock_irqrestore(&pool->lock, flags);
180 out:
181 return 0;
182 }
183 EXPORT_SYMBOL(mempool_resize);
184
185 /**
186 * mempool_alloc - allocate an element from a specific memory pool
187 * @pool: pointer to the memory pool which was allocated via
188 * mempool_create().
189 * @gfp_mask: the usual allocation bitmask.
190 *
191 * this function only sleeps if the alloc_fn() function sleeps or
192 * returns NULL. Note that due to preallocation, this function
193 * *never* fails when called from process contexts. (it might
194 * fail if called from an IRQ context.)
195 * Note: using __GFP_ZERO is not supported.
196 */
197 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
198 {
199 void *element;
200 unsigned long flags;
201 wait_queue_t wait;
202 gfp_t gfp_temp;
203
204 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
205 might_sleep_if(gfp_mask & __GFP_WAIT);
206
207 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
208 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
209 gfp_mask |= __GFP_NOWARN; /* failures are OK */
210
211 gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);
212
213 repeat_alloc:
214
215 element = pool->alloc(gfp_temp, pool->pool_data);
216 if (likely(element != NULL))
217 return element;
218
219 spin_lock_irqsave(&pool->lock, flags);
220 if (likely(pool->curr_nr)) {
221 element = remove_element(pool);
222 spin_unlock_irqrestore(&pool->lock, flags);
223 /* paired with rmb in mempool_free(), read comment there */
224 smp_wmb();
225 return element;
226 }
227
228 /*
229 * We use gfp mask w/o __GFP_WAIT or IO for the first round. If
230 * alloc failed with that and @pool was empty, retry immediately.
231 */
232 if (gfp_temp != gfp_mask) {
233 spin_unlock_irqrestore(&pool->lock, flags);
234 gfp_temp = gfp_mask;
235 goto repeat_alloc;
236 }
237
238 /* We must not sleep if !__GFP_WAIT */
239 if (!(gfp_mask & __GFP_WAIT)) {
240 spin_unlock_irqrestore(&pool->lock, flags);
241 return NULL;
242 }
243
244 /* Let's wait for someone else to return an element to @pool */
245 init_wait(&wait);
246 prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
247
248 spin_unlock_irqrestore(&pool->lock, flags);
249
250 /*
251 * FIXME: this should be io_schedule(). The timeout is there as a
252 * workaround for some DM problems in 2.6.18.
253 */
254 io_schedule_timeout(5*HZ);
255
256 finish_wait(&pool->wait, &wait);
257 goto repeat_alloc;
258 }
259 EXPORT_SYMBOL(mempool_alloc);
260
261 /**
262 * mempool_free - return an element to the pool.
263 * @element: pool element pointer.
264 * @pool: pointer to the memory pool which was allocated via
265 * mempool_create().
266 *
267 * this function only sleeps if the free_fn() function sleeps.
268 */
269 void mempool_free(void *element, mempool_t *pool)
270 {
271 unsigned long flags;
272
273 if (unlikely(element == NULL))
274 return;
275
276 /*
277 * Paired with the wmb in mempool_alloc(). The preceding read is
278 * for @element and the following @pool->curr_nr. This ensures
279 * that the visible value of @pool->curr_nr is from after the
280 * allocation of @element. This is necessary for fringe cases
281 * where @element was passed to this task without going through
282 * barriers.
283 *
284 * For example, assume @p is %NULL at the beginning and one task
285 * performs "p = mempool_alloc(...);" while another task is doing
286 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
287 * may end up using curr_nr value which is from before allocation
288 * of @p without the following rmb.
289 */
290 smp_rmb();
291
292 /*
293 * For correctness, we need a test which is guaranteed to trigger
294 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
295 * without locking achieves that and refilling as soon as possible
296 * is desirable.
297 *
298 * Because curr_nr visible here is always a value after the
299 * allocation of @element, any task which decremented curr_nr below
300 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
301 * incremented to min_nr afterwards. If curr_nr gets incremented
302 * to min_nr after the allocation of @element, the elements
303 * allocated after that are subject to the same guarantee.
304 *
305 * Waiters happen iff curr_nr is 0 and the above guarantee also
306 * ensures that there will be frees which return elements to the
307 * pool waking up the waiters.
308 */
309 if (unlikely(pool->curr_nr < pool->min_nr)) {
310 spin_lock_irqsave(&pool->lock, flags);
311 if (likely(pool->curr_nr < pool->min_nr)) {
312 add_element(pool, element);
313 spin_unlock_irqrestore(&pool->lock, flags);
314 wake_up(&pool->wait);
315 return;
316 }
317 spin_unlock_irqrestore(&pool->lock, flags);
318 }
319 pool->free(element, pool->pool_data);
320 }
321 EXPORT_SYMBOL(mempool_free);
322
323 /*
324 * A commonly used alloc and free fn.
325 */
326 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
327 {
328 struct kmem_cache *mem = pool_data;
329 return kmem_cache_alloc(mem, gfp_mask);
330 }
331 EXPORT_SYMBOL(mempool_alloc_slab);
332
333 void mempool_free_slab(void *element, void *pool_data)
334 {
335 struct kmem_cache *mem = pool_data;
336 kmem_cache_free(mem, element);
337 }
338 EXPORT_SYMBOL(mempool_free_slab);
339
340 /*
341 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
342 * specified by pool_data
343 */
344 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
345 {
346 size_t size = (size_t)pool_data;
347 return kmalloc(size, gfp_mask);
348 }
349 EXPORT_SYMBOL(mempool_kmalloc);
350
351 void mempool_kfree(void *element, void *pool_data)
352 {
353 kfree(element);
354 }
355 EXPORT_SYMBOL(mempool_kfree);
356
357 /*
358 * A simple mempool-backed page allocator that allocates pages
359 * of the order specified by pool_data.
360 */
361 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
362 {
363 int order = (int)(long)pool_data;
364 return alloc_pages(gfp_mask, order);
365 }
366 EXPORT_SYMBOL(mempool_alloc_pages);
367
368 void mempool_free_pages(void *element, void *pool_data)
369 {
370 int order = (int)(long)pool_data;
371 __free_pages(element, order);
372 }
373 EXPORT_SYMBOL(mempool_free_pages);
This page took 0.049215 seconds and 6 git commands to generate.