/*
 * linux/mm/mempool.c
 *
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
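
/*
 * Illustration (editorial note, not part of the original file): with the
 * debug poisoning above, a free 8-byte element sits in memory as
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE (0x6b) in every byte except the last, which holds
 * POISON_END (0xa5). check_element() verifies exactly this pattern when
 * an element leaves the pool and then repaints the element with
 * POISON_INUSE (0x5a), so a use-after-free or buffer overrun while the
 * element was parked in the pool shows up as a poison mismatch.
 */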

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
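
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the typical create/destroy lifecycle for a slab-backed pool using
 * the mempool_alloc_slab()/mempool_free_slab() helpers defined below. The
 * foo_* names are hypothetical; the file's own includes (slab.h, mempool.h)
 * already provide everything needed.
 */
#ifdef MEMPOOL_USAGE_EXAMPLE
struct foo_request {
	int id;
};

static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;

static int foo_pool_init(void)
{
	foo_cache = kmem_cache_create("foo_request",
				      sizeof(struct foo_request), 0, 0, NULL);
	if (!foo_cache)
		return -ENOMEM;

	/* Keep at least 16 requests allocatable even under extreme VM load. */
	foo_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				  foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}
	return 0;
}

static void foo_pool_exit(void)
{
	mempool_destroy(foo_pool);	/* also frees the reserved elements */
	kmem_cache_destroy(foo_cache);
}
#endif /* MEMPOOL_USAGE_EXAMPLE */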

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
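
/*
 * Editorial note (not from the original source): mempool_create_node() is
 * the NUMA-aware variant, e.g. a per-node pool of order-0 pages could be
 * built as
 *
 *	pool = mempool_create_node(4, mempool_alloc_pages,
 *				   mempool_free_pages, (void *)0,
 *				   GFP_KERNEL, nid);
 *
 * where nid is a hypothetical node id. Note that only the pool structure
 * and the elements array are placed on @node_id via kzalloc_node()/
 * kmalloc_node(); the preallocated elements themselves come from
 * pool->alloc(), which here does not take a node argument.
 */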

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
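
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * growing the pool when a workload ramps up and shrinking it afterwards,
 * using the hypothetical foo_pool from the earlier example. The function
 * returns 0 on success and -ENOMEM if the new elements array cannot be
 * allocated; shrinking frees the surplus reserved elements on the spot,
 * while growing refills lazily if the immediate allocations fail.
 *
 *	err = mempool_resize(foo_pool, 64);	(grow: 64 reserved elements)
 *	...
 *	err = mempool_resize(foo_pool, 16);	(shrink back to 16)
 */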

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO is supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	/* If oom killed, memory reserves are essential to prevent livelock */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
	/* No element size to zero on allocation */
	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:
	if (likely(pool->curr_nr)) {
		/*
		 * Don't allocate from emergency reserves if there are
		 * elements available. This check is racy, but it will
		 * be rechecked each loop.
		 */
		gfp_temp |= __GFP_NOMEMALLOC;
	}

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}
	gfp_temp = gfp_mask;

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
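
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the common pattern in an I/O path. GFP_NOIO keeps direct reclaim from
 * re-entering the I/O stack; since it includes __GFP_DIRECT_RECLAIM, the
 * call may sleep but will not fail. foo_pool/foo_request are the
 * hypothetical names from the earlier example.
 */
#ifdef MEMPOOL_USAGE_EXAMPLE
static void foo_submit(void)
{
	struct foo_request *req;

	/* Cannot fail in process context: falls back to the reserves. */
	req = mempool_alloc(foo_pool, GFP_NOIO);
	req->id = 42;

	/* ... do the work ... */

	/* Returning the element may refill the pool and wake waiters. */
	mempool_free(req, foo_pool);
}
#endif /* MEMPOOL_USAGE_EXAMPLE */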

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of
 * memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
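
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * for the kmalloc helpers, pool_data carries the element size cast to a
 * pointer, so
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)512);
 *
 * creates a pool of eight 512-byte buffers.
 */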

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
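
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the page helpers take the allocation order via pool_data, so
 *
 *	pool = mempool_create(4, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)(long)1);
 *
 * reserves four order-1 (two-page) blocks. mempool_alloc() on such a pool
 * returns struct page pointers rather than kernel virtual addresses.
 */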