/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
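
/*
 * Typical use of this API, shown here as an illustrative sketch (the
 * device, pool name, and block geometry below are hypothetical, not
 * taken from any particular driver):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("descriptors", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (vaddr) {
 *		... hand 'dma' to the device, touch the buffer via 'vaddr' ...
 *		dma_pool_free(pool, vaddr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */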

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
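
/*
 * Reading the resulting "pools" attribute produces one line per pool on
 * the device, in the format built above.  A sketch of the output (the
 * pool names and counts here are invented for illustration):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   16 2048  8
 *	buffer-512          3   32  512  4
 *
 * The columns are: pool name, blocks in use, total blocks
 * (pages * allocation / size), block size, and allocated pages.
 */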

/**
 * dma_pool_create - Creates a pool of consistent memory blocks for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
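
/*
 * A hedged example of the @boundary parameter (values hypothetical): a
 * pool of 512-byte hardware descriptors that must never straddle a 4 KB
 * boundary could be created with
 *
 *	pool = dma_pool_create("qh", &pdev->dev, 512, 32, 4096);
 *
 * Every block handed out is then 512 bytes, aligned to 32 bytes, and
 * wholly contained within one 4 KB region.
 */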

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
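
/*
 * Worked example of the chaining above (numbers chosen for illustration):
 * with size = 64 and boundary = allocation = 4096, the first word of the
 * block at offset 0 stores 64, the block at 64 stores 128, and so on.
 * At offset 3968 the test (4032 + 64) >= 4096 fires, so that block links
 * straight to 4096 and the loop ends, since offset is no longer below
 * pool->allocation.  A smaller boundary behaves the same way within each
 * boundary-sized region: the chain jumps to the next boundary instead of
 * spanning it, which is how free blocks are kept from crossing one.
 */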

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
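
/*
 * A non-blocking caller (e.g. in interrupt context) would use it like
 * this sketch ('desc' and 'desc_dma' are hypothetical names):
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 * With __GFP_WAIT set in @mem_flags (as in GFP_KERNEL), the function
 * instead sleeps on pool->waitq in POOL_TIMEOUT_JIFFIES slices and
 * retries until a block becomes available.
 */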

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
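
/*
 * A sketch of typical use from a driver's probe routine (the pool name
 * and block geometry are hypothetical):
 *
 *	pool = dmam_pool_create("rx_bufs", &pdev->dev, 256, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * No explicit destroy is needed on the remove path; devres invokes
 * dma_pool_destroy() automatically when the driver detaches.
 */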

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);