#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {	/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct device		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned		in_use;
	unsigned long		bitmap [0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
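
/*
 * Editor's worked example (not in the original source): with the default
 * allocation of one page (4096 bytes where PAGE_SIZE is 4K) and 64-byte
 * blocks, blocks_per_page is 4096 / 64 = 64.  The free-block bitmap then
 * needs 64 bits: one unsigned long on a 64-bit machine, two on a 32-bit
 * one, stored in the bitmap[] array that trails struct dma_page.
 */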

static DEFINE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page		*page;
	struct dma_pool		*pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
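
/*
 * Editor's sketch of what reading the resulting "pools" attribute might
 * look like for a hypothetical device with one pool named "buffer-2048"
 * (the values below are illustrative only, derived from the format
 * string above: name, blocks in use, total blocks, block size, pages):
 *
 *	$ cat /sys/devices/.../pools
 *	poolinfo - 0.1
 *	buffer-2048        12   16 2048  8
 */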

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/* round up to the next multiple of align (a power of two) */
		size += align - 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty (&dev->dma_pools))
			ret = device_create_file (dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add (&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
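
/*
 * Editor's usage sketch (not part of the original file): a driver might
 * create a pool of 64-byte, 64-byte-aligned descriptors at probe time
 * and tear it down on remove.  "pdev" and the error path are
 * hypothetical:
 *
 *	struct dma_pool *desc_pool;
 *
 *	desc_pool = dma_pool_create ("foo-desc", &pdev->dev, 64, 64, 0);
 *	if (!desc_pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy (desc_pool);
 */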

static struct dma_page *
pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					pool->allocation,
					&page->dma,
					mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
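
/*
 * Editor's note on the check above: since set bits mean "free", a page
 * is busy exactly when any bit in its map is clear.  For example, with
 * 64 blocks on a 64-bit machine a fully free page has
 * bitmap[0] == ~0UL and is_page_busy() returns 0; after one
 * dma_pool_alloc() a single bit is cleared and it returns 1.
 */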

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page		*page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct dma_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int		i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
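
/*
 * Editor's usage sketch (assumes the hypothetical "desc_pool" from the
 * earlier dma_pool_create() example): allocate one block and hand its
 * bus address to the hardware:
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc (desc_pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// the driver writes the descriptor through cpu_addr;
 *	// the device reads it at bus address 'dma'
 */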

static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct dma_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page(pool, dma)) == NULL) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
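
/*
 * Editor's sketch, continuing the dma_pool_alloc() example above: once
 * the device is done with the block, return it using both addresses:
 *
 *	dma_pool_free (desc_pool, cpu_addr, dma);
 *
 * Note how the block index is recovered from the bus address alone:
 * (dma - page->dma) / pool->size, then split into a long index and a
 * bit offset within that long.
 */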

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
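
/*
 * Editor's sketch of the managed variant (hypothetical driver): created
 * this way, the pool is released automatically by devres on driver
 * detach, so no explicit destroy call is needed:
 *
 *	pool = dmam_pool_create ("foo-desc", &pdev->dev, 64, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */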

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}

EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);
EXPORT_SYMBOL (dmam_pool_create);
EXPORT_SYMBOL (dmam_pool_destroy);