ALSA: Clean up SG-buffer helper functions and macros
sound/core/memalloc.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");

/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages with the given size.
 *
 * Returns the pointer to the buffer, or NULL if not enough memory is available.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
        int pg;
        void *res;

        if (WARN_ON(!size))
                return NULL;
        if (WARN_ON(!gfp_flags))
                return NULL;
        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
                inc_snd_pages(pg);
        return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        free_pages((unsigned long) ptr, pg);
}
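
/*
 * Example (illustrative only, not part of the original file): a minimal
 * sketch of calling snd_malloc_pages()/snd_free_pages() directly for a
 * physically contiguous scratch buffer; the 8 kB size and GFP_KERNEL flag
 * are arbitrary, assumed values.
 *
 *      void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      snd_free_pages(buf, 8192);
 *
 * The allocation is rounded up to a power-of-two number of pages by
 * get_order(), and the same number of pages is accounted in
 * snd_allocated_pages.
 */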

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        gfp_t gfp_flags;

        if (WARN_ON(!dma))
                return NULL;
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL)
                inc_snd_pages(pg);

        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
                                   dma_addr_t *dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;
        void *res;

        if (WARN_ON(!dma_addr))
                return NULL;
        pg = get_order(size);
        res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
        if (res != NULL)
                inc_snd_pages(pg);
        return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
                                void *ptr, dma_addr_t dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                dmab->area = snd_malloc_pages(size, (unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
                break;
#endif
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
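
/*
 * Example (illustrative sketch, not part of the original file): how a PCI
 * driver would typically allocate and later release a coherent DMA buffer.
 * "pci" stands for the driver's struct pci_dev and the 64 kB size is an
 * arbitrary, assumed value; snd_dma_pci_data() is the helper that turns a
 * pci_dev into the struct device pointer expected here.
 *
 *      struct snd_dma_buffer dmab;
 *      int err;
 *
 *      err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *                                64 * 1024, &dmab);
 *      if (err < 0)
 *              return err;
 *      ... dmab.area holds the CPU address, dmab.addr the DMA address ...
 *      snd_dma_free_pages(&dmab);
 *
 * For SNDRV_DMA_TYPE_CONTINUOUS the "device" argument carries GFP flags
 * instead, conventionally wrapped with snd_dma_continuous_data(GFP_KERNEL).
 */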

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function halves the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if a buffer is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                size >>= 1;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
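
/*
 * Example (illustrative, assumed sizes): the fallback variant suits large
 * preallocations where shrinking is acceptable.  A 1 MB request may end up
 * as 512 kB, 256 kB, ... down to just above PAGE_SIZE; the caller reads
 * dmab.bytes to learn what was actually obtained.
 *
 *      struct snd_dma_buffer dmab;
 *
 *      if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
 *                                       snd_dma_pci_data(pci),
 *                                       1024 * 1024, &dmab) == 0)
 *              printk(KERN_DEBUG "got %zu bytes\n", dmab.bytes);
 */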


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
#endif
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}

/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given id
 * @dmab: the buffer allocation record to store
 * @id: the buffer id
 *
 * Looks up the reserved-buffer list and re-uses the buffer if a matching
 * one is found.  When the buffer is found, it's removed from the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return 0;

        mutex_lock(&list_mutex);
        list_for_each_entry(mem, &mem_list_head, list) {
                if (mem->id == id &&
                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
                        struct device *dev = dmab->dev.dev;
                        list_del(&mem->list);
                        *dmab = mem->buffer;
                        if (dmab->dev.dev == NULL)
                                dmab->dev.dev = dev;
                        kfree(mem);
                        mutex_unlock(&list_mutex);
                        return dmab->bytes;
                }
        }
        mutex_unlock(&list_mutex);
        return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list under the given id,
 * so that it can be re-used later via snd_dma_get_reserved_buf().
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        if (WARN_ON(!dmab))
                return -EINVAL;
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        mutex_lock(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        mutex_unlock(&list_mutex);
        return 0;
}
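
/*
 * Example (illustrative sketch): the usual reserve/re-use pattern.  A driver
 * first tries to reclaim a previously reserved buffer for its id and only
 * allocates a fresh one when nothing is found; on shutdown it hands the
 * buffer back to the list instead of freeing it.  snd_dma_pci_buf_id() is
 * the helper used elsewhere in this file to build an id from the PCI
 * vendor/device numbers; the 64 kB size is an assumed value.
 *
 *      struct snd_dma_buffer dmab;
 *      unsigned int id = snd_dma_pci_buf_id(pci);
 *
 *      if (!snd_dma_get_reserved_buf(&dmab, id)) {
 *              if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
 *                                      snd_dma_pci_data(pci),
 *                                      64 * 1024, &dmab) < 0)
 *                      return -ENOMEM;
 *      }
 *      ...
 *      snd_dma_reserve_buf(&dmab, id);
 */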

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        mutex_lock(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

        mutex_lock(&list_mutex);
        seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each_entry(mem, &mem_list_head, list) {
                devno++;
                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
                           devno, mem->id, types[mem->buffer.dev.type]);
                seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
                           (unsigned long)mem->buffer.addr,
                           (int)mem->buffer.bytes);
        }
        mutex_unlock(&list_mutex);
        return 0;
}
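
/*
 * Example of what a read of /proc/driver/snd-page-alloc produces with one
 * reserved buffer; the numbers are made up for illustration:
 *
 *      pages : 262144 bytes (64 pages per 4kB)
 *      buffer 1 : ID 11020002 : type DEV
 *       addr = 0x1f400000, size = 65536 bytes
 */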

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
                                  size_t count, loff_t * ppos)
{
        char buf[128];
        char *token, *p;

        if (count > sizeof(buf) - 1)
                return -EINVAL;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[count] = '\0';

        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
                return count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
                long mask;
                int i, alloced;
                struct pci_dev *pci;

                if ((token = gettoken(&p)) == NULL ||
                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (size = memparse(token, &endp)) < 64*1024 ||
                    size > 16*1024*1024 /* too big */ ||
                    (token = gettoken(&p)) == NULL ||
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
                        return count;
                }
                vendor &= 0xffff;
                device &= 0xffff;

                alloced = 0;
                pci = NULL;
                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
                        if (mask > 0 && mask < 0xffffffff) {
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
                                        pci_dev_put(pci);
                                        return count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
                                        return count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
                        alloced++;
                }
                if (! alloced) {
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                /* FIXME: We can allocate only in ZONE_DMA
                                 * without a device pointer!
                                 */
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        break;
                                }
                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
                        }
                }
        } else if (strcmp(token, "erase") == 0)
                /* FIXME: need for releasing each buffer chunk? */
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
        return count;
}
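
/*
 * Example (illustrative; the vendor/device IDs and sizes are arbitrary):
 * the "add" command above expects five whitespace-separated fields: PCI
 * vendor ID, PCI device ID, DMA mask, buffer size (64kB to 16MB, memparse
 * suffixes allowed) and buffer count (1 to 4).  From user space:
 *
 *      echo "add 0x1102 0x0002 0xffffffff 512k 2" > /proc/driver/snd-page-alloc
 *
 * while "erase" releases all reserved buffers again.
 */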
#endif /* CONFIG_PCI */

static const struct file_operations snd_mem_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = snd_mem_proc_open,
        .read           = seq_read,
#ifdef CONFIG_PCI
        .write          = snd_mem_proc_write,
#endif
        .llseek         = seq_lseek,
        .release        = single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
                                   &snd_mem_proc_fops);
#endif
        return 0;
}

static void __exit snd_mem_exit(void)
{
        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);