drivers/dma/dmaengine.c
1 /*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22 /*
23 * This code implements the DMA subsystem. It provides a HW-neutral interface
24 * for other kernel code to use asynchronous memory copy capabilities,
25 * if present, and allows different HW DMA drivers to register as providing
26 * this capability.
27 *
28 * Because we are accelerating what is already a relatively fast operation,
29 * the code goes to great lengths to avoid additional overhead, such as
30 * locking.
31 *
32 * LOCKING:
33 *
34 * The subsystem keeps a global list of dma_device structs; it is protected by
35 * a mutex, dma_list_mutex.
36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal.
41 *
42 * Each device has a channels list, which runs unlocked but is never modified
43 * once the device is registered; it is simply set up by the driver.
44 *
45 * See Documentation/dmaengine.txt for more details
46 */
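/*
 * Illustrative sketch, not part of this file: the public-channel flow
 * described above, as a client would use it. The function name is
 * hypothetical; descriptors would be prepared and submitted through the
 * channel's device before issuing pending work.
 *
 *	static void example_memcpy_client(void)
 *	{
 *		struct dma_chan *chan;
 *
 *		dmaengine_get();
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (chan)
 *			dma_async_issue_pending(chan);
 *		dmaengine_put();
 *	}
 */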
47
48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
50 #include <linux/dma-mapping.h>
51 #include <linux/init.h>
52 #include <linux/module.h>
53 #include <linux/mm.h>
54 #include <linux/device.h>
55 #include <linux/dmaengine.h>
56 #include <linux/hardirq.h>
57 #include <linux/spinlock.h>
58 #include <linux/percpu.h>
59 #include <linux/rcupdate.h>
60 #include <linux/mutex.h>
61 #include <linux/jiffies.h>
62 #include <linux/rculist.h>
63 #include <linux/idr.h>
64 #include <linux/slab.h>
65 #include <linux/acpi.h>
66 #include <linux/acpi_dma.h>
67 #include <linux/of_dma.h>
68 #include <linux/mempool.h>
69
70 static DEFINE_MUTEX(dma_list_mutex);
71 static DEFINE_IDR(dma_idr);
72 static LIST_HEAD(dma_device_list);
73 static long dmaengine_ref_count;
74
75 /* --- sysfs implementation --- */
76
77 /**
78 * dev_to_dma_chan - convert a device pointer to its sysfs container object
79 * @dev - device node
80 *
81 * Must be called under dma_list_mutex
82 */
83 static struct dma_chan *dev_to_dma_chan(struct device *dev)
84 {
85 struct dma_chan_dev *chan_dev;
86
87 chan_dev = container_of(dev, typeof(*chan_dev), device);
88 return chan_dev->chan;
89 }
90
91 static ssize_t memcpy_count_show(struct device *dev,
92 struct device_attribute *attr, char *buf)
93 {
94 struct dma_chan *chan;
95 unsigned long count = 0;
96 int i;
97 int err;
98
99 mutex_lock(&dma_list_mutex);
100 chan = dev_to_dma_chan(dev);
101 if (chan) {
102 for_each_possible_cpu(i)
103 count += per_cpu_ptr(chan->local, i)->memcpy_count;
104 err = sprintf(buf, "%lu\n", count);
105 } else
106 err = -ENODEV;
107 mutex_unlock(&dma_list_mutex);
108
109 return err;
110 }
111 static DEVICE_ATTR_RO(memcpy_count);
112
113 static ssize_t bytes_transferred_show(struct device *dev,
114 struct device_attribute *attr, char *buf)
115 {
116 struct dma_chan *chan;
117 unsigned long count = 0;
118 int i;
119 int err;
120
121 mutex_lock(&dma_list_mutex);
122 chan = dev_to_dma_chan(dev);
123 if (chan) {
124 for_each_possible_cpu(i)
125 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
126 err = sprintf(buf, "%lu\n", count);
127 } else
128 err = -ENODEV;
129 mutex_unlock(&dma_list_mutex);
130
131 return err;
132 }
133 static DEVICE_ATTR_RO(bytes_transferred);
134
135 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
136 char *buf)
137 {
138 struct dma_chan *chan;
139 int err;
140
141 mutex_lock(&dma_list_mutex);
142 chan = dev_to_dma_chan(dev);
143 if (chan)
144 err = sprintf(buf, "%d\n", chan->client_count);
145 else
146 err = -ENODEV;
147 mutex_unlock(&dma_list_mutex);
148
149 return err;
150 }
151 static DEVICE_ATTR_RO(in_use);
152
153 static struct attribute *dma_dev_attrs[] = {
154 &dev_attr_memcpy_count.attr,
155 &dev_attr_bytes_transferred.attr,
156 &dev_attr_in_use.attr,
157 NULL,
158 };
159 ATTRIBUTE_GROUPS(dma_dev);
160
161 static void chan_dev_release(struct device *dev)
162 {
163 struct dma_chan_dev *chan_dev;
164
165 chan_dev = container_of(dev, typeof(*chan_dev), device);
166 if (atomic_dec_and_test(chan_dev->idr_ref)) {
167 mutex_lock(&dma_list_mutex);
168 idr_remove(&dma_idr, chan_dev->dev_id);
169 mutex_unlock(&dma_list_mutex);
170 kfree(chan_dev->idr_ref);
171 }
172 kfree(chan_dev);
173 }
174
175 static struct class dma_devclass = {
176 .name = "dma",
177 .dev_groups = dma_dev_groups,
178 .dev_release = chan_dev_release,
179 };
180
181 /* --- client and device registration --- */
182
183 #define dma_device_satisfies_mask(device, mask) \
184 __dma_device_satisfies_mask((device), &(mask))
185 static int
186 __dma_device_satisfies_mask(struct dma_device *device,
187 const dma_cap_mask_t *want)
188 {
189 dma_cap_mask_t has;
190
191 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
192 DMA_TX_TYPE_END);
193 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
194 }
195
196 static struct module *dma_chan_to_owner(struct dma_chan *chan)
197 {
198 return chan->device->dev->driver->owner;
199 }
200
201 /**
202 * balance_ref_count - catch up the channel reference count
203 * @chan - channel to balance ->client_count versus dmaengine_ref_count
204 *
205 * balance_ref_count must be called under dma_list_mutex
206 */
207 static void balance_ref_count(struct dma_chan *chan)
208 {
209 struct module *owner = dma_chan_to_owner(chan);
210
211 while (chan->client_count < dmaengine_ref_count) {
212 __module_get(owner);
213 chan->client_count++;
214 }
215 }
216
217 /**
218 * dma_chan_get - try to grab a dma channel's parent driver module
219 * @chan - channel to grab
220 *
221 * Must be called under dma_list_mutex
222 */
223 static int dma_chan_get(struct dma_chan *chan)
224 {
225 struct module *owner = dma_chan_to_owner(chan);
226 int ret;
227
228 /* The channel is already in use, update client count */
229 if (chan->client_count) {
230 __module_get(owner);
231 goto out;
232 }
233
234 if (!try_module_get(owner))
235 return -ENODEV;
236
237 /* allocate upon first client reference */
238 if (chan->device->device_alloc_chan_resources) {
239 ret = chan->device->device_alloc_chan_resources(chan);
240 if (ret < 0)
241 goto err_out;
242 }
243
244 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
245 balance_ref_count(chan);
246
247 out:
248 chan->client_count++;
249 return 0;
250
251 err_out:
252 module_put(owner);
253 return ret;
254 }
255
256 /**
257 * dma_chan_put - drop a reference to a dma channel's parent driver module
258 * @chan - channel to release
259 *
260 * Must be called under dma_list_mutex
261 */
262 static void dma_chan_put(struct dma_chan *chan)
263 {
264 /* This channel is not in use, bail out */
265 if (!chan->client_count)
266 return;
267
268 chan->client_count--;
269 module_put(dma_chan_to_owner(chan));
270
271 /* This channel is not in use anymore, free it */
272 if (!chan->client_count && chan->device->device_free_chan_resources)
273 chan->device->device_free_chan_resources(chan);
274 }
275
276 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
277 {
278 enum dma_status status;
279 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
280
281 dma_async_issue_pending(chan);
282 do {
283 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
284 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
285 pr_err("%s: timeout!\n", __func__);
286 return DMA_ERROR;
287 }
288 if (status != DMA_IN_PROGRESS)
289 break;
290 cpu_relax();
291 } while (1);
292
293 return status;
294 }
295 EXPORT_SYMBOL(dma_sync_wait);
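/*
 * Illustrative sketch, not part of this file: dma_sync_wait() issues pending
 * work itself and spins until the cookie completes or the 5 second timeout
 * elapses. A caller that cannot sleep on a completion might poll a submitted
 * descriptor (tx is hypothetical) like this:
 *
 *	dma_cookie_t cookie = dmaengine_submit(tx);
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */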
296
297 /**
298 * dma_cap_mask_all - enable iteration over all operation types
299 */
300 static dma_cap_mask_t dma_cap_mask_all;
301
302 /**
303 * dma_chan_tbl_ent - tracks channel allocations per core/operation
304 * @chan - associated channel for this entry
305 */
306 struct dma_chan_tbl_ent {
307 struct dma_chan *chan;
308 };
309
310 /**
311 * channel_table - percpu lookup table for memory-to-memory offload providers
312 */
313 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
314
315 static int __init dma_channel_table_init(void)
316 {
317 enum dma_transaction_type cap;
318 int err = 0;
319
320 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
321
322 /* 'interrupt', 'private', and 'slave' are channel capabilities,
323 * but are not associated with an operation so they do not need
324 * an entry in the channel_table
325 */
326 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
327 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
328 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
329
330 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
331 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
332 if (!channel_table[cap]) {
333 err = -ENOMEM;
334 break;
335 }
336 }
337
338 if (err) {
339 pr_err("initialization failure\n");
340 for_each_dma_cap_mask(cap, dma_cap_mask_all)
341 free_percpu(channel_table[cap]);
342 }
343
344 return err;
345 }
346 arch_initcall(dma_channel_table_init);
347
348 /**
349 * dma_find_channel - find a channel to carry out the operation
350 * @tx_type: transaction type
351 */
352 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
353 {
354 return this_cpu_read(channel_table[tx_type]->chan);
355 }
356 EXPORT_SYMBOL(dma_find_channel);
357
358 /*
359 * net_dma_find_channel - find a channel for net_dma
360 * net_dma has alignment requirements
361 */
362 struct dma_chan *net_dma_find_channel(void)
363 {
364 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
365 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
366 return NULL;
367
368 return chan;
369 }
370 EXPORT_SYMBOL(net_dma_find_channel);
371
372 /**
373 * dma_issue_pending_all - flush all pending operations across all channels
374 */
375 void dma_issue_pending_all(void)
376 {
377 struct dma_device *device;
378 struct dma_chan *chan;
379
380 rcu_read_lock();
381 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
382 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
383 continue;
384 list_for_each_entry(chan, &device->channels, device_node)
385 if (chan->client_count)
386 device->device_issue_pending(chan);
387 }
388 rcu_read_unlock();
389 }
390 EXPORT_SYMBOL(dma_issue_pending_all);
391
392 /**
393 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
394 */
395 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
396 {
397 int node = dev_to_node(chan->device->dev);
398 return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
399 }
400
401 /**
402 * min_chan - finds the channel with the lowest table_count, preferring the cpu's numa-node
403 * @cap: capability to match
404 * @cpu: cpu index which the channel should be close to
405 *
406 * If some channels are close to the given cpu, the one with the lowest
407 * reference count is returned. Otherwise, cpu is ignored and only the
408 * reference count is taken into account.
409 * Must be called under dma_list_mutex.
410 */
411 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
412 {
413 struct dma_device *device;
414 struct dma_chan *chan;
415 struct dma_chan *min = NULL;
416 struct dma_chan *localmin = NULL;
417
418 list_for_each_entry(device, &dma_device_list, global_node) {
419 if (!dma_has_cap(cap, device->cap_mask) ||
420 dma_has_cap(DMA_PRIVATE, device->cap_mask))
421 continue;
422 list_for_each_entry(chan, &device->channels, device_node) {
423 if (!chan->client_count)
424 continue;
425 if (!min || chan->table_count < min->table_count)
426 min = chan;
427
428 if (dma_chan_is_local(chan, cpu))
429 if (!localmin ||
430 chan->table_count < localmin->table_count)
431 localmin = chan;
432 }
433 }
434
435 chan = localmin ? localmin : min;
436
437 if (chan)
438 chan->table_count++;
439
440 return chan;
441 }
442
443 /**
444 * dma_channel_rebalance - redistribute the available channels
445 *
446 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
447 * operation type) in the SMP case, and operation isolation (avoid
448 * multi-tasking channels) in the non-SMP case. Must be called under
449 * dma_list_mutex.
450 */
451 static void dma_channel_rebalance(void)
452 {
453 struct dma_chan *chan;
454 struct dma_device *device;
455 int cpu;
456 int cap;
457
458 /* undo the last distribution */
459 for_each_dma_cap_mask(cap, dma_cap_mask_all)
460 for_each_possible_cpu(cpu)
461 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
462
463 list_for_each_entry(device, &dma_device_list, global_node) {
464 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
465 continue;
466 list_for_each_entry(chan, &device->channels, device_node)
467 chan->table_count = 0;
468 }
469
470 /* don't populate the channel_table if no clients are available */
471 if (!dmaengine_ref_count)
472 return;
473
474 /* redistribute available channels */
475 for_each_dma_cap_mask(cap, dma_cap_mask_all)
476 for_each_online_cpu(cpu) {
477 chan = min_chan(cap, cpu);
478 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
479 }
480 }
481
482 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
483 {
484 struct dma_device *device;
485
486 if (!chan || !caps)
487 return -EINVAL;
488
489 device = chan->device;
490
491 /* check if the channel supports slave transactions */
492 if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
493 return -ENXIO;
494
495 /*
496 * Check whether it reports it uses the generic slave
497 * capabilities, if not, that means it doesn't support any
498 * kind of slave capabilities reporting.
499 */
500 if (!device->directions)
501 return -ENXIO;
502
503 caps->src_addr_widths = device->src_addr_widths;
504 caps->dst_addr_widths = device->dst_addr_widths;
505 caps->directions = device->directions;
506 caps->residue_granularity = device->residue_granularity;
507
508 caps->cmd_pause = !!device->device_pause;
509 caps->cmd_terminate = !!device->device_terminate_all;
510
511 return 0;
512 }
513 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
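/*
 * Illustrative sketch, not part of this file: a slave client can query the
 * generic capabilities of a channel it already holds, for example to check
 * whether pause is supported before relying on dmaengine_pause().
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		dmaengine_pause(chan);
 */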
514
515 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
516 struct dma_device *dev,
517 dma_filter_fn fn, void *fn_param)
518 {
519 struct dma_chan *chan;
520
521 if (!__dma_device_satisfies_mask(dev, mask)) {
522 pr_debug("%s: wrong capabilities\n", __func__);
523 return NULL;
524 }
525 /* devices with multiple channels need special handling as we need to
526 * ensure that all channels are either private or public.
527 */
528 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
529 list_for_each_entry(chan, &dev->channels, device_node) {
530 /* some channels are already publicly allocated */
531 if (chan->client_count)
532 return NULL;
533 }
534
535 list_for_each_entry(chan, &dev->channels, device_node) {
536 if (chan->client_count) {
537 pr_debug("%s: %s busy\n",
538 __func__, dma_chan_name(chan));
539 continue;
540 }
541 if (fn && !fn(chan, fn_param)) {
542 pr_debug("%s: %s filter said false\n",
543 __func__, dma_chan_name(chan));
544 continue;
545 }
546 return chan;
547 }
548
549 return NULL;
550 }
551
552 /**
553 * dma_get_slave_channel - try to get a specific channel exclusively
554 * @chan: target channel
555 */
556 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
557 {
558 int err = -EBUSY;
559
560 /* lock against __dma_request_channel */
561 mutex_lock(&dma_list_mutex);
562
563 if (chan->client_count == 0) {
564 err = dma_chan_get(chan);
565 if (err)
566 pr_debug("%s: failed to get %s: (%d)\n",
567 __func__, dma_chan_name(chan), err);
568 } else
569 chan = NULL;
570
571 mutex_unlock(&dma_list_mutex);
572
573
574 return chan;
575 }
576 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
577
578 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
579 {
580 dma_cap_mask_t mask;
581 struct dma_chan *chan;
582 int err;
583
584 dma_cap_zero(mask);
585 dma_cap_set(DMA_SLAVE, mask);
586
587 /* lock against __dma_request_channel */
588 mutex_lock(&dma_list_mutex);
589
590 chan = private_candidate(&mask, device, NULL, NULL);
591 if (chan) {
592 err = dma_chan_get(chan);
593 if (err) {
594 pr_debug("%s: failed to get %s: (%d)\n",
595 __func__, dma_chan_name(chan), err);
596 chan = NULL;
597 }
598 }
599
600 mutex_unlock(&dma_list_mutex);
601
602 return chan;
603 }
604 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
605
606 /**
607 * __dma_request_channel - try to allocate an exclusive channel
608 * @mask: capabilities that the channel must satisfy
609 * @fn: optional callback used to filter available channels
610 * @fn_param: opaque parameter to pass to dma_filter_fn
611 *
612 * Returns pointer to appropriate DMA channel on success or NULL.
613 */
614 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
615 dma_filter_fn fn, void *fn_param)
616 {
617 struct dma_device *device, *_d;
618 struct dma_chan *chan = NULL;
619 int err;
620
621 /* Find a channel */
622 mutex_lock(&dma_list_mutex);
623 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
624 chan = private_candidate(mask, device, fn, fn_param);
625 if (chan) {
626 /* Found a suitable channel, try to grab, prep, and
627 * return it. We first set DMA_PRIVATE to disable
628 * balance_ref_count as this channel will not be
629 * published in the general-purpose allocator
630 */
631 dma_cap_set(DMA_PRIVATE, device->cap_mask);
632 device->privatecnt++;
633 err = dma_chan_get(chan);
634
635 if (err == -ENODEV) {
636 pr_debug("%s: %s module removed\n",
637 __func__, dma_chan_name(chan));
638 list_del_rcu(&device->global_node);
639 } else if (err)
640 pr_debug("%s: failed to get %s: (%d)\n",
641 __func__, dma_chan_name(chan), err);
642 else
643 break;
644 if (--device->privatecnt == 0)
645 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
646 chan = NULL;
647 }
648 }
649 mutex_unlock(&dma_list_mutex);
650
651 pr_debug("%s: %s (%s)\n",
652 __func__,
653 chan ? "success" : "fail",
654 chan ? dma_chan_name(chan) : NULL);
655
656 return chan;
657 }
658 EXPORT_SYMBOL_GPL(__dma_request_channel);
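/*
 * Illustrative sketch, not part of this file: callers normally use the
 * dma_request_channel() wrapper around this function. The filter function,
 * my_dev and the choice of DMA_MEMCPY are hypothetical; the filter lets a
 * client pick a channel from a specific provider. The channel is then owned
 * exclusively until dma_release_channel() is called.
 *
 *	static bool example_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, example_filter, my_dev);
 *	if (!chan)
 *		return -ENODEV;
 */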
659
660 /**
661 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
662 * @dev: pointer to client device structure
663 * @name: slave channel name
664 *
665 * Returns pointer to appropriate DMA channel on success or an error pointer.
666 */
667 struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
668 const char *name)
669 {
670 /* If device-tree is present get slave info from here */
671 if (dev->of_node)
672 return of_dma_request_slave_channel(dev->of_node, name);
673
674 /* If device was enumerated by ACPI get slave info from here */
675 if (ACPI_HANDLE(dev))
676 return acpi_dma_request_slave_chan_by_name(dev, name);
677
678 return ERR_PTR(-ENODEV);
679 }
680 EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
681
682 /**
683 * dma_request_slave_channel - try to allocate an exclusive slave channel
684 * @dev: pointer to client device structure
685 * @name: slave channel name
686 *
687 * Returns pointer to appropriate DMA channel on success or NULL.
688 */
689 struct dma_chan *dma_request_slave_channel(struct device *dev,
690 const char *name)
691 {
692 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
693 if (IS_ERR(ch))
694 return NULL;
695 return ch;
696 }
697 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
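/*
 * Illustrative sketch, not part of this file: a slave driver usually requests
 * its channel by the name used in its DT or ACPI description ("tx" and pdev
 * are hypothetical). The _reason variant returns an error pointer, so a probe
 * routine can propagate the error instead of mapping every failure to NULL.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_slave_channel_reason(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */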
698
699 void dma_release_channel(struct dma_chan *chan)
700 {
701 mutex_lock(&dma_list_mutex);
702 WARN_ONCE(chan->client_count != 1,
703 "chan reference count %d != 1\n", chan->client_count);
704 dma_chan_put(chan);
705 /* drop PRIVATE cap enabled by __dma_request_channel() */
706 if (--chan->device->privatecnt == 0)
707 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
708 mutex_unlock(&dma_list_mutex);
709 }
710 EXPORT_SYMBOL_GPL(dma_release_channel);
711
712 /**
713 * dmaengine_get - register interest in dma_channels
714 */
715 void dmaengine_get(void)
716 {
717 struct dma_device *device, *_d;
718 struct dma_chan *chan;
719 int err;
720
721 mutex_lock(&dma_list_mutex);
722 dmaengine_ref_count++;
723
724 /* try to grab channels */
725 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
726 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
727 continue;
728 list_for_each_entry(chan, &device->channels, device_node) {
729 err = dma_chan_get(chan);
730 if (err == -ENODEV) {
731 /* module removed before we could use it */
732 list_del_rcu(&device->global_node);
733 break;
734 } else if (err)
735 pr_debug("%s: failed to get %s: (%d)\n",
736 __func__, dma_chan_name(chan), err);
737 }
738 }
739
740 /* if this is the first reference and there were channels
741 * waiting we need to rebalance to get those channels
742 * incorporated into the channel table
743 */
744 if (dmaengine_ref_count == 1)
745 dma_channel_rebalance();
746 mutex_unlock(&dma_list_mutex);
747 }
748 EXPORT_SYMBOL(dmaengine_get);
749
750 /**
751 * dmaengine_put - let dma drivers be removed when ref_count == 0
752 */
753 void dmaengine_put(void)
754 {
755 struct dma_device *device;
756 struct dma_chan *chan;
757
758 mutex_lock(&dma_list_mutex);
759 dmaengine_ref_count--;
760 BUG_ON(dmaengine_ref_count < 0);
761 /* drop channel references */
762 list_for_each_entry(device, &dma_device_list, global_node) {
763 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
764 continue;
765 list_for_each_entry(chan, &device->channels, device_node)
766 dma_chan_put(chan);
767 }
768 mutex_unlock(&dma_list_mutex);
769 }
770 EXPORT_SYMBOL(dmaengine_put);
771
772 static bool device_has_all_tx_types(struct dma_device *device)
773 {
774 /* A device that satisfies this test has channels that will never cause
775 * an async_tx channel switch event as all possible operation types can
776 * be handled.
777 */
778 #ifdef CONFIG_ASYNC_TX_DMA
779 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
780 return false;
781 #endif
782
783 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
784 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
785 return false;
786 #endif
787
788 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
789 if (!dma_has_cap(DMA_XOR, device->cap_mask))
790 return false;
791
792 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
793 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
794 return false;
795 #endif
796 #endif
797
798 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
799 if (!dma_has_cap(DMA_PQ, device->cap_mask))
800 return false;
801
802 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
803 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
804 return false;
805 #endif
806 #endif
807
808 return true;
809 }
810
811 static int get_dma_id(struct dma_device *device)
812 {
813 int rc;
814
815 mutex_lock(&dma_list_mutex);
816
817 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
818 if (rc >= 0)
819 device->dev_id = rc;
820
821 mutex_unlock(&dma_list_mutex);
822 return rc < 0 ? rc : 0;
823 }
824
825 /**
826 * dma_async_device_register - register a DMA device and its channels
827 * @device: &dma_device
828 */
829 int dma_async_device_register(struct dma_device *device)
830 {
831 int chancnt = 0, rc;
832 struct dma_chan* chan;
833 atomic_t *idr_ref;
834
835 if (!device)
836 return -ENODEV;
837
838 /* validate device routines */
839 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
840 !device->device_prep_dma_memcpy);
841 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
842 !device->device_prep_dma_xor);
843 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
844 !device->device_prep_dma_xor_val);
845 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
846 !device->device_prep_dma_pq);
847 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
848 !device->device_prep_dma_pq_val);
849 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
850 !device->device_prep_dma_interrupt);
851 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
852 !device->device_prep_dma_sg);
853 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
854 !device->device_prep_dma_cyclic);
855 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
856 !device->device_prep_interleaved_dma);
857
858 BUG_ON(!device->device_tx_status);
859 BUG_ON(!device->device_issue_pending);
860 BUG_ON(!device->dev);
861
862 WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
863 "this driver doesn't support generic slave capabilities reporting\n");
864
865 /* note: this only matters in the
866 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
867 */
868 if (device_has_all_tx_types(device))
869 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
870
871 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
872 if (!idr_ref)
873 return -ENOMEM;
874 rc = get_dma_id(device);
875 if (rc != 0) {
876 kfree(idr_ref);
877 return rc;
878 }
879
880 atomic_set(idr_ref, 0);
881
882 /* represent channels in sysfs. Probably want devs too */
883 list_for_each_entry(chan, &device->channels, device_node) {
884 rc = -ENOMEM;
885 chan->local = alloc_percpu(typeof(*chan->local));
886 if (chan->local == NULL)
887 goto err_out;
888 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
889 if (chan->dev == NULL) {
890 free_percpu(chan->local);
891 chan->local = NULL;
892 goto err_out;
893 }
894
895 chan->chan_id = chancnt++;
896 chan->dev->device.class = &dma_devclass;
897 chan->dev->device.parent = device->dev;
898 chan->dev->chan = chan;
899 chan->dev->idr_ref = idr_ref;
900 chan->dev->dev_id = device->dev_id;
901 atomic_inc(idr_ref);
902 dev_set_name(&chan->dev->device, "dma%dchan%d",
903 device->dev_id, chan->chan_id);
904
905 rc = device_register(&chan->dev->device);
906 if (rc) {
907 free_percpu(chan->local);
908 chan->local = NULL;
909 kfree(chan->dev);
910 atomic_dec(idr_ref);
911 goto err_out;
912 }
913 chan->client_count = 0;
914 }
915 device->chancnt = chancnt;
916
917 mutex_lock(&dma_list_mutex);
918 /* take references on public channels */
919 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
920 list_for_each_entry(chan, &device->channels, device_node) {
921 /* if clients are already waiting for channels we need
922 * to take references on their behalf
923 */
924 if (dma_chan_get(chan) == -ENODEV) {
925 /* note we can only get here for the first
926 * channel as the remaining channels are
927 * guaranteed to get a reference
928 */
929 rc = -ENODEV;
930 mutex_unlock(&dma_list_mutex);
931 goto err_out;
932 }
933 }
934 list_add_tail_rcu(&device->global_node, &dma_device_list);
935 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
936 device->privatecnt++; /* Always private */
937 dma_channel_rebalance();
938 mutex_unlock(&dma_list_mutex);
939
940 return 0;
941
942 err_out:
943 /* if we never registered a channel just release the idr */
944 if (atomic_read(idr_ref) == 0) {
945 mutex_lock(&dma_list_mutex);
946 idr_remove(&dma_idr, device->dev_id);
947 mutex_unlock(&dma_list_mutex);
948 kfree(idr_ref);
949 return rc;
950 }
951
952 list_for_each_entry(chan, &device->channels, device_node) {
953 if (chan->local == NULL)
954 continue;
955 mutex_lock(&dma_list_mutex);
956 chan->dev->chan = NULL;
957 mutex_unlock(&dma_list_mutex);
958 device_unregister(&chan->dev->device);
959 free_percpu(chan->local);
960 }
961 return rc;
962 }
963 EXPORT_SYMBOL(dma_async_device_register);
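/*
 * Illustrative sketch, not part of this file: a minimal provider fills in a
 * struct dma_device, links its channels and registers. All names are
 * hypothetical and most mandatory callbacks and fields are elided.
 *
 *	dma_cap_set(DMA_SLAVE, my_dma_dev.cap_mask);
 *	my_dma_dev.dev = &pdev->dev;
 *	my_dma_dev.device_tx_status = my_tx_status;
 *	my_dma_dev.device_issue_pending = my_issue_pending;
 *	my_dma_dev.device_prep_slave_sg = my_prep_slave_sg;
 *
 *	INIT_LIST_HEAD(&my_dma_dev.channels);
 *	my_chan.device = &my_dma_dev;
 *	list_add_tail(&my_chan.device_node, &my_dma_dev.channels);
 *
 *	ret = dma_async_device_register(&my_dma_dev);
 */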
964
965 /**
966 * dma_async_device_unregister - unregister a DMA device
967 * @device: &dma_device
968 *
969 * This routine is called by dma driver exit routines; dmaengine holds module
970 * references to prevent it being called while channels are in use.
971 */
972 void dma_async_device_unregister(struct dma_device *device)
973 {
974 struct dma_chan *chan;
975
976 mutex_lock(&dma_list_mutex);
977 list_del_rcu(&device->global_node);
978 dma_channel_rebalance();
979 mutex_unlock(&dma_list_mutex);
980
981 list_for_each_entry(chan, &device->channels, device_node) {
982 WARN_ONCE(chan->client_count,
983 "%s called while %d clients hold a reference\n",
984 __func__, chan->client_count);
985 mutex_lock(&dma_list_mutex);
986 chan->dev->chan = NULL;
987 mutex_unlock(&dma_list_mutex);
988 device_unregister(&chan->dev->device);
989 free_percpu(chan->local);
990 }
991 }
992 EXPORT_SYMBOL(dma_async_device_unregister);
993
994 struct dmaengine_unmap_pool {
995 struct kmem_cache *cache;
996 const char *name;
997 mempool_t *pool;
998 size_t size;
999 };
1000
1001 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1002 static struct dmaengine_unmap_pool unmap_pool[] = {
1003 __UNMAP_POOL(2),
1004 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1005 __UNMAP_POOL(16),
1006 __UNMAP_POOL(128),
1007 __UNMAP_POOL(256),
1008 #endif
1009 };
1010
1011 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1012 {
1013 int order = get_count_order(nr);
1014
1015 switch (order) {
1016 case 0 ... 1:
1017 return &unmap_pool[0];
1018 case 2 ... 4:
1019 return &unmap_pool[1];
1020 case 5 ... 7:
1021 return &unmap_pool[2];
1022 case 8:
1023 return &unmap_pool[3];
1024 default:
1025 BUG();
1026 return NULL;
1027 }
1028 }
1029
1030 static void dmaengine_unmap(struct kref *kref)
1031 {
1032 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1033 struct device *dev = unmap->dev;
1034 int cnt, i;
1035
1036 cnt = unmap->to_cnt;
1037 for (i = 0; i < cnt; i++)
1038 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1039 DMA_TO_DEVICE);
1040 cnt += unmap->from_cnt;
1041 for (; i < cnt; i++)
1042 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1043 DMA_FROM_DEVICE);
1044 cnt += unmap->bidi_cnt;
1045 for (; i < cnt; i++) {
1046 if (unmap->addr[i] == 0)
1047 continue;
1048 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1049 DMA_BIDIRECTIONAL);
1050 }
1051 cnt = unmap->map_cnt;
1052 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1053 }
1054
1055 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1056 {
1057 if (unmap)
1058 kref_put(&unmap->kref, dmaengine_unmap);
1059 }
1060 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1061
1062 static void dmaengine_destroy_unmap_pool(void)
1063 {
1064 int i;
1065
1066 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1067 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1068
1069 if (p->pool)
1070 mempool_destroy(p->pool);
1071 p->pool = NULL;
1072 if (p->cache)
1073 kmem_cache_destroy(p->cache);
1074 p->cache = NULL;
1075 }
1076 }
1077
1078 static int __init dmaengine_init_unmap_pool(void)
1079 {
1080 int i;
1081
1082 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1083 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1084 size_t size;
1085
1086 size = sizeof(struct dmaengine_unmap_data) +
1087 sizeof(dma_addr_t) * p->size;
1088
1089 p->cache = kmem_cache_create(p->name, size, 0,
1090 SLAB_HWCACHE_ALIGN, NULL);
1091 if (!p->cache)
1092 break;
1093 p->pool = mempool_create_slab_pool(1, p->cache);
1094 if (!p->pool)
1095 break;
1096 }
1097
1098 if (i == ARRAY_SIZE(unmap_pool))
1099 return 0;
1100
1101 dmaengine_destroy_unmap_pool();
1102 return -ENOMEM;
1103 }
1104
1105 struct dmaengine_unmap_data *
1106 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1107 {
1108 struct dmaengine_unmap_data *unmap;
1109
1110 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1111 if (!unmap)
1112 return NULL;
1113
1114 memset(unmap, 0, sizeof(*unmap));
1115 kref_init(&unmap->kref);
1116 unmap->dev = dev;
1117 unmap->map_cnt = nr;
1118
1119 return unmap;
1120 }
1121 EXPORT_SYMBOL(dmaengine_get_unmap_data);
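/*
 * Illustrative sketch, not part of this file: offload clients pair
 * dmaengine_get_unmap_data() with dmaengine_unmap_put(). dev, src, dst, the
 * offsets, len and tx are hypothetical; the caller fills addr[], len and the
 * direction counts, attaches the unmap data to a descriptor with
 * dma_set_unmap() and then drops its own reference.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (unmap) {
 *		unmap->len = len;
 *		unmap->addr[0] = dma_map_page(dev, src, src_off, len, DMA_TO_DEVICE);
 *		unmap->to_cnt = 1;
 *		unmap->addr[1] = dma_map_page(dev, dst, dst_off, len, DMA_FROM_DEVICE);
 *		unmap->from_cnt = 1;
 *		dma_set_unmap(tx, unmap);
 *		dmaengine_unmap_put(unmap);
 *	}
 */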
1122
1123 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1124 struct dma_chan *chan)
1125 {
1126 tx->chan = chan;
1127 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1128 spin_lock_init(&tx->lock);
1129 #endif
1130 }
1131 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1132
1133 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1134 * @tx: in-flight transaction to wait on
1135 */
1136 enum dma_status
1137 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1138 {
1139 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1140
1141 if (!tx)
1142 return DMA_COMPLETE;
1143
1144 while (tx->cookie == -EBUSY) {
1145 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1146 pr_err("%s timeout waiting for descriptor submission\n",
1147 __func__);
1148 return DMA_ERROR;
1149 }
1150 cpu_relax();
1151 }
1152 return dma_sync_wait(tx->chan, tx->cookie);
1153 }
1154 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1155
1156 /* dma_run_dependencies - helper routine for dma drivers to process
1157 * (start) dependent operations on their target channel
1158 * @tx: transaction with dependencies
1159 */
1160 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1161 {
1162 struct dma_async_tx_descriptor *dep = txd_next(tx);
1163 struct dma_async_tx_descriptor *dep_next;
1164 struct dma_chan *chan;
1165
1166 if (!dep)
1167 return;
1168
1169 /* we'll submit tx->next now, so clear the link */
1170 txd_clear_next(tx);
1171 chan = dep->chan;
1172
1173 /* keep submitting until a channel switch is detected; in that
1174 * case we will be called again as a result of processing the
1175 * interrupt from async_tx_channel_switch
1176 */
1177 for (; dep; dep = dep_next) {
1178 txd_lock(dep);
1179 txd_clear_parent(dep);
1180 dep_next = txd_next(dep);
1181 if (dep_next && dep_next->chan == chan)
1182 txd_clear_next(dep); /* ->next will be submitted */
1183 else
1184 dep_next = NULL; /* submit current dep and terminate */
1185 txd_unlock(dep);
1186
1187 dep->tx_submit(dep);
1188 }
1189
1190 chan->device->device_issue_pending(chan);
1191 }
1192 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1193
1194 static int __init dma_bus_init(void)
1195 {
1196 int err = dmaengine_init_unmap_pool();
1197
1198 if (err)
1199 return err;
1200 return class_register(&dma_devclass);
1201 }
1202 arch_initcall(dma_bus_init);
1203
1204