/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which is walked without locking but is
 * never modified once the device is registered; it is set up by the driver
 * before registration.
 *
 * See Documentation/dmaengine.txt for more details.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name = "dma",
	.dev_groups = dma_dev_groups,
	.dev_release = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
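
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that already holds @chan and has a cookie from a previous
 * dmaengine_submit() can poll for completion with dma_sync_wait().  The
 * cookie and channel are assumed to come from elsewhere in the caller.
 */
static int __maybe_unused
dmaengine_doc_sync_wait_sketch(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	/* issues pending work and busy-waits, with an internal timeout */
	status = dma_sync_wait(chan, cookie);
	if (status != DMA_COMPLETE)
		return -EIO;

	return 0;
}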

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
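
/*
 * Illustrative sketch (editorial addition): the opportunistic client model
 * described in the header comment.  dmaengine_get() registers interest and
 * pins provider modules, dma_find_channel() returns this cpu's public
 * channel for the operation type (or NULL if none is available), and
 * dmaengine_put() drops the interest again.  Descriptor preparation is
 * elided.
 */
static void __maybe_unused dmaengine_doc_find_channel_sketch(void)
{
	struct dma_chan *chan;

	dmaengine_get();

	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* ... prepare and submit memcpy descriptors on chan ... */
		dma_async_issue_pending(chan);
	}

	dmaengine_put();
}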

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
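
/*
 * Illustrative sketch (editorial addition): requesting an exclusive channel
 * with a capability mask and an optional filter callback.  The filter shown
 * here is a hypothetical example that accepts any candidate; real callers
 * usually match a specific device or request line in the filter.
 */
static bool dmaengine_doc_filter_sketch(struct dma_chan *chan, void *param)
{
	/* hypothetical filter: accept every candidate channel */
	return true;
}

static void __maybe_unused dmaengine_doc_request_channel_sketch(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = __dma_request_channel(&mask, dmaengine_doc_filter_sketch, NULL);
	if (!chan)
		return;

	/* ... use the channel exclusively ... */

	dma_release_channel(chan);
}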

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
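
/*
 * Illustrative sketch (editorial addition): a typical slave client asks for
 * a channel by name ("rx" is an assumed name from the client's binding),
 * prepares a single already-mapped buffer and kicks off the transfer.
 * Slave configuration, the completion callback and the eventual
 * dma_release_channel() call are elided; @buf is assumed to be a DMA
 * address of @len bytes owned by the caller.
 */
static void __maybe_unused
dmaengine_doc_slave_sketch(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return; /* no channel, fall back to PIO */

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}

	/* completion handling and dma_release_channel() elided */
}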

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
#endif
#endif

#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
#endif
#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++; /* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
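
/*
 * Illustrative sketch (editorial addition): how an offload user obtains and
 * releases unmap data for a single source/destination pair.  @src and @dst
 * are assumed to be kernel pages owned by the caller; dma_mapping_error()
 * checks and the descriptor preparation between map and put are elided.
 * The addr[] ordering matches dmaengine_unmap(): TO_DEVICE entries first,
 * then FROM_DEVICE entries.
 */
static void __maybe_unused
dmaengine_doc_unmap_sketch(struct dma_chan *chan, struct page *dst,
			   struct page *src, size_t len)
{
	struct device *dev = chan->device->dev;
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	/* ... hand unmap->addr[] to a prep routine, submit, then ... */

	dmaengine_unmap_put(unmap);
}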

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
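
/*
 * Illustrative sketch (editorial addition): an async_tx style user can
 * submit a prepared descriptor and then block until it (and any dependency
 * chain) has completed.  @tx is assumed to come from one of the device's
 * prep hooks; error propagation is reduced to -EIO for brevity.
 */
static int __maybe_unused
dmaengine_doc_wait_sketch(struct dma_async_tx_descriptor *tx)
{
	dmaengine_submit(tx);
	dma_async_issue_pending(tx->chan);

	return dma_wait_for_async_tx(tx) == DMA_COMPLETE ? 0 : -EIO;
}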

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
