dmaengine: provide a common function for completing a dma descriptor
drivers/dma/dw_dmac.c
/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
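
/*
 * Illustration (editor's note, not part of the driver): for a plain
 * memcpy channel chan->private is NULL, so the macro above falls back
 * to 16-item bursts on both sides, destination on AHB master 0 and
 * source on master 1, i.e. roughly:
 *
 *	ctllo = DWC_CTLL_DST_MSIZE(DW_DMA_MSIZE_16)
 *	      | DWC_CTLL_SRC_MSIZE(DW_DMA_MSIZE_16)
 *	      | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *	      | DWC_CTLL_DMS(0) | DWC_CTLL_SMS(1);
 */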

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

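/*
 * Sketch of the resulting descriptor lifecycle (editor's summary of the
 * code below; the device pointer is chan2parent(chan) in practice):
 *
 *	desc->txd.phys = dma_map_single(dev, &desc->lli,
 *			sizeof(desc->lli), DMA_TO_DEVICE);	at alloc
 *	...CPU fills in desc->lli...
 *	dma_sync_single_for_device(dev, desc->txd.phys, ...);	at prep
 *	...controller walks the LLP chain...
 *	dma_sync_single_for_cpu(dev, desc->txd.phys, ...);	before reuse
 */
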
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
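
/*
 * For reference: dma_cookie_complete() used above is the new common
 * helper from drivers/dma/dmaengine.h that this patch switches to.
 * Paraphrased from memory, so details may differ, it amounts to:
 *
 *	tx->chan->completed_cookie = tx->cookie;
 *	tx->cookie = 0;
 */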

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptor's addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptor's llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
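
/*
 * Client-side sketch (editor's illustration; "my_done"/"my_ctx" are
 * hypothetical). A descriptor returned by one of the prep functions
 * below reaches dwc_tx_submit() through the generic dmaengine
 * wrappers, roughly:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT);
 *	txd->callback = my_done;
 *	txd->callback_param = my_ctx;
 *	cookie = dmaengine_submit(txd);		ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);
 */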

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
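
/*
 * Worked example (editor's note): an 8-byte-aligned 100000-byte copy
 * gets src_width = dst_width = 3, so one lli moves at most
 * DWC_MAX_COUNT << 3 = 32760 bytes; the loop above would emit llis of
 * 32760 + 32760 + 32760 + 1720 bytes.
 */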

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This is done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
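
/*
 * Example (editor's note): for *maxburst == 16, fls(16) == 5, so the
 * stored value is 3, matching the CTLx SRC/DST_MSIZE encoding for a
 * 16-item burst; likewise 8 -> 2, 4 -> 1, and 1 falls back to 0.
 */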

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check that the channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
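
/*
 * Client-side sketch (editor's illustration; the FIFO address is
 * hypothetical). A peripheral driver would normally reach
 * set_runtime_config() through dmaengine_slave_config():
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,	stored as 2 by convert_burst()
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */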

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
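
/*
 * Client-side sketch (editor's illustration): the three hardware
 * commands above are normally issued through the generic wrappers:
 *
 *	dmaengine_pause(chan);		DMA_PAUSE: suspend, drain FIFO
 *	dmaengine_resume(chan);		DMA_RESUME: clear suspend bit
 *	dmaengine_terminate_all(chan);	DMA_TERMINATE_ALL: channel off,
 *					descriptors recycled, no callbacks
 */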

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	chan->completed_cookie = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
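
/*
 * Usage sketch for the cyclic extensions (editor's illustration;
 * buffer address, sizes and the callback are made up). An audio driver
 * streaming four 4 KiB periods to a device might do:
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_phys, 4 * 4096, 4096,
 *			DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = period_elapsed;
 *	cdesc->period_callback_param = substream;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */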

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->chan.completed_cookie = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");