2 * Renesas SuperH DMA Engine support
4 * based on drivers/dma/fsldma.c
6 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
8 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10 * This is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * - DMA of SuperH does not have Hardware DMA chain mode.
16 * - MAX DMA size is 16MB.
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/dmaengine.h>
24 #include <linux/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/dmapool.h>
27 #include <linux/platform_device.h>
29 #include <asm/dma-sh.h>
32 /* DMA descriptor control */
33 #define DESC_LAST (-1)
35 #define DESC_NCOMP (0)
37 #define NR_DESCS_PER_CHANNEL 32
39 * Define the default configuration for dual address memory-memory transfer.
40 * The 0x400 value represents auto-request, external->external.
42 * This driver sets 4-byte burst mode.
43 * If you want to change the mode, change the value of RS_DEFAULT.
44 * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32))
46 #define RS_DEFAULT (RS_DUAL)
48 #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
49 static void sh_dmae_writel(struct sh_dmae_chan
*sh_dc
, u32 data
, u32 reg
)
51 ctrl_outl(data
, (SH_DMAC_CHAN_BASE(sh_dc
->id
) + reg
));
54 static u32
sh_dmae_readl(struct sh_dmae_chan
*sh_dc
, u32 reg
)
56 return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc
->id
) + reg
));
59 static void dmae_init(struct sh_dmae_chan
*sh_chan
)
61 u32 chcr
= RS_DEFAULT
; /* default is DUAL mode */
62 sh_dmae_writel(sh_chan
, chcr
, CHCR
);
66 * Reset DMA controller
68 * SH7780 has two DMAOR register
70 static void sh_dmae_ctl_stop(int id
)
72 unsigned short dmaor
= dmaor_read_reg(id
);
74 dmaor
&= ~(DMAOR_NMIF
| DMAOR_AE
);
75 dmaor_write_reg(id
, dmaor
);
78 static int sh_dmae_rst(int id
)
83 dmaor
= dmaor_read_reg(id
) | DMAOR_INIT
;
85 dmaor_write_reg(id
, dmaor
);
86 if (dmaor_read_reg(id
) & (DMAOR_AE
| DMAOR_NMIF
)) {
87 pr_warning(KERN_ERR
"dma-sh: Can't initialize DMAOR.\n");
93 static int dmae_is_busy(struct sh_dmae_chan
*sh_chan
)
95 u32 chcr
= sh_dmae_readl(sh_chan
, CHCR
);
97 if (!(chcr
& CHCR_TE
))
98 return -EBUSY
; /* working */
100 return 0; /* waiting */
103 static inline unsigned int calc_xmit_shift(struct sh_dmae_chan
*sh_chan
)
105 u32 chcr
= sh_dmae_readl(sh_chan
, CHCR
);
106 return ts_shift
[(chcr
& CHCR_TS_MASK
) >> CHCR_TS_SHIFT
];
109 static void dmae_set_reg(struct sh_dmae_chan
*sh_chan
, struct sh_dmae_regs hw
)
111 sh_dmae_writel(sh_chan
, hw
.sar
, SAR
);
112 sh_dmae_writel(sh_chan
, hw
.dar
, DAR
);
113 sh_dmae_writel(sh_chan
, hw
.tcr
>> calc_xmit_shift(sh_chan
), TCR
);
116 static void dmae_start(struct sh_dmae_chan
*sh_chan
)
118 u32 chcr
= sh_dmae_readl(sh_chan
, CHCR
);
120 chcr
|= CHCR_DE
| CHCR_IE
;
121 sh_dmae_writel(sh_chan
, chcr
, CHCR
);
124 static void dmae_halt(struct sh_dmae_chan
*sh_chan
)
126 u32 chcr
= sh_dmae_readl(sh_chan
, CHCR
);
128 chcr
&= ~(CHCR_DE
| CHCR_TE
| CHCR_IE
);
129 sh_dmae_writel(sh_chan
, chcr
, CHCR
);
132 static int dmae_set_chcr(struct sh_dmae_chan
*sh_chan
, u32 val
)
134 int ret
= dmae_is_busy(sh_chan
);
135 /* When DMA was working, can not set data to CHCR */
139 sh_dmae_writel(sh_chan
, val
, CHCR
);
143 #define DMARS1_ADDR 0x04
144 #define DMARS2_ADDR 0x08
145 #define DMARS_SHIFT 8
146 #define DMARS_CHAN_MSK 0x01
147 static int dmae_set_dmars(struct sh_dmae_chan
*sh_chan
, u16 val
)
151 int ret
= dmae_is_busy(sh_chan
);
155 if (sh_chan
->id
& DMARS_CHAN_MSK
)
158 switch (sh_chan
->id
) {
162 addr
= SH_DMARS_BASE
;
167 addr
= (SH_DMARS_BASE
+ DMARS1_ADDR
);
172 addr
= (SH_DMARS_BASE
+ DMARS2_ADDR
);
178 ctrl_outw((val
<< shift
) |
179 (ctrl_inw(addr
) & (shift
? 0xFF00 : 0x00FF)),
185 static dma_cookie_t
sh_dmae_tx_submit(struct dma_async_tx_descriptor
*tx
)
187 struct sh_desc
*desc
= tx_to_sh_desc(tx
);
188 struct sh_dmae_chan
*sh_chan
= to_sh_chan(tx
->chan
);
191 spin_lock_bh(&sh_chan
->desc_lock
);
193 cookie
= sh_chan
->common
.cookie
;
198 /* If desc only in the case of 1 */
199 if (desc
->async_tx
.cookie
!= -EBUSY
)
200 desc
->async_tx
.cookie
= cookie
;
201 sh_chan
->common
.cookie
= desc
->async_tx
.cookie
;
203 list_splice_init(&desc
->tx_list
, sh_chan
->ld_queue
.prev
);
205 spin_unlock_bh(&sh_chan
->desc_lock
);
210 static struct sh_desc
*sh_dmae_get_desc(struct sh_dmae_chan
*sh_chan
)
212 struct sh_desc
*desc
, *_desc
, *ret
= NULL
;
214 spin_lock_bh(&sh_chan
->desc_lock
);
215 list_for_each_entry_safe(desc
, _desc
, &sh_chan
->ld_free
, node
) {
216 if (async_tx_test_ack(&desc
->async_tx
)) {
217 list_del(&desc
->node
);
222 spin_unlock_bh(&sh_chan
->desc_lock
);
227 static void sh_dmae_put_desc(struct sh_dmae_chan
*sh_chan
, struct sh_desc
*desc
)
230 spin_lock_bh(&sh_chan
->desc_lock
);
232 list_splice_init(&desc
->tx_list
, &sh_chan
->ld_free
);
233 list_add(&desc
->node
, &sh_chan
->ld_free
);
235 spin_unlock_bh(&sh_chan
->desc_lock
);
239 static int sh_dmae_alloc_chan_resources(struct dma_chan
*chan
)
241 struct sh_dmae_chan
*sh_chan
= to_sh_chan(chan
);
242 struct sh_desc
*desc
;
244 spin_lock_bh(&sh_chan
->desc_lock
);
245 while (sh_chan
->descs_allocated
< NR_DESCS_PER_CHANNEL
) {
246 spin_unlock_bh(&sh_chan
->desc_lock
);
247 desc
= kzalloc(sizeof(struct sh_desc
), GFP_KERNEL
);
249 spin_lock_bh(&sh_chan
->desc_lock
);
252 dma_async_tx_descriptor_init(&desc
->async_tx
,
254 desc
->async_tx
.tx_submit
= sh_dmae_tx_submit
;
255 desc
->async_tx
.flags
= DMA_CTRL_ACK
;
256 INIT_LIST_HEAD(&desc
->tx_list
);
257 sh_dmae_put_desc(sh_chan
, desc
);
259 spin_lock_bh(&sh_chan
->desc_lock
);
260 sh_chan
->descs_allocated
++;
262 spin_unlock_bh(&sh_chan
->desc_lock
);
264 return sh_chan
->descs_allocated
;
268 * sh_dma_free_chan_resources - Free all resources of the channel.
270 static void sh_dmae_free_chan_resources(struct dma_chan
*chan
)
272 struct sh_dmae_chan
*sh_chan
= to_sh_chan(chan
);
273 struct sh_desc
*desc
, *_desc
;
276 BUG_ON(!list_empty(&sh_chan
->ld_queue
));
277 spin_lock_bh(&sh_chan
->desc_lock
);
279 list_splice_init(&sh_chan
->ld_free
, &list
);
280 sh_chan
->descs_allocated
= 0;
282 spin_unlock_bh(&sh_chan
->desc_lock
);
284 list_for_each_entry_safe(desc
, _desc
, &list
, node
)
288 static struct dma_async_tx_descriptor
*sh_dmae_prep_memcpy(
289 struct dma_chan
*chan
, dma_addr_t dma_dest
, dma_addr_t dma_src
,
290 size_t len
, unsigned long flags
)
292 struct sh_dmae_chan
*sh_chan
;
293 struct sh_desc
*first
= NULL
, *prev
= NULL
, *new;
302 sh_chan
= to_sh_chan(chan
);
305 /* Allocate the link descriptor from DMA pool */
306 new = sh_dmae_get_desc(sh_chan
);
308 dev_err(sh_chan
->dev
,
309 "No free memory for link descriptor\n");
313 copy_size
= min(len
, (size_t)SH_DMA_TCR_MAX
);
315 new->hw
.sar
= dma_src
;
316 new->hw
.dar
= dma_dest
;
317 new->hw
.tcr
= copy_size
;
321 new->mark
= DESC_NCOMP
;
322 async_tx_ack(&new->async_tx
);
326 dma_src
+= copy_size
;
327 dma_dest
+= copy_size
;
328 /* Insert the link descriptor to the LD ring */
329 list_add_tail(&new->node
, &first
->tx_list
);
332 new->async_tx
.flags
= flags
; /* client is in control of this ack */
333 new->async_tx
.cookie
= -EBUSY
; /* Last desc */
335 return &first
->async_tx
;
338 sh_dmae_put_desc(sh_chan
, first
);
344 * sh_chan_ld_cleanup - Clean up link descriptors
346 * This function clean up the ld_queue of DMA channel.
348 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan
*sh_chan
)
350 struct sh_desc
*desc
, *_desc
;
352 spin_lock_bh(&sh_chan
->desc_lock
);
353 list_for_each_entry_safe(desc
, _desc
, &sh_chan
->ld_queue
, node
) {
354 dma_async_tx_callback callback
;
355 void *callback_param
;
358 if (desc
->mark
== DESC_NCOMP
)
362 callback
= desc
->async_tx
.callback
;
363 callback_param
= desc
->async_tx
.callback_param
;
365 /* Remove from ld_queue list */
366 list_splice_init(&desc
->tx_list
, &sh_chan
->ld_free
);
368 dev_dbg(sh_chan
->dev
, "link descriptor %p will be recycle.\n",
371 list_move(&desc
->node
, &sh_chan
->ld_free
);
372 /* Run the link descriptor callback function */
374 spin_unlock_bh(&sh_chan
->desc_lock
);
375 dev_dbg(sh_chan
->dev
, "link descriptor %p callback\n",
377 callback(callback_param
);
378 spin_lock_bh(&sh_chan
->desc_lock
);
381 spin_unlock_bh(&sh_chan
->desc_lock
);
384 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan
*sh_chan
)
386 struct list_head
*ld_node
;
387 struct sh_dmae_regs hw
;
390 if (dmae_is_busy(sh_chan
))
393 /* Find the first un-transfer desciptor */
394 for (ld_node
= sh_chan
->ld_queue
.next
;
395 (ld_node
!= &sh_chan
->ld_queue
)
396 && (to_sh_desc(ld_node
)->mark
== DESC_COMP
);
397 ld_node
= ld_node
->next
)
400 if (ld_node
!= &sh_chan
->ld_queue
) {
401 /* Get the ld start address from ld_queue */
402 hw
= to_sh_desc(ld_node
)->hw
;
403 dmae_set_reg(sh_chan
, hw
);
/* dmaengine .device_issue_pending hook: push the queued work to hardware. */
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	sh_chan_xfer_ld_queue(to_sh_chan(chan));
}
414 static enum dma_status
sh_dmae_is_complete(struct dma_chan
*chan
,
419 struct sh_dmae_chan
*sh_chan
= to_sh_chan(chan
);
420 dma_cookie_t last_used
;
421 dma_cookie_t last_complete
;
423 sh_dmae_chan_ld_cleanup(sh_chan
);
425 last_used
= chan
->cookie
;
426 last_complete
= sh_chan
->completed_cookie
;
427 if (last_complete
== -EBUSY
)
428 last_complete
= last_used
;
431 *done
= last_complete
;
436 return dma_async_is_complete(cookie
, last_complete
, last_used
);
439 static irqreturn_t
sh_dmae_interrupt(int irq
, void *data
)
441 irqreturn_t ret
= IRQ_NONE
;
442 struct sh_dmae_chan
*sh_chan
= (struct sh_dmae_chan
*)data
;
443 u32 chcr
= sh_dmae_readl(sh_chan
, CHCR
);
445 if (chcr
& CHCR_TE
) {
450 tasklet_schedule(&sh_chan
->tasklet
);
456 #if defined(CONFIG_CPU_SH4)
457 static irqreturn_t
sh_dmae_err(int irq
, void *data
)
460 struct sh_dmae_device
*shdev
= (struct sh_dmae_device
*)data
;
463 if (shdev
->pdata
.mode
& SHDMA_MIX_IRQ
) {
466 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
471 if (dmaor_read_reg(cnt
) & (DMAOR_NMIF
| DMAOR_AE
)) {
479 /* reset dma controller */
480 err
= sh_dmae_rst(0);
483 if (shdev
->pdata
.mode
& SHDMA_DMAOR1
) {
484 err
= sh_dmae_rst(1);
494 static void dmae_do_tasklet(unsigned long data
)
496 struct sh_dmae_chan
*sh_chan
= (struct sh_dmae_chan
*)data
;
497 struct sh_desc
*desc
, *_desc
, *cur_desc
= NULL
;
498 u32 sar_buf
= sh_dmae_readl(sh_chan
, SAR
);
500 list_for_each_entry_safe(desc
, _desc
,
501 &sh_chan
->ld_queue
, node
) {
502 if ((desc
->hw
.sar
+ desc
->hw
.tcr
) == sar_buf
) {
509 switch (cur_desc
->async_tx
.cookie
) {
510 case 0: /* other desc data */
512 case -EBUSY
: /* last desc */
513 sh_chan
->completed_cookie
=
514 cur_desc
->async_tx
.cookie
;
516 default: /* first desc ( 0 < )*/
517 sh_chan
->completed_cookie
=
518 cur_desc
->async_tx
.cookie
- 1;
521 cur_desc
->mark
= DESC_COMP
;
524 sh_chan_xfer_ld_queue(sh_chan
);
525 sh_dmae_chan_ld_cleanup(sh_chan
);
528 static unsigned int get_dmae_irq(unsigned int id
)
530 unsigned int irq
= 0;
531 if (id
< ARRAY_SIZE(dmte_irq_map
))
532 irq
= dmte_irq_map
[id
];
536 static int __devinit
sh_dmae_chan_probe(struct sh_dmae_device
*shdev
, int id
)
539 unsigned int irq
= get_dmae_irq(id
);
540 unsigned long irqflags
= IRQF_DISABLED
;
541 struct sh_dmae_chan
*new_sh_chan
;
544 new_sh_chan
= kzalloc(sizeof(struct sh_dmae_chan
), GFP_KERNEL
);
546 dev_err(shdev
->common
.dev
,
547 "No free memory for allocating dma channels!\n");
551 new_sh_chan
->dev
= shdev
->common
.dev
;
552 new_sh_chan
->id
= id
;
554 /* Init DMA tasklet */
555 tasklet_init(&new_sh_chan
->tasklet
, dmae_do_tasklet
,
556 (unsigned long)new_sh_chan
);
558 /* Init the channel */
559 dmae_init(new_sh_chan
);
561 spin_lock_init(&new_sh_chan
->desc_lock
);
563 /* Init descripter manage list */
564 INIT_LIST_HEAD(&new_sh_chan
->ld_queue
);
565 INIT_LIST_HEAD(&new_sh_chan
->ld_free
);
567 /* copy struct dma_device */
568 new_sh_chan
->common
.device
= &shdev
->common
;
570 /* Add the channel to DMA device channel list */
571 list_add_tail(&new_sh_chan
->common
.device_node
,
572 &shdev
->common
.channels
);
573 shdev
->common
.chancnt
++;
575 if (shdev
->pdata
.mode
& SHDMA_MIX_IRQ
) {
576 irqflags
= IRQF_SHARED
;
577 #if defined(DMTE6_IRQ)
578 if (irq
>= DMTE6_IRQ
)
585 snprintf(new_sh_chan
->dev_id
, sizeof(new_sh_chan
->dev_id
),
586 "sh-dmae%d", new_sh_chan
->id
);
588 /* set up channel irq */
589 err
= request_irq(irq
, &sh_dmae_interrupt
, irqflags
,
590 new_sh_chan
->dev_id
, new_sh_chan
);
592 dev_err(shdev
->common
.dev
, "DMA channel %d request_irq error "
593 "with return %d\n", id
, err
);
597 /* CHCR register control function */
598 new_sh_chan
->set_chcr
= dmae_set_chcr
;
599 /* DMARS register control function */
600 new_sh_chan
->set_dmars
= dmae_set_dmars
;
602 shdev
->chan
[id
] = new_sh_chan
;
606 /* remove from dmaengine device node */
607 list_del(&new_sh_chan
->common
.device_node
);
612 static void sh_dmae_chan_remove(struct sh_dmae_device
*shdev
)
616 for (i
= shdev
->common
.chancnt
- 1 ; i
>= 0 ; i
--) {
617 if (shdev
->chan
[i
]) {
618 struct sh_dmae_chan
*shchan
= shdev
->chan
[i
];
619 if (!(shdev
->pdata
.mode
& SHDMA_MIX_IRQ
))
620 free_irq(dmte_irq_map
[i
], shchan
);
622 list_del(&shchan
->common
.device_node
);
624 shdev
->chan
[i
] = NULL
;
627 shdev
->common
.chancnt
= 0;
630 static int __init
sh_dmae_probe(struct platform_device
*pdev
)
632 int err
= 0, cnt
, ecnt
;
633 unsigned long irqflags
= IRQF_DISABLED
;
634 #if defined(CONFIG_CPU_SH4)
635 int eirq
[] = { DMAE0_IRQ
,
636 #if defined(DMAE1_IRQ)
641 struct sh_dmae_device
*shdev
;
643 /* get platform data */
644 if (!pdev
->dev
.platform_data
)
647 shdev
= kzalloc(sizeof(struct sh_dmae_device
), GFP_KERNEL
);
649 dev_err(&pdev
->dev
, "No enough memory\n");
654 memcpy(&shdev
->pdata
, pdev
->dev
.platform_data
,
655 sizeof(struct sh_dmae_pdata
));
657 /* reset dma controller */
658 err
= sh_dmae_rst(0);
662 /* SH7780/85/23 has DMAOR1 */
663 if (shdev
->pdata
.mode
& SHDMA_DMAOR1
) {
664 err
= sh_dmae_rst(1);
669 INIT_LIST_HEAD(&shdev
->common
.channels
);
671 dma_cap_set(DMA_MEMCPY
, shdev
->common
.cap_mask
);
672 shdev
->common
.device_alloc_chan_resources
673 = sh_dmae_alloc_chan_resources
;
674 shdev
->common
.device_free_chan_resources
= sh_dmae_free_chan_resources
;
675 shdev
->common
.device_prep_dma_memcpy
= sh_dmae_prep_memcpy
;
676 shdev
->common
.device_is_tx_complete
= sh_dmae_is_complete
;
677 shdev
->common
.device_issue_pending
= sh_dmae_memcpy_issue_pending
;
678 shdev
->common
.dev
= &pdev
->dev
;
679 /* Default transfer size of 32 bytes requires 32-byte alignment */
680 shdev
->common
.copy_align
= 5;
682 #if defined(CONFIG_CPU_SH4)
683 /* Non Mix IRQ mode SH7722/SH7730 etc... */
684 if (shdev
->pdata
.mode
& SHDMA_MIX_IRQ
) {
685 irqflags
= IRQF_SHARED
;
687 #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
692 for (ecnt
= 0 ; ecnt
< ARRAY_SIZE(eirq
); ecnt
++) {
693 err
= request_irq(eirq
[ecnt
], sh_dmae_err
, irqflags
,
694 "DMAC Address Error", shdev
);
696 dev_err(&pdev
->dev
, "DMA device request_irq"
697 "error (irq %d) with return %d\n",
702 #endif /* CONFIG_CPU_SH4 */
704 /* Create DMA Channel */
705 for (cnt
= 0 ; cnt
< MAX_DMA_CHANNELS
; cnt
++) {
706 err
= sh_dmae_chan_probe(shdev
, cnt
);
711 platform_set_drvdata(pdev
, shdev
);
712 dma_async_device_register(&shdev
->common
);
717 sh_dmae_chan_remove(shdev
);
720 for (ecnt
-- ; ecnt
>= 0; ecnt
--)
721 free_irq(eirq
[ecnt
], shdev
);
729 static int __exit
sh_dmae_remove(struct platform_device
*pdev
)
731 struct sh_dmae_device
*shdev
= platform_get_drvdata(pdev
);
733 dma_async_device_unregister(&shdev
->common
);
735 if (shdev
->pdata
.mode
& SHDMA_MIX_IRQ
) {
736 free_irq(DMTE0_IRQ
, shdev
);
737 #if defined(DMTE6_IRQ)
738 free_irq(DMTE6_IRQ
, shdev
);
742 /* channel data remove */
743 sh_dmae_chan_remove(shdev
);
745 if (!(shdev
->pdata
.mode
& SHDMA_MIX_IRQ
)) {
746 free_irq(DMAE0_IRQ
, shdev
);
747 #if defined(DMAE1_IRQ)
748 free_irq(DMAE1_IRQ
, shdev
);
756 static void sh_dmae_shutdown(struct platform_device
*pdev
)
758 struct sh_dmae_device
*shdev
= platform_get_drvdata(pdev
);
760 if (shdev
->pdata
.mode
& SHDMA_DMAOR1
)
764 static struct platform_driver sh_dmae_driver
= {
765 .remove
= __exit_p(sh_dmae_remove
),
766 .shutdown
= sh_dmae_shutdown
,
768 .name
= "sh-dma-engine",
772 static int __init
sh_dmae_init(void)
774 return platform_driver_probe(&sh_dmae_driver
, sh_dmae_probe
);
776 module_init(sh_dmae_init
);
778 static void __exit
sh_dmae_exit(void)
780 platform_driver_unregister(&sh_dmae_driver
);
782 module_exit(sh_dmae_exit
);
784 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
785 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
786 MODULE_LICENSE("GPL");