/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* Supports:
 * Timberdale FPGA DMA engine
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <linux/timb_dma.h>
#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008
/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18
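/*
 * Each RX/TX pair shares one 0x40-byte register instance; td_probe() below
 * derives a channel's register base from these offsets roughly like this
 * (illustrative only, see the probe loop for the actual code):
 *
 *	membase = td->membase +
 *		(chan_id / 2) * TIMBDMA_INSTANCE_OFFSET +
 *		(is_rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
 */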
/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30
/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8
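/*
 * Hardware descriptor layout, as filled in by td_fill_desc() below:
 * byte 0 holds the control bits (0x21 = transfer + valid, 0x02 marks the
 * last element), byte 1 is unused, bytes 2-3 hold the length and bytes
 * 4-7 the bus address, both least significant byte first.
 */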
struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};
struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	dma_cookie_t		last_completed_cookie;
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_data_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};
struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}
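/*
 * The pointer arithmetic above works because td_probe() allocates the
 * channel array immediately after struct timb_dma in a single kzalloc()
 * call, so stepping back over the preceding channels and the device
 * struct lands on the containing struct timb_dma.
 */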
/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}
static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		/* stop at the descriptor marked as the last element */
		if (descs[0] & 0x02)
			break;
	}
}
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHORT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
		dma_desc, (void *)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}
/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_FROM_DEVICE) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}
static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_FROM_DEVICE)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
	td_chan->last_completed_cookie = txd->cookie;
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}
static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);

	cookie = txd->chan->cookie;
	if (++cookie < 0)
		cookie = 1;
	txd->chan->cookie = cookie;
	txd->cookie = cookie;

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		return NULL;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);

	return NULL;
}
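/*
 * Note: the descriptor list allocated above lives in normal kernel memory
 * and is mapped DMA_TO_DEVICE; td_prep_slave_sg() fills it on the CPU and
 * syncs it for the device before the hardware fetches it.
 */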
static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}
static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}
static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	td_chan->last_completed_cookie = 1;
	chan->cookie = 1;
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}
static enum dma_status td_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	last_complete = td_chan->last_completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	dev_dbg(chan2dev(chan),
		"%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
		__func__, ret, last_complete, last_used);

	return ret;
}
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}
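/*
 * For reference, a slave-channel client would typically drive this engine
 * through the generic dmaengine API roughly as below. This is a sketch
 * only; the capability mask, filter function and error handling depend on
 * the client driver and kernel version:
 *
 *	struct dma_chan *chan = dma_request_channel(mask, filter_fn, NULL);
 *	struct dma_async_tx_descriptor *txd =
 *		chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	txd->callback_param = ctx;
 *	txd->tx_submit(txd);
 *	dma_async_issue_pending(chan);
 */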
static void td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);
}
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	/* re-enable interrupts for the channels that still want them */
	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources	= td_alloc_chan_resources;
	td->dma.device_free_chan_resources	= td_free_chan_resources;
	td->dma.device_is_tx_complete		= td_is_tx_complete;
	td->dma.device_issue_pending		= td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_tasklet_kill;
		}

		td_chan->chan.device = &td->dma;
		td_chan->chan.cookie = 1;
		td_chan->chan.chan_id = i;
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}
static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);

	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}
static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= __exit_p(td_remove),
};
static int __init td_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_init);

static void __exit td_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);
);