/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>

#include "davinci_cpdma.h"

#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4
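
/*
 * Example (illustrative only): a single-fragment transmit descriptor
 * carries its buffer length in the low 11 bits of hw_mode, with SOP/EOP
 * marking a complete packet and OWNER handing the descriptor to the
 * hardware:
 *
 *	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 *	desc_write(desc, hw_mode, mode | len);
 *
 * On completion the hardware clears OWNER and may set EOQ (end of queue)
 * and/or TD_COMPLETE (teardown) in the same word.
 */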

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	int				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
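
/*
 * For instance (illustrative only), enabling the transmit DMA engine in
 * cpdma_ctlr_start() below:
 *
 *	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
 *
 * expands to __raw_writel(1, ctlr->params.dmaregs + 0x04), i.e. a raw
 * 32-bit MMIO write at offset CPDMA_TXCONTROL into the DMA register block.
 */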

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
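
/*
 * Example (illustrative only): on a tx channel, directed = 1 yields
 *
 *	mode |= CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT);
 *
 * i.e. bit 20 set and port 1 encoded in bits 16..18, directing the
 * packet to switch port 1 instead of letting the hardware choose.
 */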

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
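
/*
 * The two helpers above are inverses of each other (illustrative only):
 *
 *	dma_addr_t dma = desc_phys(pool, desc);
 *	WARN_ON(desc_from_phys(pool, dma) != desc);
 *
 * Hardware-visible fields (hdp, cp, hw_next) always carry the dma
 * address; the driver converts back to an __iomem pointer before
 * touching a descriptor.
 */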

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	struct cpdma_desc __iomem *desc = NULL;

	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
							   pool->desc_size);
	if (desc)
		pool->used_desc++;

	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
	pool->used_desc--;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
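
/*
 * Typical controller bring-up from platform glue code (a minimal sketch;
 * the field values shown are platform-specific assumptions, not part of
 * this driver; the txhdp/rxhdp/txcp/rxcp/rxfree and desc_* fields are
 * omitted here):
 *
 *	struct cpdma_params params = {
 *		.dev		= &pdev->dev,
 *		.dmaregs	= dma_regs,
 *		.num_chan	= 8,
 *		.has_soft_reset	= true,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	cpdma_ctlr_start(dma);
 */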

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
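
/*
 * Channel numbers are linearized tx-then-rx (see chan_linear() in
 * davinci_cpdma.h); a caller typically creates one channel per direction
 * and supplies a completion handler. A sketch under assumed names, not
 * part of this driver:
 *
 *	static void my_tx_handler(void *token, int len, int status)
 *	{
 *		dev_kfree_skb_any(token);	// token given to submit
 *	}
 *
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), my_tx_handler);
 *	if (IS_ERR(txch))
 *		return PTR_ERR(txch);
 */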

int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
	return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;

	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
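
/*
 * A transmit path then looks roughly like this (illustrative sketch,
 * assuming an skb-based caller; not part of this driver):
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		goto drop;	// e.g. -ENOMEM: descriptor quota exhausted
 *
 * The skb serves as the opaque token and is handed back to the channel
 * handler once the completed descriptor is reaped.
 */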

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
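
/*
 * Callers use this for flow control ahead of cpdma_chan_submit()
 * (illustrative sketch, not part of this driver):
 *
 *	if (!cpdma_check_free_tx_desc(txch))
 *		netif_stop_queue(ndev);	// restart from the tx handler
 */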

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
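
/*
 * This is the natural NAPI poll body (a sketch under assumed names; the
 * EOI value written back is platform-defined, not part of this driver):
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = cpdma_chan_process(rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			cpdma_ctlr_eoi(dma, eoi_value);	// re-arm irq
 *		}
 *		return done;
 *	}
 */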

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
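
/*
 * Each entry names the register, field position, field mask, and allowed
 * access. For example (illustrative only), CPDMA_RX_BUFFER_OFFSET lives
 * in bits 0..15 of CPDMA_RXBUFFOFS, so
 *
 *	cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
 *
 * read-modify-writes that register so received data is written starting
 * 2 bytes into each rx buffer.
 */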

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");