/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)
/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4
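
/*
 * Worked example: a minimal single-fragment tx descriptor carries
 * SOP | EOP | OWNER plus the packet length in hw_mode, so a 64-byte
 * frame is queued with hw_mode = 0x80000000 | 0x40000000 | 0x20000000
 * | 64 = 0xe0000040.  On completion the hardware clears OWNER and the
 * actual length is read back from the low 11 bits (mask 0x7ff).
 */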
#define CPDMA_TEARDOWN_VALUE	0xfffffffc
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};
enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};
struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan
/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
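
/*
 * These expand to plain MMIO accesses; for example,
 * chan_write(chan, hdp, v) becomes __raw_writel(v, chan->hdp), and
 * desc_read(desc, hw_mode) becomes __raw_readl(&desc->hw_mode), i.e.
 * the 'fld' argument names a member of the chan or descriptor struct.
 */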
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
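
/*
 * Worked example (directed descriptors): for a tx descriptor aimed at
 * slave port 1, cpdma_desc_to_port() ORs in CPDMA_DESC_TO_PORT_EN
 * (bit 20) and 1 << CPDMA_TO_PORT_SHIFT (bit 16), i.e. mode |= 0x00110000.
 * A directed value of 0 leaves the mode word untouched and lets the
 * switch pick the egress port.
 */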
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
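
/*
 * A minimal sketch of the two configurations (field values are made up
 * for illustration, not taken from any real platform):
 *
 *	params.desc_mem_phys = 0x01e20000;  // dedicated on-chip descriptor RAM
 *	params.desc_hw_addr  = 0x01e20000;  // same region as seen by the DMA engine
 *
 * or, for plain DDR-backed descriptors:
 *
 *	params.desc_mem_phys = 0;           // pool falls back to dma_alloc_coherent()
 *
 * In both cases desc_mem_size and desc_align size and carve up the pool.
 */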
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev = dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc -
	       (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
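
/*
 * Worked example: with iomap == 0xe0860000 (CPU view) and
 * hw_addr == 0x01e20000 (DMA engine view), a descriptor at CPU address
 * 0xe0860040 translates to desc_phys() == 0x01e20040, and
 * desc_from_phys() maps it back.  The addresses are illustrative only.
 */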
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	struct cpdma_desc __iomem *desc = NULL;

	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
							   pool->desc_size);
	if (desc)
		pool->used_desc++;

	return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
	pool->used_desc--;
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
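
/*
 * Minimal controller lifecycle sketch (error handling elided; the
 * cpdma_params values and the 'regs' base are hypothetical, not from
 * a real platform):
 *
 *	struct cpdma_params params = {
 *		.dev             = &pdev->dev,
 *		.dmaregs         = regs + 0x800,
 *		.txhdp           = regs + 0xa00,
 *		.rxhdp           = regs + 0xa20,
 *		.txcp            = regs + 0xa40,
 *		.rxcp            = regs + 0xa60,
 *		.num_chan        = 8,
 *		.has_soft_reset  = true,
 *		.min_packet_size = 64,
 *		.desc_mem_size   = SZ_8K,
 *		.desc_align      = 16,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	cpdma_ctlr_start(dma);		// after creating channels
 *	...
 *	cpdma_ctlr_stop(dma);
 *	cpdma_ctlr_destroy(dma);
 */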
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num = ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
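
/*
 * Usage sketch, assuming the tx_chan_num()/rx_chan_num() helpers from
 * davinci_cpdma.h; handler signatures follow cpdma_handler_fn, and the
 * foo_* names are placeholders:
 *
 *	static void foo_tx_handler(void *token, int len, int status);
 *	static void foo_rx_handler(void *token, int len, int status);
 *
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), foo_tx_handler);
 *	rxch = cpdma_chan_create(dma, rx_chan_num(0), foo_rx_handler);
 */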
int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
	return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
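
/*
 * Typical rx refill from a hypothetical caller (the skb is the token
 * handed back through the rx handler; names are illustrative):
 *
 *	skb = netdev_alloc_skb_ip_align(ndev, pkt_size);
 *	ret = cpdma_chan_submit(rxch, skb, skb->data,
 *				skb_tailroom(skb), 0);
 *
 * On the tx side, directed selects a fixed egress port (1 or 2) in
 * dual-EMAC mode; 0 leaves the decision to the switch.
 */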
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
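
/*
 * A tx path would typically pair this with queue flow control, e.g.
 * (sketch, not from an in-tree driver):
 *
 *	if (unlikely(!cpdma_check_free_tx_desc(txch)))
 *		netif_stop_queue(ndev);
 */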
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
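
/*
 * Sketch of a NAPI poll loop built on cpdma_chan_process().  The EOI
 * vector value for rx is an assumption here; check the SoC TRM or the
 * CPDMA_EOI_* values in your tree's davinci_cpdma.h:
 *
 *	static int foo_poll_rx(struct napi_struct *napi, int budget)
 *	{
 *		int done = cpdma_chan_process(rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			cpdma_chan_int_ctrl(rxch, true);
 *			cpdma_ctlr_eoi(dma, 1);	// rx EOI, assumed value
 *		}
 *		return done;
 *	}
 */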
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
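
/*
 * Example: program a 2-byte rx buffer offset and read it back.  Both
 * calls require has_ext_regs and an ACTIVE controller:
 *
 *	cpdma_control_set(dma, CPDMA_RX_BUFFER_OFFSET, 2);
 *	offset = cpdma_control_get(dma, CPDMA_RX_BUFFER_OFFSET);
 */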
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_get);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
MODULE_LICENSE("GPL");