/*
 * Author: Roy Huang <roy.huang@analog.com>
 *
 * Created: Tue Sep 21 10:52:42 CEST 2004
 *
 * Blackfin SPORT Driver
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/bug.h>
#include <asm/portmux.h>
#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>

#include "bf5xx-sport.h"
/* delay between frame sync pulse and first data bit in multichannel mode */
#define FRAME_DELAY (1<<12)

struct sport_device *sport_handle;
EXPORT_SYMBOL(sport_handle);
/* note: multichannel is in units of 8 channels,
 * tdm_count is the number of channels, NOT the number of channels / 8! */
int sport_set_multichannel(struct sport_device *sport,
                           int tdm_count, u32 mask, int packed)
{
        pr_debug("%s tdm_count=%d mask:0x%08x packed=%d\n", __func__,
                 tdm_count, mask, packed);

        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        if (tdm_count & 0x7)
                return -EINVAL;

        if (tdm_count > 32)
                return -EINVAL; /* Only support up to 32 channels for now */

        if (tdm_count) {
                sport->regs->mcmc1 = ((tdm_count>>3)-1) << 12;
                sport->regs->mcmc2 = FRAME_DELAY | MCMEN |
                                (packed ? (MCDTXPE|MCDRXPE) : 0);

                sport->regs->mtcs0 = mask;
                sport->regs->mrcs0 = mask;
                sport->regs->mtcs1 = 0;
                sport->regs->mrcs1 = 0;
                sport->regs->mtcs2 = 0;
                sport->regs->mrcs2 = 0;
                sport->regs->mtcs3 = 0;
                sport->regs->mrcs3 = 0;
        } else {
                sport->regs->mcmc1 = 0;
                sport->regs->mcmc2 = 0;

                sport->regs->mtcs0 = 0;
                sport->regs->mrcs0 = 0;
        }

        sport->regs->mtcs1 = 0; sport->regs->mtcs2 = 0; sport->regs->mtcs3 = 0;
        sport->regs->mrcs1 = 0; sport->regs->mrcs2 = 0; sport->regs->mrcs3 = 0;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_set_multichannel);
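
/*
 * Illustrative use (a sketch, not part of the driver): enable a 16-slot
 * TDM frame with all 16 channels active and unpacked data. The handle is
 * assumed to come from sport_init(); the values here are hypothetical.
 */
#if 0
static int example_enable_tdm(struct sport_device *sport)
{
        /* 16 channels (a multiple of 8, at most 32), mask 0xFFFF, not packed */
        return sport_set_multichannel(sport, 16, 0xFFFF, 0);
}
#endif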
int sport_config_rx(struct sport_device *sport, unsigned int rcr1,
                    unsigned int rcr2, unsigned int clkdiv, unsigned int fsdiv)
{
        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        sport->regs->rcr1 = rcr1;
        sport->regs->rcr2 = rcr2;
        sport->regs->rclkdiv = clkdiv;
        sport->regs->rfsdiv = fsdiv;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_config_rx);
int sport_config_tx(struct sport_device *sport, unsigned int tcr1,
                    unsigned int tcr2, unsigned int clkdiv, unsigned int fsdiv)
{
        if ((sport->regs->tcr1 & TSPEN) || (sport->regs->rcr1 & RSPEN))
                return -EBUSY;

        sport->regs->tcr1 = tcr1;
        sport->regs->tcr2 = tcr2;
        sport->regs->tclkdiv = clkdiv;
        sport->regs->tfsdiv = fsdiv;

        SSYNC();

        return 0;
}
EXPORT_SYMBOL(sport_config_tx);
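
/*
 * Divisor arithmetic for the config calls above (illustrative; check the
 * Blackfin HRM for the exact clocking model of the specific part): with an
 * internally generated clock, the serial bit clock is roughly
 * SCLK / (2 * (clkdiv + 1)) and the frame sync rate is that bit clock
 * divided by (fsdiv + 1). For example, SCLK = 133 MHz with clkdiv = 21
 * gives a bit clock near 3.02 MHz, and fsdiv = 62 then puts the frame
 * sync near 48 kHz. The numbers here are hypothetical.
 */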
static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
                       size_t fragsize, unsigned int cfg,
                       unsigned int x_count, unsigned int ycount, size_t wdsize)
{
        int i;

        for (i = 0; i < fragcount; ++i) {
                desc[i].next_desc_addr = (unsigned long)&(desc[i + 1]);
                desc[i].start_addr = (unsigned long)buf + i*fragsize;
                desc[i].cfg = cfg;
                desc[i].x_count = x_count;
                desc[i].x_modify = wdsize;
                desc[i].y_count = ycount;
                desc[i].y_modify = wdsize;
        }

        /* make circular */
        desc[fragcount-1].next_desc_addr = (unsigned long)desc;

        pr_debug("setup desc: desc0=%p, next0=%lx, desc1=%p,"
                 "next1=%lx\nx_count=%x,y_count=%x,addr=0x%lx,cfg=0x%x\n",
                 &(desc[0]), desc[0].next_desc_addr,
                 &(desc[1]), desc[1].next_desc_addr,
                 desc[0].x_count, desc[0].y_count,
                 desc[0].start_addr, desc[0].cfg);
}
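
/*
 * Illustrative ring layout: with fragcount = 3, setup_desc() produces
 *   desc[0] -> desc[1] -> desc[2] -> desc[0]  (circular),
 * where desc[i].start_addr = buf + i * fragsize, so the DMA engine walks
 * the buffer one fragment at a time and, with DI_EN set in cfg, raises an
 * interrupt as each fragment completes.
 */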
static int sport_start(struct sport_device *sport)
{
        enable_dma(sport->dma_rx_chan);
        enable_dma(sport->dma_tx_chan);
        sport->regs->rcr1 |= RSPEN;
        sport->regs->tcr1 |= TSPEN;
        SSYNC();

        return 0;
}
static int sport_stop(struct sport_device *sport)
{
        sport->regs->tcr1 &= ~TSPEN;
        sport->regs->rcr1 &= ~RSPEN;
        SSYNC();

        disable_dma(sport->dma_rx_chan);
        disable_dma(sport->dma_tx_chan);
        return 0;
}
static inline int sport_hook_rx_dummy(struct sport_device *sport)
{
        struct dmasg *desc, temp_desc;
        unsigned long flags;

        BUG_ON(sport->dummy_rx_desc == NULL);
        BUG_ON(sport->curr_rx_desc == sport->dummy_rx_desc);

        /* Maybe the dummy buffer descriptor ring is damaged */
        sport->dummy_rx_desc->next_desc_addr =
                        (unsigned long)(sport->dummy_rx_desc+1);

        local_irq_save(flags);
        desc = (struct dmasg *)get_dma_next_desc_ptr(sport->dma_rx_chan);
        /* Copy the descriptor which will be damaged to backup */
        temp_desc = *desc;
        desc->x_count = 0xa;
        desc->y_count = 0;
        desc->next_desc_addr = (unsigned long)(sport->dummy_rx_desc);
        local_irq_restore(flags);
        /* Wait until the dummy buffer descriptor is hooked */
        while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
                        sizeof(struct dmasg)) !=
                        (unsigned long)sport->dummy_rx_desc)
                continue;
        sport->curr_rx_desc = sport->dummy_rx_desc;
        /* Restore the damaged descriptor */
        *desc = temp_desc;

        return 0;
}
static inline int sport_rx_dma_start(struct sport_device *sport, int dummy)
{
        if (dummy) {
                sport->dummy_rx_desc->next_desc_addr =
                                (unsigned long)sport->dummy_rx_desc;
                sport->curr_rx_desc = sport->dummy_rx_desc;
        } else
                sport->curr_rx_desc = sport->dma_rx_desc;

        set_dma_next_desc_addr(sport->dma_rx_chan,
                        (unsigned long)(sport->curr_rx_desc));
        set_dma_x_count(sport->dma_rx_chan, 0);
        set_dma_x_modify(sport->dma_rx_chan, 0);
        set_dma_config(sport->dma_rx_chan, (DMAFLOW_LARGE | NDSIZE_9 |
                        WDSIZE_32 | WNR));
        set_dma_curr_addr(sport->dma_rx_chan, sport->curr_rx_desc->start_addr);
        SSYNC();

        return 0;
}
static inline int sport_tx_dma_start(struct sport_device *sport, int dummy)
{
        if (dummy) {
                sport->dummy_tx_desc->next_desc_addr =
                                (unsigned long)sport->dummy_tx_desc;
                sport->curr_tx_desc = sport->dummy_tx_desc;
        } else
                sport->curr_tx_desc = sport->dma_tx_desc;

        set_dma_next_desc_addr(sport->dma_tx_chan,
                        (unsigned long)(sport->curr_tx_desc));
        set_dma_x_count(sport->dma_tx_chan, 0);
        set_dma_x_modify(sport->dma_tx_chan, 0);
        set_dma_config(sport->dma_tx_chan,
                        (DMAFLOW_LARGE | NDSIZE_9 | WDSIZE_32));
        set_dma_curr_addr(sport->dma_tx_chan, sport->curr_tx_desc->start_addr);
        SSYNC();

        return 0;
}
int sport_rx_start(struct sport_device *sport)
{
        unsigned long flags;

        pr_debug("%s enter\n", __func__);
        if (sport->rx_run)
                return -EBUSY;
        if (sport->tx_run) {
                /* tx is running, rx is not running */
                BUG_ON(sport->dma_rx_desc == NULL);
                BUG_ON(sport->curr_rx_desc != sport->dummy_rx_desc);
                local_irq_save(flags);
                while ((get_dma_curr_desc_ptr(sport->dma_rx_chan) -
                                sizeof(struct dmasg)) !=
                                (unsigned long)sport->dummy_rx_desc)
                        continue;
                sport->dummy_rx_desc->next_desc_addr =
                                (unsigned long)(sport->dma_rx_desc);
                local_irq_restore(flags);
                sport->curr_rx_desc = sport->dma_rx_desc;
        } else {
                sport_tx_dma_start(sport, 1);
                sport_rx_dma_start(sport, 0);
                sport_start(sport);
        }

        sport->rx_run = 1;

        return 0;
}
EXPORT_SYMBOL(sport_rx_start);
int sport_rx_stop(struct sport_device *sport)
{
        pr_debug("%s enter\n", __func__);

        if (!sport->rx_run)
                return 0;
        if (sport->tx_run) {
                /* TX dma is still running, hook the dummy buffer */
                sport_hook_rx_dummy(sport);
        } else {
                /* Both rx and tx dma will be stopped */
                sport_stop(sport);
                sport->curr_rx_desc = NULL;
                sport->curr_tx_desc = NULL;
        }

        sport->rx_run = 0;

        return 0;
}
EXPORT_SYMBOL(sport_rx_stop);
static inline int sport_hook_tx_dummy(struct sport_device *sport)
{
        struct dmasg *desc, temp_desc;
        unsigned long flags;

        BUG_ON(sport->dummy_tx_desc == NULL);
        BUG_ON(sport->curr_tx_desc == sport->dummy_tx_desc);

        sport->dummy_tx_desc->next_desc_addr =
                        (unsigned long)(sport->dummy_tx_desc+1);

        /* Shorten the time on last normal descriptor */
        local_irq_save(flags);
        desc = (struct dmasg *)get_dma_next_desc_ptr(sport->dma_tx_chan);
        /* Store the descriptor which will be damaged */
        temp_desc = *desc;
        desc->x_count = 0xa;
        desc->y_count = 0;
        desc->next_desc_addr = (unsigned long)(sport->dummy_tx_desc);
        local_irq_restore(flags);
        /* Wait until the dummy buffer descriptor is hooked */
        while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
                        sizeof(struct dmasg)) !=
                        (unsigned long)sport->dummy_tx_desc)
                continue;
        sport->curr_tx_desc = sport->dummy_tx_desc;
        /* Restore the damaged descriptor */
        *desc = temp_desc;

        return 0;
}
int sport_tx_start(struct sport_device *sport)
{
        unsigned long flags;

        pr_debug("%s: tx_run:%d, rx_run:%d\n", __func__,
                 sport->tx_run, sport->rx_run);
        if (sport->tx_run)
                return -EBUSY;
        if (sport->rx_run) {
                BUG_ON(sport->dma_tx_desc == NULL);
                BUG_ON(sport->curr_tx_desc != sport->dummy_tx_desc);
                /* Hook the normal buffer descriptor */
                local_irq_save(flags);
                while ((get_dma_curr_desc_ptr(sport->dma_tx_chan) -
                                sizeof(struct dmasg)) !=
                                (unsigned long)sport->dummy_tx_desc)
                        continue;
                sport->dummy_tx_desc->next_desc_addr =
                                (unsigned long)(sport->dma_tx_desc);
                local_irq_restore(flags);
                sport->curr_tx_desc = sport->dma_tx_desc;
        } else {
                sport_tx_dma_start(sport, 0);
                /* Let rx dma run the dummy buffer */
                sport_rx_dma_start(sport, 1);
                sport_start(sport);
        }

        sport->tx_run = 1;

        return 0;
}
EXPORT_SYMBOL(sport_tx_start);
int sport_tx_stop(struct sport_device *sport)
{
        if (!sport->tx_run)
                return 0;
        if (sport->rx_run) {
                /* RX is still running, hook the dummy buffer */
                sport_hook_tx_dummy(sport);
        } else {
                /* Both rx and tx dma stopped */
                sport_stop(sport);
                sport->curr_rx_desc = NULL;
                sport->curr_tx_desc = NULL;
        }

        sport->tx_run = 0;

        return 0;
}
EXPORT_SYMBOL(sport_tx_stop);
static inline int compute_wdsize(size_t wdsize)
{
        switch (wdsize) {
        case 1:
                return WDSIZE_8;
        case 2:
                return WDSIZE_16;
        case 4:
        default:
                return WDSIZE_32;
        }
}
int sport_config_rx_dma(struct sport_device *sport, void *buf,
                        int fragcount, size_t fragsize)
{
        unsigned int x_count;
        unsigned int y_count;
        unsigned int cfg;
        dma_addr_t addr;

        pr_debug("%s buf:%p, frag:%d, fragsize:0x%lx\n", __func__,
                 buf, fragcount, fragsize);

        x_count = fragsize / sport->wdsize;
        y_count = 0;

        /* for fragments larger than 64k words we use 2d dma;
         * split the word count into the product of two factors,
         * each of them less than 64k */
        if (x_count >= 0x10000) {
                int i, count = x_count;

                for (i = 16; i > 0; i--) {
                        x_count = 1 << i;
                        if ((count & (x_count - 1)) == 0) {
                                y_count = count >> i;
                                if (y_count < 0x10000)
                                        break;
                        }
                }
                if (i == 0)
                        return -EINVAL;
        }
        pr_debug("%s(x_count:0x%x, y_count:0x%x)\n", __func__,
                 x_count, y_count);

        if (sport->dma_rx_desc)
                dma_free_coherent(NULL, sport->rx_desc_bytes,
                                  sport->dma_rx_desc, 0);

        /* Allocate a new descriptor ring as the current one. */
        sport->dma_rx_desc = dma_alloc_coherent(NULL,
                        fragcount * sizeof(struct dmasg), &addr, 0);
        sport->rx_desc_bytes = fragcount * sizeof(struct dmasg);

        if (!sport->dma_rx_desc) {
                pr_err("Failed to allocate memory for rx desc\n");
                return -ENOMEM;
        }

        sport->rx_buf = buf;
        sport->rx_fragsize = fragsize;
        sport->rx_frags = fragcount;

        cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) | WNR |
              (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

        if (y_count != 0)
                cfg |= DMA2D;

        setup_desc(sport->dma_rx_desc, buf, fragcount, fragsize,
                   cfg|DMAEN, x_count, y_count, sport->wdsize);

        return 0;
}
EXPORT_SYMBOL(sport_config_rx_dma);
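
/*
 * Worked example of the 2-D split above: a fragment of 0x18000 words gives
 * x_count = 0x18000 >= 0x10000, so the loop scans i = 16 down to 1. At
 * i = 15, x_count = 0x8000 divides 0x18000 evenly (0x18000 = 3 * 0x8000)
 * and y_count = 0x18000 >> 15 = 3; both factors are below 0x10000, so the
 * fragment is transferred as 3 rows of 0x8000 words with DMA2D set.
 */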
int sport_config_tx_dma(struct sport_device *sport, void *buf,
                        int fragcount, size_t fragsize)
{
        unsigned int x_count;
        unsigned int y_count;
        unsigned int cfg;
        dma_addr_t addr;

        pr_debug("%s buf:%p, fragcount:%d, fragsize:0x%lx\n",
                 __func__, buf, fragcount, fragsize);

        x_count = fragsize / sport->wdsize;
        y_count = 0;

        /* for fragments larger than 64k words we use 2d dma;
         * split the word count into the product of two factors,
         * each of them less than 64k */
        if (x_count >= 0x10000) {
                int i, count = x_count;

                for (i = 16; i > 0; i--) {
                        x_count = 1 << i;
                        if ((count & (x_count - 1)) == 0) {
                                y_count = count >> i;
                                if (y_count < 0x10000)
                                        break;
                        }
                }
                if (i == 0)
                        return -EINVAL;
        }
        pr_debug("%s x_count:0x%x, y_count:0x%x\n", __func__,
                 x_count, y_count);

        if (sport->dma_tx_desc) {
                dma_free_coherent(NULL, sport->tx_desc_bytes,
                                  sport->dma_tx_desc, 0);
        }

        sport->dma_tx_desc = dma_alloc_coherent(NULL,
                        fragcount * sizeof(struct dmasg), &addr, 0);
        sport->tx_desc_bytes = fragcount * sizeof(struct dmasg);
        if (!sport->dma_tx_desc) {
                pr_err("Failed to allocate memory for tx desc\n");
                return -ENOMEM;
        }

        sport->tx_buf = buf;
        sport->tx_fragsize = fragsize;
        sport->tx_frags = fragcount;

        cfg = 0x7000 | DI_EN | compute_wdsize(sport->wdsize) |
              (DESC_ELEMENT_COUNT << 8); /* large descriptor mode */

        if (y_count != 0)
                cfg |= DMA2D;

        setup_desc(sport->dma_tx_desc, buf, fragcount, fragsize,
                   cfg|DMAEN, x_count, y_count, sport->wdsize);

        return 0;
}
EXPORT_SYMBOL(sport_config_tx_dma);
/* setup dummy dma descriptor ring, which doesn't generate interrupts;
 * x_modify is set to 0 */
static int sport_config_rx_dummy(struct sport_device *sport)
{
        struct dmasg *desc;
        unsigned config;

        pr_debug("%s entered\n", __func__);
#if L1_DATA_A_LENGTH != 0
        desc = (struct dmasg *) l1_data_sram_alloc(2 * sizeof(*desc));
#else
        {
                dma_addr_t addr;
                desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
        }
#endif
        if (desc == NULL) {
                pr_err("Failed to allocate memory for dummy rx desc\n");
                return -ENOMEM;
        }
        memset(desc, 0, 2 * sizeof(*desc));
        sport->dummy_rx_desc = desc;
        desc->start_addr = (unsigned long)sport->dummy_buf;
        config = DMAFLOW_LARGE | NDSIZE_9 | compute_wdsize(sport->wdsize)
                 | WNR | DMAEN;
        desc->cfg = config;
        desc->x_count = sport->dummy_count/sport->wdsize;
        desc->x_modify = sport->wdsize;
        desc->y_count = 0;
        desc->y_modify = 0;
        memcpy(desc+1, desc, sizeof(*desc));
        desc->next_desc_addr = (unsigned long)(desc+1);
        desc[1].next_desc_addr = (unsigned long)desc;

        return 0;
}
static int sport_config_tx_dummy(struct sport_device *sport)
{
        struct dmasg *desc;
        unsigned int config;

        pr_debug("%s entered\n", __func__);

#if L1_DATA_A_LENGTH != 0
        desc = (struct dmasg *) l1_data_sram_alloc(2 * sizeof(*desc));
#else
        {
                dma_addr_t addr;
                desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0);
        }
#endif
        if (desc == NULL) {
                pr_err("Failed to allocate memory for dummy tx desc\n");
                return -ENOMEM;
        }
        memset(desc, 0, 2 * sizeof(*desc));
        sport->dummy_tx_desc = desc;
        desc->start_addr = (unsigned long)sport->dummy_buf +
                           sport->dummy_count;
        config = DMAFLOW_LARGE | NDSIZE_9 |
                 compute_wdsize(sport->wdsize) | DMAEN;
        desc->cfg = config;
        desc->x_count = sport->dummy_count/sport->wdsize;
        desc->x_modify = sport->wdsize;
        desc->y_count = 0;
        desc->y_modify = 0;
        memcpy(desc+1, desc, sizeof(*desc));
        desc->next_desc_addr = (unsigned long)(desc+1);
        desc[1].next_desc_addr = (unsigned long)desc;

        return 0;
}
unsigned long sport_curr_offset_rx(struct sport_device *sport)
{
        unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan);

        return (unsigned char *)curr - sport->rx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_rx);
unsigned long sport_curr_offset_tx(struct sport_device *sport)
{
        unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan);

        return (unsigned char *)curr - sport->tx_buf;
}
EXPORT_SYMBOL(sport_curr_offset_tx);
void sport_incfrag(struct sport_device *sport, int *frag, int tx)
{
        ++(*frag);
        if (tx == 1 && *frag == sport->tx_frags)
                *frag = 0;

        if (tx == 0 && *frag == sport->rx_frags)
                *frag = 0;
}
EXPORT_SYMBOL(sport_incfrag);
void sport_decfrag(struct sport_device *sport, int *frag, int tx)
{
        --(*frag);
        if (tx == 1 && *frag == 0)
                *frag = sport->tx_frags;

        if (tx == 0 && *frag == 0)
                *frag = sport->rx_frags;
}
EXPORT_SYMBOL(sport_decfrag);
static int sport_check_status(struct sport_device *sport,
                              unsigned int *sport_stat,
                              unsigned int *rx_stat,
                              unsigned int *tx_stat)
{
        int status = 0;

        if (sport_stat) {
                SSYNC();
                status = sport->regs->stat;
                if (status & (TOVF|TUVF|ROVF|RUVF))
                        sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
                SSYNC();
                *sport_stat = status;
        }

        if (rx_stat) {
                SSYNC();
                status = get_dma_curr_irqstat(sport->dma_rx_chan);
                if (status & (DMA_DONE|DMA_ERR))
                        clear_dma_irqstat(sport->dma_rx_chan);
                SSYNC();
                *rx_stat = status;
        }

        if (tx_stat) {
                SSYNC();
                status = get_dma_curr_irqstat(sport->dma_tx_chan);
                if (status & (DMA_DONE|DMA_ERR))
                        clear_dma_irqstat(sport->dma_tx_chan);
                SSYNC();
                *tx_stat = status;
        }

        return 0;
}
int sport_dump_stat(struct sport_device *sport, char *buf, size_t len)
{
        int ret;

        ret = snprintf(buf, len,
                       "sts: 0x%04x\n"
                       "rx dma %d sts: 0x%04x tx dma %d sts: 0x%04x\n",
                       sport->regs->stat,
                       sport->dma_rx_chan,
                       get_dma_curr_irqstat(sport->dma_rx_chan),
                       sport->dma_tx_chan,
                       get_dma_curr_irqstat(sport->dma_tx_chan));
        buf += ret;
        len -= ret;

        ret += snprintf(buf, len,
                        "curr_rx_desc:0x%p, curr_tx_desc:0x%p\n"
                        "dma_rx_desc:0x%p, dma_tx_desc:0x%p\n"
                        "dummy_rx_desc:0x%p, dummy_tx_desc:0x%p\n",
                        sport->curr_rx_desc, sport->curr_tx_desc,
                        sport->dma_rx_desc, sport->dma_tx_desc,
                        sport->dummy_rx_desc, sport->dummy_tx_desc);

        return ret;
}
static irqreturn_t rx_handler(int irq, void *dev_id)
{
        unsigned int rx_stat;
        struct sport_device *sport = dev_id;

        pr_debug("%s enter\n", __func__);
        sport_check_status(sport, NULL, &rx_stat, NULL);
        if (!(rx_stat & DMA_DONE))
                pr_err("rx dma is already stopped\n");

        if (sport->rx_callback) {
                sport->rx_callback(sport->rx_data);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}
static irqreturn_t tx_handler(int irq, void *dev_id)
{
        unsigned int tx_stat;
        struct sport_device *sport = dev_id;

        pr_debug("%s enter\n", __func__);
        sport_check_status(sport, NULL, NULL, &tx_stat);
        if (!(tx_stat & DMA_DONE)) {
                pr_err("tx dma is already stopped\n");
                return IRQ_HANDLED;
        }
        if (sport->tx_callback) {
                sport->tx_callback(sport->tx_data);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}
static irqreturn_t err_handler(int irq, void *dev_id)
{
        unsigned int status = 0;
        struct sport_device *sport = dev_id;

        pr_debug("%s\n", __func__);
        if (sport_check_status(sport, &status, NULL, NULL)) {
                pr_err("error checking status ??\n");
                return IRQ_NONE;
        }

        if (status & (TOVF|TUVF|ROVF|RUVF)) {
                pr_info("sport status error:%s%s%s%s\n",
                        status & TOVF ? " TOVF" : "",
                        status & TUVF ? " TUVF" : "",
                        status & ROVF ? " ROVF" : "",
                        status & RUVF ? " RUVF" : "");
                if (status & TOVF || status & TUVF) {
                        disable_dma(sport->dma_tx_chan);
                        if (sport->tx_run)
                                sport_tx_dma_start(sport, 0);
                        else
                                sport_tx_dma_start(sport, 1);
                        enable_dma(sport->dma_tx_chan);
                } else {
                        disable_dma(sport->dma_rx_chan);
                        if (sport->rx_run)
                                sport_rx_dma_start(sport, 0);
                        else
                                sport_rx_dma_start(sport, 1);
                        enable_dma(sport->dma_rx_chan);
                }
        }
        status = sport->regs->stat;
        if (status & (TOVF|TUVF|ROVF|RUVF))
                sport->regs->stat = (status & (TOVF|TUVF|ROVF|RUVF));
        SSYNC();

        if (sport->err_callback)
                sport->err_callback(sport->err_data);

        return IRQ_HANDLED;
}
int sport_set_rx_callback(struct sport_device *sport,
                          void (*rx_callback)(void *), void *rx_data)
{
        BUG_ON(rx_callback == NULL);
        sport->rx_callback = rx_callback;
        sport->rx_data = rx_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_rx_callback);
int sport_set_tx_callback(struct sport_device *sport,
                          void (*tx_callback)(void *), void *tx_data)
{
        BUG_ON(tx_callback == NULL);
        sport->tx_callback = tx_callback;
        sport->tx_data = tx_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_tx_callback);
int sport_set_err_callback(struct sport_device *sport,
                           void (*err_callback)(void *), void *err_data)
{
        BUG_ON(err_callback == NULL);
        sport->err_callback = err_callback;
        sport->err_data = err_data;

        return 0;
}
EXPORT_SYMBOL(sport_set_err_callback);
struct sport_device *sport_init(struct sport_param *param, unsigned wdsize,
                                unsigned dummy_count, void *private_data)
{
        int ret;
        struct sport_device *sport;

        pr_debug("%s enter\n", __func__);
        BUG_ON(param == NULL);
        BUG_ON(wdsize == 0 || dummy_count == 0);
        sport = kmalloc(sizeof(struct sport_device), GFP_KERNEL);
        if (!sport) {
                pr_err("Failed to allocate memory for sport device\n");
                return NULL;
        }

        memset(sport, 0, sizeof(struct sport_device));
        sport->dma_rx_chan = param->dma_rx_chan;
        sport->dma_tx_chan = param->dma_tx_chan;
        sport->err_irq = param->err_irq;
        sport->regs = param->regs;
        sport->private_data = private_data;

        if (request_dma(sport->dma_rx_chan, "SPORT RX Data") == -EBUSY) {
                pr_err("Failed to request RX dma %d\n",
                       sport->dma_rx_chan);
                goto __init_err1;
        }

        if (set_dma_callback(sport->dma_rx_chan, rx_handler, sport) != 0) {
                pr_err("Failed to request RX irq %d\n",
                       sport->dma_rx_chan);
                goto __init_err2;
        }

        if (request_dma(sport->dma_tx_chan, "SPORT TX Data") == -EBUSY) {
                pr_err("Failed to request TX dma %d\n",
                       sport->dma_tx_chan);
                goto __init_err2;
        }

        if (set_dma_callback(sport->dma_tx_chan, tx_handler, sport) != 0) {
                pr_err("Failed to request TX irq %d\n",
                       sport->dma_tx_chan);
                goto __init_err3;
        }

        if (request_irq(sport->err_irq, err_handler, IRQF_SHARED, "SPORT err",
                        sport) < 0) {
                pr_err("Failed to request err irq:%d\n",
                       sport->err_irq);
                goto __init_err3;
        }

        pr_err("dma rx:%d tx:%d, err irq:%d, regs:%p\n",
               sport->dma_rx_chan, sport->dma_tx_chan,
               sport->err_irq, sport->regs);

        sport->wdsize = wdsize;
        sport->dummy_count = dummy_count;

#if L1_DATA_A_LENGTH != 0
        sport->dummy_buf = l1_data_sram_alloc(dummy_count * 2);
#else
        sport->dummy_buf = kmalloc(dummy_count * 2, GFP_KERNEL);
#endif
        if (sport->dummy_buf == NULL) {
                pr_err("Failed to allocate dummy buffer\n");
                goto __error;
        }

        memset(sport->dummy_buf, 0, dummy_count * 2);
        ret = sport_config_rx_dummy(sport);
        if (ret) {
                pr_err("Failed to config rx dummy ring\n");
                goto __error;
        }
        ret = sport_config_tx_dummy(sport);
        if (ret) {
                pr_err("Failed to config tx dummy ring\n");
                goto __error;
        }

        return sport;
__error:
        free_irq(sport->err_irq, sport);
__init_err3:
        free_dma(sport->dma_tx_chan);
__init_err2:
        free_dma(sport->dma_rx_chan);
__init_err1:
        kfree(sport);
        return NULL;
}
EXPORT_SYMBOL(sport_init);
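
/*
 * Bring-up sketch (illustrative only; the sport_param values, buffer and
 * callback are hypothetical and would come from board/machine code; the
 * SPORT register setup via sport_config_rx() is omitted here):
 */
#if 0
static void example_rx_done(void *data)
{
        /* invoked from rx_handler() each time a fragment completes */
}

static int example_bringup(struct sport_param *param, void *buf,
                           int fragcount, size_t fragsize)
{
        /* 4-byte words, 16-byte dummy buffer per direction */
        struct sport_device *sport = sport_init(param, 4, 16, NULL);

        if (!sport)
                return -ENODEV;

        sport_set_rx_callback(sport, example_rx_done, sport);
        sport_config_rx_dma(sport, buf, fragcount, fragsize);
        return sport_rx_start(sport);
}
#endif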
void sport_done(struct sport_device *sport)
{
        if (sport == NULL)
                return;

        sport_stop(sport);
        if (sport->dma_rx_desc)
                dma_free_coherent(NULL, sport->rx_desc_bytes,
                                  sport->dma_rx_desc, 0);
        if (sport->dma_tx_desc)
                dma_free_coherent(NULL, sport->tx_desc_bytes,
                                  sport->dma_tx_desc, 0);

#if L1_DATA_A_LENGTH != 0
        l1_data_sram_free(sport->dummy_rx_desc);
        l1_data_sram_free(sport->dummy_tx_desc);
        l1_data_sram_free(sport->dummy_buf);
#else
        dma_free_coherent(NULL, 2*sizeof(struct dmasg),
                          sport->dummy_rx_desc, 0);
        dma_free_coherent(NULL, 2*sizeof(struct dmasg),
                          sport->dummy_tx_desc, 0);
        kfree(sport->dummy_buf);
#endif
        free_dma(sport->dma_rx_chan);
        free_dma(sport->dma_tx_chan);
        free_irq(sport->err_irq, sport);

        kfree(sport);
}
EXPORT_SYMBOL(sport_done);
/* It is only used to send several bytes when dma is not enabled;
 * the sport controller is configured but not enabled.
 * Multichannel mode does not work with pio mode. */
/* Used by ac97 to write and read codec registers */
int sport_send_and_recv(struct sport_device *sport, u8 *out_data,
                        u8 *in_data, int len)
{
        unsigned short dma_config;
        unsigned short status;
        unsigned long flags;
        unsigned long wait = 0;

        pr_debug("%s enter, out_data:%p, in_data:%p len:%d\n",
                 __func__, out_data, in_data, len);
        pr_debug("tcr1:0x%04x, tcr2:0x%04x, tclkdiv:0x%04x, tfsdiv:0x%04x\n"
                 "mcmc1:0x%04x, mcmc2:0x%04x\n",
                 sport->regs->tcr1, sport->regs->tcr2,
                 sport->regs->tclkdiv, sport->regs->tfsdiv,
                 sport->regs->mcmc1, sport->regs->mcmc2);
        flush_dcache_range((unsigned)out_data, (unsigned)(out_data + len));

        /* Enable tx dma */
        dma_config = (RESTART | WDSIZE_16 | DI_EN);
        set_dma_start_addr(sport->dma_tx_chan, (unsigned long)out_data);
        set_dma_x_count(sport->dma_tx_chan, len/2);
        set_dma_x_modify(sport->dma_tx_chan, 2);
        set_dma_config(sport->dma_tx_chan, dma_config);
        enable_dma(sport->dma_tx_chan);

        if (in_data != NULL) {
                invalidate_dcache_range((unsigned)in_data,
                                        (unsigned)(in_data + len));
                /* Enable rx dma */
                dma_config = (RESTART | WDSIZE_16 | WNR | DI_EN);
                set_dma_start_addr(sport->dma_rx_chan, (unsigned long)in_data);
                set_dma_x_count(sport->dma_rx_chan, len/2);
                set_dma_x_modify(sport->dma_rx_chan, 2);
                set_dma_config(sport->dma_rx_chan, dma_config);
                enable_dma(sport->dma_rx_chan);
        }

        local_irq_save(flags);
        sport->regs->tcr1 |= TSPEN;
        sport->regs->rcr1 |= RSPEN;
        SSYNC();

        status = get_dma_curr_irqstat(sport->dma_tx_chan);
        while (status & DMA_RUN) {
                udelay(1);
                status = get_dma_curr_irqstat(sport->dma_tx_chan);
                pr_debug("DMA status:0x%04x\n", status);
                if (wait++ > 100)
                        goto __over;
        }
        status = sport->regs->stat;
        wait = 0;

        while (!(status & TXHRE)) {
                pr_debug("sport status:0x%04x\n", status);
                udelay(1);
                status = *(unsigned short *)&sport->regs->stat;
                if (wait++ > 1000)
                        goto __over;
        }
        /* Wait for the last byte sent out */
        udelay(20);
        pr_debug("sport status:0x%04x\n", status);

__over:
        sport->regs->tcr1 &= ~TSPEN;
        sport->regs->rcr1 &= ~RSPEN;
        SSYNC();
        disable_dma(sport->dma_tx_chan);
        /* Clear the status */
        clear_dma_irqstat(sport->dma_tx_chan);
        if (in_data != NULL) {
                disable_dma(sport->dma_rx_chan);
                clear_dma_irqstat(sport->dma_rx_chan);
        }
        SSYNC();
        local_irq_restore(flags);

        return 0;
}
EXPORT_SYMBOL(sport_send_and_recv);
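
/*
 * Illustrative AC97-style use of the PIO path above (a sketch; the frame
 * layout is hypothetical, and a real caller first builds a command/data
 * slot frame of 16-bit words):
 */
#if 0
static int example_codec_write(struct sport_device *sport, u16 *frame,
                               int frame_bytes)
{
        /* transmit the frame; pass NULL to skip capturing a reply */
        return sport_send_and_recv(sport, (u8 *)frame, NULL, frame_bytes);
}
#endif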
MODULE_AUTHOR("Roy Huang");
MODULE_DESCRIPTION("SPORT driver for ADI Blackfin");
MODULE_LICENSE("GPL");