ARM: OMAP: unwrap strings
[deliverable/linux.git] / arch / arm / plat-omap / dma.c
1 /*
2 * linux/arch/arm/plat-omap/dma.c
3 *
4 * Copyright (C) 2003 - 2008 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7 * Graphics DMA and LCD DMA graphics tranformations
8 * by Imre Deak <imre.deak@nokia.com>
9 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12 *
13 * Copyright (C) 2009 Texas Instruments
14 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15 *
16 * Support functions for the OMAP internal DMA channels.
17 *
18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19 * Converted DMA library into DMA platform driver.
20 * - G, Manjunath Kondaiah <manjugk@ti.com>
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License version 2 as
24 * published by the Free Software Foundation.
25 *
26 */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/sched.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/io.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38
39 #include <mach/hardware.h>
40 #include <plat/dma.h>
41
42 #include <plat/tc.h>
43
44 /*
45 * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
46 * channels that an instance of the SDMA IP block can support. Used
47 * to size arrays. (The actual maximum on a particular SoC may be less
48 * than this -- for example, OMAP1 SDMA instances only support 17 logical
49 * DMA channels.)
50 */
#define MAX_LOGICAL_DMA_CH_COUNT	32

#undef DEBUG

#ifndef CONFIG_ARCH_OMAP1
/* Per-channel states used by the OMAP2+ chained-transfer support */
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

/* Whole-chain run state */
enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif

/* Flag in omap_dma_lch.flags: a transfer is currently enabled on the channel */
#define OMAP_DMA_ACTIVE			0x01
/* Writing all-ones to CSR clears every status bit on OMAP2+ */
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffffffff

/* Base of the OMAP1 function-mux registers used for global DMA request muxing */
#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
67
/* Platform hooks for register access etc., supplied at probe time */
static struct omap_system_dma_plat_info *p;
/* SoC-specific DMA attributes (set elsewhere in this file) */
static struct omap_dma_dev_attr *d;

static int enable_1510_mode;	/* non-zero when running in 1510-compat mode */
static u32 errata;		/* bitmask consumed by IS_DMA_ERRATA() */

/* Registers saved and restored around a controller context loss */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
} omap_dma_global_context;

/*
 * Software bookkeeping for a chain of linked logical channels:
 * the member channels plus a circular queue (head/tail/count).
 */
struct dma_link_info {
	int *linked_dmach_q;	/* array of member logical channel numbers */
	int no_of_lchs_linked;	/* number of channels in this chain */

	int q_count;
	int q_tail;
	int q_head;

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* static or dynamic chaining */

};

/* One entry per chain id; allocated elsewhere — NULL until chains are used */
static struct dma_link_info *dma_linked_lch;
94
#ifndef CONFIG_ARCH_OMAP1

/*
 * Chain handling macros.  The queue is circular: head/tail wrap modulo
 * the number of linked channels.
 */
#define OMAP_DMA_CHAIN_QINIT(chain_id) \
	do { \
		dma_linked_lch[chain_id].q_head = \
		dma_linked_lch[chain_id].q_tail = \
		dma_linked_lch[chain_id].q_count = 0; \
	} while (0)
#define OMAP_DMA_CHAIN_QFULL(chain_id) \
		(dma_linked_lch[chain_id].no_of_lchs_linked == \
		dma_linked_lch[chain_id].q_count)
/*
 * True when exactly one free slot remains.  This was previously wrapped
 * in a do { } while (0), which discards the comparison result and cannot
 * even compile where a value is expected; make it an expression like
 * QFULL/QEMPTY.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id) \
		((dma_linked_lch[chain_id].no_of_lchs_linked - 1) == \
		dma_linked_lch[chain_id].q_count)
#define OMAP_DMA_CHAIN_QEMPTY(chain_id) \
		(0 == dma_linked_lch[chain_id].q_count)
/* Advance a queue index (head or tail) with wrap-around */
#define __OMAP_DMA_CHAIN_INCQ(end) \
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \
	do { \
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
		dma_linked_lch[chain_id].q_count--; \
	} while (0)

#define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \
	do { \
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
		dma_linked_lch[chain_id].q_count++; \
	} while (0)
#endif
128
static int dma_lch_count;		/* logical channels this SoC provides */
static int dma_chan_count;		/* channels usable for allocation */
/* Optional cap on usable channels — set outside this chunk; TODO confirm */
static int omap_dma_reserve_channels;

static spinlock_t dma_chan_lock;	/* guards dma_chan[] allocation state */
static struct omap_dma_lch *dma_chan;	/* per-logical-channel bookkeeping */

/* Forward declarations for helpers defined later in this file */
static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

/* Loud marker for OMAP1-only paths reached on 24xx hardware */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__);
142
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
/* Compile-time constant on everything but 15xx so the checks fold away */
#define omap_dma_in_1510_mode()		0
#endif
152
#ifdef CONFIG_ARCH_OMAP1
/*
 * Read which device is muxed onto global DMA request line @req.
 * The OMAP1 function-mux registers pack 5 request lines per 32-bit
 * register, 6 bits each; the stored value is the device id minus one.
 */
static inline int get_gdma_dev(int req)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;

	return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

/* Mux device @dev onto global DMA request line @req (inverse of the above) */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
/* No function-mux on OMAP2+: stub these out */
#define set_gdma_dev(req, dev)	do {} while (0)
#define omap_readl(reg)		0
#define omap_writel(val, reg)	do {} while (0)
#endif
178
/*
 * Set the DMA priority for a channel.  On OMAP1 the priority lives in
 * the per-port TC priority register selected by @dst_port (4-bit field
 * at bits 11:8); on OMAP2+ it is bit 6 of the channel's CCR and
 * @dst_port is ignored.
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (cpu_class_is_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}

	if (cpu_class_is_omap2()) {
		u32 ccr;

		/* OMAP2+: priority is a single high/low bit in CCR */
		ccr = p->dma_read(CCR, lch);
		if (priority)
			ccr |= (1 << 6);
		else
			ccr &= ~(1 << 6);
		p->dma_write(ccr, CCR, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_priority);
220
/*
 * Program the transfer geometry and synchronization for a channel:
 * element @data_type (CSDP bits 1:0), @elem_count elements per frame
 * (CEN) and @frame_count frames (CFN), plus frame/block sync mode and,
 * on OMAP2+, the hardware request line @dma_trigger and the
 * source/destination synchronization selection.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* OMAP1: frame sync is CCR bit 5, block sync is CCR2 bit 2 */
		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* Request line is split: low 5 bits plus upper bits at 19+ */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
283
/*
 * Configure constant-fill / transparent-copy graphics operation for a
 * channel, plus the fill/compare @color.  Not available in 1510 mode
 * (BUG()s).  @mode == OMAP_DMA_COLOR_DIS disables both features.
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* OMAP1: mode lives in CCR2 bits 1:0 */
		w = p->dma_read(CCR2, lch);
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		p->dma_write(w, CCR2, lch);

		w = p->dma_read(LCH_CTRL, lch);
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			p->dma_write(color, COLOR, lch);
			w |= 1;		/* Channel type G */
		}
		p->dma_write(w, LCH_CTRL, lch);
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* OMAP2+: mode lives in CCR bits 17 (copy) and 16 (fill) */
		val = p->dma_read(CCR, lch);
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		p->dma_write(val, CCR, lch);

		/* COLOR is a 24-bit register on OMAP2+ */
		color &= 0xffffff;
		p->dma_write(color, COLOR, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
343
344 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
345 {
346 if (cpu_class_is_omap2()) {
347 u32 csdp;
348
349 csdp = p->dma_read(CSDP, lch);
350 csdp &= ~(0x3 << 16);
351 csdp |= (mode << 16);
352 p->dma_write(csdp, CSDP, lch);
353 }
354 }
355 EXPORT_SYMBOL(omap_set_dma_write_mode);
356
357 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
358 {
359 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
360 u32 l;
361
362 l = p->dma_read(LCH_CTRL, lch);
363 l &= ~0x7;
364 l |= mode;
365 p->dma_write(l, LCH_CTRL, lch);
366 }
367 }
368 EXPORT_SYMBOL(omap_set_dma_channel_mode);
369
/*
 * Program the source side of a transfer: addressing mode @src_amode
 * (CCR bits 13:12), start address (CSSA) and element/frame indexes
 * (CSEI/CSFI).  Note that @src_port is only for omap1, where it selects
 * the source port in CSDP bits 6:2.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		w = p->dma_read(CSDP, lch);
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		p->dma_write(w, CSDP, lch);
	}

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	p->dma_write(l, CCR, lch);

	p->dma_write(src_start, CSSA, lch);

	p->dma_write(src_ei, CSEI, lch);
	p->dma_write(src_fi, CSFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_params);
397
/*
 * Convenience wrapper: program transfer geometry, source and destination
 * settings from a single parameter struct, and — when either priority is
 * non-zero — the per-channel read/write priorities as well.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
416
417 void omap_set_dma_src_index(int lch, int eidx, int fidx)
418 {
419 if (cpu_class_is_omap2())
420 return;
421
422 p->dma_write(eidx, CSEI, lch);
423 p->dma_write(fidx, CSFI, lch);
424 }
425 EXPORT_SYMBOL(omap_set_dma_src_index);
426
427 void omap_set_dma_src_data_pack(int lch, int enable)
428 {
429 u32 l;
430
431 l = p->dma_read(CSDP, lch);
432 l &= ~(1 << 6);
433 if (enable)
434 l |= (1 << 6);
435 p->dma_write(l, CSDP, lch);
436 }
437 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
438
/*
 * Set the source burst size in CSDP bits 8:7.  The field encoding
 * differs between OMAP1 and OMAP2+, and combinations the hardware
 * cannot do (burst-8 and burst-16 on OMAP1) hit BUG().
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
483
/*
 * Program the destination side of a transfer: addressing mode
 * @dest_amode (CCR bits 15:14), start address (CDSA) and element/frame
 * indexes (CDEI/CDFI).  Note that @dest_port is only for OMAP1, where
 * it selects the destination port in CSDP bits 13:9.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		l = p->dma_read(CSDP, lch);
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		p->dma_write(l, CSDP, lch);
	}

	l = p->dma_read(CCR, lch);
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	p->dma_write(l, CCR, lch);

	p->dma_write(dest_start, CDSA, lch);

	p->dma_write(dst_ei, CDEI, lch);
	p->dma_write(dst_fi, CDFI, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_params);
509
510 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
511 {
512 if (cpu_class_is_omap2())
513 return;
514
515 p->dma_write(eidx, CDEI, lch);
516 p->dma_write(fidx, CDFI, lch);
517 }
518 EXPORT_SYMBOL(omap_set_dma_dest_index);
519
520 void omap_set_dma_dest_data_pack(int lch, int enable)
521 {
522 u32 l;
523
524 l = p->dma_read(CSDP, lch);
525 l &= ~(1 << 13);
526 if (enable)
527 l |= 1 << 13;
528 p->dma_write(l, CSDP, lch);
529 }
530 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
531
/*
 * Set the destination burst size in CSDP bits 15:14.  Encodings differ
 * between OMAP1 and OMAP2+; burst-16 is not available on OMAP1 and
 * triggers BUG().
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	p->dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
573
/*
 * Clear any stale channel status (CSR is cleared by reading on OMAP1,
 * by writing all-ones on OMAP2+) and then arm the interrupt sources
 * currently recorded in dma_chan[lch].enabled_irqs via CICR.
 */
static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR */
	if (cpu_class_is_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
585
/*
 * Mask all interrupt sources for the channel (CICR = 0) and clear any
 * status already latched in CSR so nothing fires once re-enabled.
 */
static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);
	/* Clear CSR */
	if (cpu_class_is_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}
596
/*
 * Add @bits to the channel's software IRQ mask.  The hardware is only
 * updated on the next omap_enable_channel_irq() call.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);
602
/*
 * Remove @bits from the channel's software IRQ mask; takes effect the
 * next time the channel's interrupts are (re)armed.
 */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
608
/*
 * Point this channel's CLNK_CTRL at its successor and set ENABLE_LNK
 * (bit 15) so the controller chains into the next channel by itself.
 * On OMAP1 the STOP_LNK bit (14) is cleared first.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chained transfers use next_linked_ch instead */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	p->dma_write(l, CLNK_CTRL, lch);
}
630
/*
 * Break the hardware link out of this channel: mask its interrupts,
 * then set STOP_LNK (OMAP1) or clear ENABLE_LNK (OMAP2+), and mark
 * the channel inactive in software.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	if (cpu_class_is_omap1()) {
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
653
/*
 * Route channel @lch's interrupt onto line 0 (OMAP2+ only): clear any
 * pending status bit, then set the channel's bit in IRQENABLE_L0.
 * dma_chan_lock serializes the read-modify-write of the shared register.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	/* Enable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val |= 1 << lch;
	p->dma_write(val, IRQENABLE_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
671
/*
 * Inverse of omap2_enable_irq_lch(): clear the channel's bit in
 * IRQENABLE_L0 and then clear any status it already latched.
 */
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* Disable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	p->dma_write(val, IRQENABLE_L0, lch);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
689
/*
 * Allocate a free logical channel and bind it to @dev_id.  @callback,
 * if set, is invoked with @data when the channel raises an interrupt.
 * On 16xx a non-zero @dev_id is muxed onto the channel dynamically via
 * set_gdma_dev(); on 7xx/15xx it is written to CCR directly.  The
 * allocated channel number is returned through @dma_ch_out.
 *
 * Returns 0 on success, -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* dev_id 0 (no sync device): first free channel is fine */
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		p->dma_write(dev_id, CCR, free_ch);
	}

	if (cpu_class_is_omap2()) {
		omap_enable_channel_irq(free_ch);
		omap2_enable_irq_lch(free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
766
/*
 * Release a channel obtained with omap_request_dma(): mask and clear
 * its interrupts, stop any transfer in progress (CCR = 0), clear its
 * state on OMAP2+, and return it to the free pool.  Freeing a channel
 * that was never allocated only logs an error.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable interrupt for logical channel */
	if (cpu_class_is_omap2())
		omap2_disable_irq_lch(lch);

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	/* Clear registers */
	if (cpu_class_is_omap2())
		omap_clear_dma(lch);

	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
798
/**
 * @brief omap_dma_set_global_params : Set global priority settings for dma
 *
 * @param arb_rate - arbitration rate, GCR bits 23:16 (0 is treated as 1)
 * @param max_fifo_depth - GCR bits 7:0 (0 is treated as 1)
 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
 * 						   DMA_THREAD_RESERVE_ONET
 * 						   DMA_THREAD_RESERVE_TWOT
 * 						   DMA_THREAD_RESERVE_THREET
 *
 * OMAP2+ only; logs an error and returns on OMAP1.
 */
void
omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
{
	u32 reg;

	if (!cpu_class_is_omap2()) {
		printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
		return;
	}

	if (max_fifo_depth == 0)
		max_fifo_depth = 1;
	if (arb_rate == 0)
		arb_rate = 1;

	reg = 0xff & max_fifo_depth;
	reg |= (0x3 & tparams) << 12;
	reg |= (arb_rate & 0xff) << 16;

	p->dma_write(reg, GCR, 0);
}
EXPORT_SYMBOL(omap_dma_set_global_params);
831
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 *	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 *
 * Returns 0, or -EINVAL for an out-of-range @lch.
 */
int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
		      unsigned char write_prio)
{
	u32 l;

	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
		printk(KERN_ERR "Invalid channel id\n");
		return -EINVAL;
	}
	l = p->dma_read(CCR, lch);
	l &= ~((1 << 6) | (1 << 26));
	/* Write-side priority (CCR bit 26) is not programmed on 242x */
	if (cpu_class_is_omap2() && !cpu_is_omap242x())
		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
	else
		l |= ((read_prio & 0x1) << 6);

	p->dma_write(l, CCR, lch);

	return 0;
}
EXPORT_SYMBOL(omap_dma_set_prio_lch);
863
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* The SoC-specific clear hook runs with local interrupts disabled */
	local_irq_save(flags);
	p->clear_dma(lch);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(omap_clear_dma);
877
878 void omap_start_dma(int lch)
879 {
880 u32 l;
881
882 /*
883 * The CPC/CDAC register needs to be initialized to zero
884 * before starting dma transfer.
885 */
886 if (cpu_is_omap15xx())
887 p->dma_write(0, CPC, lch);
888 else
889 p->dma_write(0, CDAC, lch);
890
891 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
892 int next_lch, cur_lch;
893 char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
894
895 dma_chan_link_map[lch] = 1;
896 /* Set the link register of the first channel */
897 enable_lnk(lch);
898
899 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
900 cur_lch = dma_chan[lch].next_lch;
901 do {
902 next_lch = dma_chan[cur_lch].next_lch;
903
904 /* The loop case: we've been here already */
905 if (dma_chan_link_map[cur_lch])
906 break;
907 /* Mark the current channel */
908 dma_chan_link_map[cur_lch] = 1;
909
910 enable_lnk(cur_lch);
911 omap_enable_channel_irq(cur_lch);
912
913 cur_lch = next_lch;
914 } while (next_lch != -1);
915 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
916 p->dma_write(lch, CLNK_CTRL, lch);
917
918 omap_enable_channel_irq(lch);
919
920 l = p->dma_read(CCR, lch);
921
922 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
923 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
924 l |= OMAP_DMA_CCR_EN;
925
926 /*
927 * As dma_write() uses IO accessors which are weakly ordered, there
928 * is no guarantee that data in coherent DMA memory will be visible
929 * to the DMA device. Add a memory barrier here to ensure that any
930 * such data is visible prior to enabling DMA.
931 */
932 mb();
933 p->dma_write(l, CCR, lch);
934
935 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
936 }
937 EXPORT_SYMBOL(omap_start_dma);
938
/*
 * Stop the transfer on @lch.  With errata i541 and the src/dst-sync
 * bit set, the controller is put into no-standby mode and we poll (up
 * to 100 x 5us) for the sDMA FIFO read/write activity bits to clear
 * after dropping CCR_EN; otherwise the enable bit is simply cleared.
 * Any hardware links out of this channel's chain are also torn down.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		p->dma_write(l , OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled.  This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Walk the link chain, disabling each member exactly once */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
1009
1010 /*
1011 * Allows changing the DMA callback function or data. This may be needed if
1012 * the driver shares a single DMA channel for multiple dma triggers.
1013 */
1014 int omap_set_dma_callback(int lch,
1015 void (*callback)(int lch, u16 ch_status, void *data),
1016 void *data)
1017 {
1018 unsigned long flags;
1019
1020 if (lch < 0)
1021 return -ENODEV;
1022
1023 spin_lock_irqsave(&dma_chan_lock, flags);
1024 if (dma_chan[lch].dev_id == -1) {
1025 printk(KERN_ERR "DMA callback for not set for free channel\n");
1026 spin_unlock_irqrestore(&dma_chan_lock, flags);
1027 return -EINVAL;
1028 }
1029 dma_chan[lch].callback = callback;
1030 dma_chan[lch].data = data;
1031 spin_unlock_irqrestore(&dma_chan_lock, flags);
1032
1033 return 0;
1034 }
1035 EXPORT_SYMBOL(omap_set_dma_callback);
1036
/*
 * Returns current physical source address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CSSA_L register overflow between the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	if (cpu_is_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	/* Errata 3.3: CSAC may read 0 once; re-read */
	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!cpu_is_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	/* OMAP1 registers hold only the low 16 bits; take the high half
	 * of the address from the programmed start address */
	if (cpu_class_is_omap1())
		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_src_pos);
1075
/*
 * Returns current physical destination address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CDSA_L register overflow between the two reads resulting
 * in incorrect return value.
 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	if (cpu_is_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!cpu_is_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	/* OMAP1: combine the low 16 bits with the high half of CDSA */
	if (cpu_class_is_omap1())
		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
EXPORT_SYMBOL(omap_get_dma_dst_pos);
1114
/* Return non-zero when the channel's enable bit (CCR_EN) is set. */
int omap_get_dma_active_status(int lch)
{
	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
}
EXPORT_SYMBOL(omap_get_dma_active_status);
1120
/*
 * Return 1 if any DMA is in flight: the OMAP1 LCD DMA engine, or any
 * logical channel with its enable bit set; 0 otherwise.
 */
int omap_dma_running(void)
{
	int lch;

	if (cpu_class_is_omap1())
		if (omap_lcd_dma_running())
			return 1;

	for (lch = 0; lch < dma_chan_count; lch++)
		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
			return 1;

	return 0;
}
1135
/*
 * lch_queue DMA will start right after lch_head one is finished.
 * For this DMA link to start, you still need to start (see omap_start_dma)
 * the first one. That will fire up the entire queue.
 */
void omap_dma_link_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		/* 1510 mode: only self-linking (CCR bits 9:8) is possible */
		if (lch_head == lch_queue) {
			p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
								CCR, lch_head);
			return;
		}
		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	if ((dma_chan[lch_head].dev_id == -1) ||
	    (dma_chan[lch_queue].dev_id == -1)) {
		pr_err("omap_dma: trying to link non requested channels\n");
		dump_stack();
	}

	/* The hardware link register is only programmed at start time */
	dma_chan[lch_head].next_lch = lch_queue;
}
EXPORT_SYMBOL(omap_dma_link_lch);
1163
/*
 * Once the DMA queue is stopped, we can destroy it.
 */
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		/* 1510 mode: undo the self-link by clearing CCR bits 9:8 */
		if (lch_head == lch_queue) {
			p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
								CCR, lch_head);
			return;
		}
		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	if (dma_chan[lch_head].next_lch != lch_queue ||
	    dma_chan[lch_head].next_lch == -1) {
		pr_err("omap_dma: trying to unlink non linked channels\n");
		dump_stack();
	}

	/* Unlinking an active chain would leave the hardware link armed */
	if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
	    (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
		pr_err("omap_dma: You need to stop the DMA channels before unlinking\n");
		dump_stack();
	}

	dma_chan[lch_head].next_lch = -1;
}
EXPORT_SYMBOL(omap_dma_unlink_lch);
1195
#ifndef CONFIG_ARCH_OMAP1
/*
 * Create chain of DMA channels: insert @lch_queue after @lch_head in
 * the circular doubly-linked software chain and program both channels'
 * CLNK_CTRL next-channel fields (low 5 bits) to match.
 */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	l = p->dma_read(CLNK_CTRL, lch_head);
	l &= ~(0x1f);
	l |= lch_queue;
	p->dma_write(l, CLNK_CTRL, lch_head);

	l = p->dma_read(CLNK_CTRL, lch_queue);
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	p->dma_write(l, CLNK_CTRL, lch_queue);
}
1230
1231 /**
1232 * @brief omap_request_dma_chain : Request a chain of DMA channels
1233 *
1234 * @param dev_id - Device id using the dma channel
1235 * @param dev_name - Device name
1236 * @param callback - Call back function
1237 * @chain_id -
1238 * @no_of_chans - Number of channels requested
1239 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1240 * OMAP_DMA_DYNAMIC_CHAIN
1241 * @params - Channel parameters
1242 *
1243 * @return - Success : 0
1244 * Failure: -EINVAL/-ENOMEM
1245 */
1246 int omap_request_dma_chain(int dev_id, const char *dev_name,
1247 void (*callback) (int lch, u16 ch_status,
1248 void *data),
1249 int *chain_id, int no_of_chans, int chain_mode,
1250 struct omap_dma_channel_params params)
1251 {
1252 int *channels;
1253 int i, err;
1254
1255 /* Is the chain mode valid ? */
1256 if (chain_mode != OMAP_DMA_STATIC_CHAIN
1257 && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1258 printk(KERN_ERR "Invalid chain mode requested\n");
1259 return -EINVAL;
1260 }
1261
1262 if (unlikely((no_of_chans < 1
1263 || no_of_chans > dma_lch_count))) {
1264 printk(KERN_ERR "Invalid Number of channels requested\n");
1265 return -EINVAL;
1266 }
1267
1268 /*
1269 * Allocate a queue to maintain the status of the channels
1270 * in the chain
1271 */
1272 channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1273 if (channels == NULL) {
1274 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1275 return -ENOMEM;
1276 }
1277
1278 /* request and reserve DMA channels for the chain */
1279 for (i = 0; i < no_of_chans; i++) {
1280 err = omap_request_dma(dev_id, dev_name,
1281 callback, NULL, &channels[i]);
1282 if (err < 0) {
1283 int j;
1284 for (j = 0; j < i; j++)
1285 omap_free_dma(channels[j]);
1286 kfree(channels);
1287 printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1288 return err;
1289 }
1290 dma_chan[channels[i]].prev_linked_ch = -1;
1291 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1292
1293 /*
1294 * Allowing client drivers to set common parameters now,
1295 * so that later only relevant (src_start, dest_start
1296 * and element count) can be set
1297 */
1298 omap_set_dma_params(channels[i], &params);
1299 }
1300
1301 *chain_id = channels[0];
1302 dma_linked_lch[*chain_id].linked_dmach_q = channels;
1303 dma_linked_lch[*chain_id].chain_mode = chain_mode;
1304 dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1305 dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1306
1307 for (i = 0; i < no_of_chans; i++)
1308 dma_chan[channels[i]].chain_id = *chain_id;
1309
1310 /* Reset the Queue pointers */
1311 OMAP_DMA_CHAIN_QINIT(*chain_id);
1312
1313 /* Set up the chain */
1314 if (no_of_chans == 1)
1315 create_dma_lch_chain(channels[0], channels[0]);
1316 else {
1317 for (i = 0; i < (no_of_chans - 1); i++)
1318 create_dma_lch_chain(channels[i], channels[i + 1]);
1319 }
1320
1321 return 0;
1322 }
1323 EXPORT_SYMBOL(omap_request_dma_chain);
1324
1325 /**
1326 * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1327 * params after setting it. Dont do this while dma is running!!
1328 *
1329 * @param chain_id - Chained logical channel id.
1330 * @param params
1331 *
1332 * @return - Success : 0
1333 * Failure : -EINVAL
1334 */
1335 int omap_modify_dma_chain_params(int chain_id,
1336 struct omap_dma_channel_params params)
1337 {
1338 int *channels;
1339 u32 i;
1340
1341 /* Check for input params */
1342 if (unlikely((chain_id < 0
1343 || chain_id >= dma_lch_count))) {
1344 printk(KERN_ERR "Invalid chain id\n");
1345 return -EINVAL;
1346 }
1347
1348 /* Check if the chain exists */
1349 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1350 printk(KERN_ERR "Chain doesn't exists\n");
1351 return -EINVAL;
1352 }
1353 channels = dma_linked_lch[chain_id].linked_dmach_q;
1354
1355 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1356 /*
1357 * Allowing client drivers to set common parameters now,
1358 * so that later only relevant (src_start, dest_start
1359 * and element count) can be set
1360 */
1361 omap_set_dma_params(channels[i], &params);
1362 }
1363
1364 return 0;
1365 }
1366 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1367
1368 /**
1369 * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1370 *
1371 * @param chain_id
1372 *
1373 * @return - Success : 0
1374 * Failure : -EINVAL
1375 */
1376 int omap_free_dma_chain(int chain_id)
1377 {
1378 int *channels;
1379 u32 i;
1380
1381 /* Check for input params */
1382 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1383 printk(KERN_ERR "Invalid chain id\n");
1384 return -EINVAL;
1385 }
1386
1387 /* Check if the chain exists */
1388 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1389 printk(KERN_ERR "Chain doesn't exists\n");
1390 return -EINVAL;
1391 }
1392
1393 channels = dma_linked_lch[chain_id].linked_dmach_q;
1394 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1395 dma_chan[channels[i]].next_linked_ch = -1;
1396 dma_chan[channels[i]].prev_linked_ch = -1;
1397 dma_chan[channels[i]].chain_id = -1;
1398 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1399 omap_free_dma(channels[i]);
1400 }
1401
1402 kfree(channels);
1403
1404 dma_linked_lch[chain_id].linked_dmach_q = NULL;
1405 dma_linked_lch[chain_id].chain_mode = -1;
1406 dma_linked_lch[chain_id].chain_state = -1;
1407
1408 return (0);
1409 }
1410 EXPORT_SYMBOL(omap_free_dma_chain);
1411
1412 /**
1413 * @brief omap_dma_chain_status - Check if the chain is in
1414 * active / inactive state.
1415 * @param chain_id
1416 *
1417 * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1418 * Failure : -EINVAL
1419 */
1420 int omap_dma_chain_status(int chain_id)
1421 {
1422 /* Check for input params */
1423 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1424 printk(KERN_ERR "Invalid chain id\n");
1425 return -EINVAL;
1426 }
1427
1428 /* Check if the chain exists */
1429 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1430 printk(KERN_ERR "Chain doesn't exists\n");
1431 return -EINVAL;
1432 }
1433 pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1434 dma_linked_lch[chain_id].q_count);
1435
1436 if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1437 return OMAP_DMA_CHAIN_INACTIVE;
1438
1439 return OMAP_DMA_CHAIN_ACTIVE;
1440 }
1441 EXPORT_SYMBOL(omap_dma_chain_status);
1442
1443 /**
1444 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1445 * set the params and start the transfer.
1446 *
1447 * @param chain_id
1448 * @param src_start - buffer start address
1449 * @param dest_start - Dest address
1450 * @param elem_count
1451 * @param frame_count
1452 * @param callbk_data - channel callback parameter data.
1453 *
1454 * @return - Success : 0
1455 * Failure: -EINVAL/-EBUSY
1456 */
1457 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1458 int elem_count, int frame_count, void *callbk_data)
1459 {
1460 int *channels;
1461 u32 l, lch;
1462 int start_dma = 0;
1463
1464 /*
1465 * if buffer size is less than 1 then there is
1466 * no use of starting the chain
1467 */
1468 if (elem_count < 1) {
1469 printk(KERN_ERR "Invalid buffer size\n");
1470 return -EINVAL;
1471 }
1472
1473 /* Check for input params */
1474 if (unlikely((chain_id < 0
1475 || chain_id >= dma_lch_count))) {
1476 printk(KERN_ERR "Invalid chain id\n");
1477 return -EINVAL;
1478 }
1479
1480 /* Check if the chain exists */
1481 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1482 printk(KERN_ERR "Chain doesn't exist\n");
1483 return -EINVAL;
1484 }
1485
1486 /* Check if all the channels in chain are in use */
1487 if (OMAP_DMA_CHAIN_QFULL(chain_id))
1488 return -EBUSY;
1489
1490 /* Frame count may be negative in case of indexed transfers */
1491 channels = dma_linked_lch[chain_id].linked_dmach_q;
1492
1493 /* Get a free channel */
1494 lch = channels[dma_linked_lch[chain_id].q_tail];
1495
1496 /* Store the callback data */
1497 dma_chan[lch].data = callbk_data;
1498
1499 /* Increment the q_tail */
1500 OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1501
1502 /* Set the params to the free channel */
1503 if (src_start != 0)
1504 p->dma_write(src_start, CSSA, lch);
1505 if (dest_start != 0)
1506 p->dma_write(dest_start, CDSA, lch);
1507
1508 /* Write the buffer size */
1509 p->dma_write(elem_count, CEN, lch);
1510 p->dma_write(frame_count, CFN, lch);
1511
1512 /*
1513 * If the chain is dynamically linked,
1514 * then we may have to start the chain if its not active
1515 */
1516 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1517
1518 /*
1519 * In Dynamic chain, if the chain is not started,
1520 * queue the channel
1521 */
1522 if (dma_linked_lch[chain_id].chain_state ==
1523 DMA_CHAIN_NOTSTARTED) {
1524 /* Enable the link in previous channel */
1525 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1526 DMA_CH_QUEUED)
1527 enable_lnk(dma_chan[lch].prev_linked_ch);
1528 dma_chan[lch].state = DMA_CH_QUEUED;
1529 }
1530
1531 /*
1532 * Chain is already started, make sure its active,
1533 * if not then start the chain
1534 */
1535 else {
1536 start_dma = 1;
1537
1538 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1539 DMA_CH_STARTED) {
1540 enable_lnk(dma_chan[lch].prev_linked_ch);
1541 dma_chan[lch].state = DMA_CH_QUEUED;
1542 start_dma = 0;
1543 if (0 == ((1 << 7) & p->dma_read(
1544 CCR, dma_chan[lch].prev_linked_ch))) {
1545 disable_lnk(dma_chan[lch].
1546 prev_linked_ch);
1547 pr_debug("\n prev ch is stopped\n");
1548 start_dma = 1;
1549 }
1550 }
1551
1552 else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1553 == DMA_CH_QUEUED) {
1554 enable_lnk(dma_chan[lch].prev_linked_ch);
1555 dma_chan[lch].state = DMA_CH_QUEUED;
1556 start_dma = 0;
1557 }
1558 omap_enable_channel_irq(lch);
1559
1560 l = p->dma_read(CCR, lch);
1561
1562 if ((0 == (l & (1 << 24))))
1563 l &= ~(1 << 25);
1564 else
1565 l |= (1 << 25);
1566 if (start_dma == 1) {
1567 if (0 == (l & (1 << 7))) {
1568 l |= (1 << 7);
1569 dma_chan[lch].state = DMA_CH_STARTED;
1570 pr_debug("starting %d\n", lch);
1571 p->dma_write(l, CCR, lch);
1572 } else
1573 start_dma = 0;
1574 } else {
1575 if (0 == (l & (1 << 7)))
1576 p->dma_write(l, CCR, lch);
1577 }
1578 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1579 }
1580 }
1581
1582 return 0;
1583 }
1584 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1585
1586 /**
1587 * @brief omap_start_dma_chain_transfers - Start the chain
1588 *
1589 * @param chain_id
1590 *
1591 * @return - Success : 0
1592 * Failure : -EINVAL/-EBUSY
1593 */
1594 int omap_start_dma_chain_transfers(int chain_id)
1595 {
1596 int *channels;
1597 u32 l, i;
1598
1599 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1600 printk(KERN_ERR "Invalid chain id\n");
1601 return -EINVAL;
1602 }
1603
1604 channels = dma_linked_lch[chain_id].linked_dmach_q;
1605
1606 if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1607 printk(KERN_ERR "Chain is already started\n");
1608 return -EBUSY;
1609 }
1610
1611 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1612 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1613 i++) {
1614 enable_lnk(channels[i]);
1615 omap_enable_channel_irq(channels[i]);
1616 }
1617 } else {
1618 omap_enable_channel_irq(channels[0]);
1619 }
1620
1621 l = p->dma_read(CCR, channels[0]);
1622 l |= (1 << 7);
1623 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1624 dma_chan[channels[0]].state = DMA_CH_STARTED;
1625
1626 if ((0 == (l & (1 << 24))))
1627 l &= ~(1 << 25);
1628 else
1629 l |= (1 << 25);
1630 p->dma_write(l, CCR, channels[0]);
1631
1632 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1633
1634 return 0;
1635 }
1636 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1637
1638 /**
1639 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1640 *
1641 * @param chain_id
1642 *
1643 * @return - Success : 0
1644 * Failure : EINVAL
1645 */
1646 int omap_stop_dma_chain_transfers(int chain_id)
1647 {
1648 int *channels;
1649 u32 l, i;
1650 u32 sys_cf = 0;
1651
1652 /* Check for input params */
1653 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1654 printk(KERN_ERR "Invalid chain id\n");
1655 return -EINVAL;
1656 }
1657
1658 /* Check if the chain exists */
1659 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1660 printk(KERN_ERR "Chain doesn't exists\n");
1661 return -EINVAL;
1662 }
1663 channels = dma_linked_lch[chain_id].linked_dmach_q;
1664
1665 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1666 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1667 l = sys_cf;
1668 /* Middle mode reg set no Standby */
1669 l &= ~((1 << 12)|(1 << 13));
1670 p->dma_write(l, OCP_SYSCONFIG, 0);
1671 }
1672
1673 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1674
1675 /* Stop the Channel transmission */
1676 l = p->dma_read(CCR, channels[i]);
1677 l &= ~(1 << 7);
1678 p->dma_write(l, CCR, channels[i]);
1679
1680 /* Disable the link in all the channels */
1681 disable_lnk(channels[i]);
1682 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1683
1684 }
1685 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1686
1687 /* Reset the Queue pointers */
1688 OMAP_DMA_CHAIN_QINIT(chain_id);
1689
1690 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1691 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1692
1693 return 0;
1694 }
1695 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1696
1697 /* Get the index of the ongoing DMA in chain */
1698 /**
1699 * @brief omap_get_dma_chain_index - Get the element and frame index
1700 * of the ongoing DMA in chain
1701 *
1702 * @param chain_id
1703 * @param ei - Element index
1704 * @param fi - Frame index
1705 *
1706 * @return - Success : 0
1707 * Failure : -EINVAL
1708 */
1709 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1710 {
1711 int lch;
1712 int *channels;
1713
1714 /* Check for input params */
1715 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1716 printk(KERN_ERR "Invalid chain id\n");
1717 return -EINVAL;
1718 }
1719
1720 /* Check if the chain exists */
1721 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1722 printk(KERN_ERR "Chain doesn't exists\n");
1723 return -EINVAL;
1724 }
1725 if ((!ei) || (!fi))
1726 return -EINVAL;
1727
1728 channels = dma_linked_lch[chain_id].linked_dmach_q;
1729
1730 /* Get the current channel */
1731 lch = channels[dma_linked_lch[chain_id].q_head];
1732
1733 *ei = p->dma_read(CCEN, lch);
1734 *fi = p->dma_read(CCFN, lch);
1735
1736 return 0;
1737 }
1738 EXPORT_SYMBOL(omap_get_dma_chain_index);
1739
1740 /**
1741 * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1742 * ongoing DMA in chain
1743 *
1744 * @param chain_id
1745 *
1746 * @return - Success : Destination position
1747 * Failure : -EINVAL
1748 */
1749 int omap_get_dma_chain_dst_pos(int chain_id)
1750 {
1751 int lch;
1752 int *channels;
1753
1754 /* Check for input params */
1755 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1756 printk(KERN_ERR "Invalid chain id\n");
1757 return -EINVAL;
1758 }
1759
1760 /* Check if the chain exists */
1761 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1762 printk(KERN_ERR "Chain doesn't exists\n");
1763 return -EINVAL;
1764 }
1765
1766 channels = dma_linked_lch[chain_id].linked_dmach_q;
1767
1768 /* Get the current channel */
1769 lch = channels[dma_linked_lch[chain_id].q_head];
1770
1771 return p->dma_read(CDAC, lch);
1772 }
1773 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1774
1775 /**
1776 * @brief omap_get_dma_chain_src_pos - Get the source position
1777 * of the ongoing DMA in chain
1778 * @param chain_id
1779 *
1780 * @return - Success : Destination position
1781 * Failure : -EINVAL
1782 */
1783 int omap_get_dma_chain_src_pos(int chain_id)
1784 {
1785 int lch;
1786 int *channels;
1787
1788 /* Check for input params */
1789 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1790 printk(KERN_ERR "Invalid chain id\n");
1791 return -EINVAL;
1792 }
1793
1794 /* Check if the chain exists */
1795 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1796 printk(KERN_ERR "Chain doesn't exists\n");
1797 return -EINVAL;
1798 }
1799
1800 channels = dma_linked_lch[chain_id].linked_dmach_q;
1801
1802 /* Get the current channel */
1803 lch = channels[dma_linked_lch[chain_id].q_head];
1804
1805 return p->dma_read(CSAC, lch);
1806 }
1807 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1808 #endif /* ifndef CONFIG_ARCH_OMAP1 */
1809
1810 /*----------------------------------------------------------------------------*/
1811
1812 #ifdef CONFIG_ARCH_OMAP1
1813
/*
 * Service one OMAP1 logical channel: fetch (or restore) its CSR status,
 * report timeout/drop errors, clear the ACTIVE flag on block completion
 * and invoke the client callback.
 *
 * Returns 1 if an event was handled, 0 if there was nothing to do.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/*
	 * In 1510 mode channels >= 6 have no IRQ status of their own;
	 * it was stashed in saved_csr by the ch <= 2 path below.
	 */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		/* The upper CSR bits belong to the shadowed channel ch + 6 */
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1846
1847 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1848 {
1849 int ch = ((int) dev_id) - 1;
1850 int handled = 0;
1851
1852 for (;;) {
1853 int handled_now = 0;
1854
1855 handled_now += omap1_dma_handle_ch(ch);
1856 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1857 handled_now += omap1_dma_handle_ch(ch + 6);
1858 if (!handled_now)
1859 break;
1860 handled += handled_now;
1861 }
1862
1863 return handled ? IRQ_HANDLED : IRQ_NONE;
1864 }
1865
1866 #else
1867 #define omap1_dma_irq_handler NULL
1868 #endif
1869
1870 #ifdef CONFIG_ARCH_OMAP2PLUS
1871
/*
 * Service one OMAP2+ logical channel interrupt: log error conditions,
 * ack the channel and level-0 status, advance any chain this channel
 * belongs to, then invoke the client callback.  Always returns 0.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = p->dma_read(CSR, ch);

	if (!status) {
		if (printk_ratelimit())
			pr_warn("Spurious DMA IRQ for lch %d\n", ch);
		/* Ack the level-0 line anyway so the IRQ doesn't stick */
		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
				status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		pr_info("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
			u32 ccr;

			/* Errata i378: disable the channel after a
			 * transaction error */
			ccr = p->dma_read(CCR, ch);
			ccr &= ~OMAP_DMA_CCR_EN;
			p->dma_write(ccr, CCR, ch);
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Ack the channel status and the level-0 interrupt line */
	p->dma_write(status, CSR, ch);
	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
	/* read back the register to flush the write */
	p->dma_read(IRQSTATUS_L0, ch);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/*
		 * CLNK_CTRL bit 15 set: the link is still enabled, so the
		 * next channel in the chain is presumably running now --
		 * NOTE(review): bit meaning inferred, confirm against TRM.
		 */
		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Re-read and ack any status raised while handling above */
		status = p->dma_read(CSR, ch);
		p->dma_write(status, CSR, ch);
	}

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
1938
1939 /* STATUS register count is from 1-32 while our is 0-31 */
1940 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1941 {
1942 u32 val, enable_reg;
1943 int i;
1944
1945 val = p->dma_read(IRQSTATUS_L0, 0);
1946 if (val == 0) {
1947 if (printk_ratelimit())
1948 printk(KERN_WARNING "Spurious DMA IRQ\n");
1949 return IRQ_HANDLED;
1950 }
1951 enable_reg = p->dma_read(IRQENABLE_L0, 0);
1952 val &= enable_reg; /* Dispatch only relevant interrupts */
1953 for (i = 0; i < dma_lch_count && val != 0; i++) {
1954 if (val & 1)
1955 omap2_dma_handle_ch(i);
1956 val >>= 1;
1957 }
1958
1959 return IRQ_HANDLED;
1960 }
1961
/* Shared level-0 DMA interrupt action, installed via setup_irq() in probe */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	.flags = IRQF_DISABLED
};

#else
/* No OMAP2+ support compiled in: empty placeholder for probe/remove */
static struct irqaction omap24xx_dma_irq;
#endif
1971
1972 /*----------------------------------------------------------------------------*/
1973
/*
 * Save the global (non per-channel) SDMA registers -- IRQENABLE_L0,
 * OCP_SYSCONFIG and GCR -- so omap_dma_global_context_restore() can
 * reprogram them later.
 */
void omap_dma_global_context_save(void)
{
	omap_dma_global_context.dma_irqenable_l0 =
		p->dma_read(IRQENABLE_L0, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		p->dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
}
1982
/*
 * Restore the global SDMA registers saved by
 * omap_dma_global_context_save() and reset every logical channel that
 * is still marked in-use.
 */
void omap_dma_global_context_restore(void)
{
	int ch;

	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
		OCP_SYSCONFIG, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
		IRQENABLE_L0, 0);

	/* ROM code errata workaround: clear the two low IRQ status bits */
	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
		p->dma_write(0x3 , IRQSTATUS_L0, 0);

	/* Bring every allocated channel back to a clean state */
	for (ch = 0; ch < dma_chan_count; ch++)
		if (dma_chan[ch].dev_id != -1)
			omap_clear_dma(ch);
}
2000
2001 static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2002 {
2003 int ch, ret = 0;
2004 int dma_irq;
2005 char irq_name[4];
2006 int irq_rel;
2007
2008 p = pdev->dev.platform_data;
2009 if (!p) {
2010 dev_err(&pdev->dev,
2011 "%s: System DMA initialized without platform data\n",
2012 __func__);
2013 return -EINVAL;
2014 }
2015
2016 d = p->dma_attr;
2017 errata = p->errata;
2018
2019 if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2020 && (omap_dma_reserve_channels <= dma_lch_count))
2021 d->lch_count = omap_dma_reserve_channels;
2022
2023 dma_lch_count = d->lch_count;
2024 dma_chan_count = dma_lch_count;
2025 dma_chan = d->chan;
2026 enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2027
2028 if (cpu_class_is_omap2()) {
2029 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2030 dma_lch_count, GFP_KERNEL);
2031 if (!dma_linked_lch) {
2032 ret = -ENOMEM;
2033 goto exit_dma_lch_fail;
2034 }
2035 }
2036
2037 spin_lock_init(&dma_chan_lock);
2038 for (ch = 0; ch < dma_chan_count; ch++) {
2039 omap_clear_dma(ch);
2040 if (cpu_class_is_omap2())
2041 omap2_disable_irq_lch(ch);
2042
2043 dma_chan[ch].dev_id = -1;
2044 dma_chan[ch].next_lch = -1;
2045
2046 if (ch >= 6 && enable_1510_mode)
2047 continue;
2048
2049 if (cpu_class_is_omap1()) {
2050 /*
2051 * request_irq() doesn't like dev_id (ie. ch) being
2052 * zero, so we have to kludge around this.
2053 */
2054 sprintf(&irq_name[0], "%d", ch);
2055 dma_irq = platform_get_irq_byname(pdev, irq_name);
2056
2057 if (dma_irq < 0) {
2058 ret = dma_irq;
2059 goto exit_dma_irq_fail;
2060 }
2061
2062 /* INT_DMA_LCD is handled in lcd_dma.c */
2063 if (dma_irq == INT_DMA_LCD)
2064 continue;
2065
2066 ret = request_irq(dma_irq,
2067 omap1_dma_irq_handler, 0, "DMA",
2068 (void *) (ch + 1));
2069 if (ret != 0)
2070 goto exit_dma_irq_fail;
2071 }
2072 }
2073
2074 if (cpu_class_is_omap2() && !cpu_is_omap242x())
2075 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2076 DMA_DEFAULT_FIFO_DEPTH, 0);
2077
2078 if (cpu_class_is_omap2()) {
2079 strcpy(irq_name, "0");
2080 dma_irq = platform_get_irq_byname(pdev, irq_name);
2081 if (dma_irq < 0) {
2082 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2083 goto exit_dma_lch_fail;
2084 }
2085 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2086 if (ret) {
2087 dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
2088 dma_irq, ret);
2089 goto exit_dma_lch_fail;
2090 }
2091 }
2092
2093 /* reserve dma channels 0 and 1 in high security devices */
2094 if (cpu_is_omap34xx() &&
2095 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2096 pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
2097 dma_chan[0].dev_id = 0;
2098 dma_chan[1].dev_id = 1;
2099 }
2100 p->show_dma_caps();
2101 return 0;
2102
2103 exit_dma_irq_fail:
2104 dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
2105 dma_irq, ret);
2106 for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2107 dma_irq = platform_get_irq(pdev, irq_rel);
2108 free_irq(dma_irq, (void *)(irq_rel + 1));
2109 }
2110
2111 exit_dma_lch_fail:
2112 kfree(p);
2113 kfree(d);
2114 kfree(dma_chan);
2115 return ret;
2116 }
2117
2118 static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2119 {
2120 int dma_irq;
2121
2122 if (cpu_class_is_omap2()) {
2123 char irq_name[4];
2124 strcpy(irq_name, "0");
2125 dma_irq = platform_get_irq_byname(pdev, irq_name);
2126 remove_irq(dma_irq, &omap24xx_dma_irq);
2127 } else {
2128 int irq_rel = 0;
2129 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2130 dma_irq = platform_get_irq(pdev, irq_rel);
2131 free_irq(dma_irq, (void *)(irq_rel + 1));
2132 }
2133 }
2134 kfree(p);
2135 kfree(d);
2136 kfree(dma_chan);
2137 return 0;
2138 }
2139
/* Platform driver glue, matched against the "omap_dma_system" device */
static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove		= __devexit_p(omap_system_dma_remove),
	.driver		= {
		.name	= "omap_dma_system"
	},
};
2147
/* Register the system DMA driver early (arch_initcall) so channel
 * clients probed later find the DMA core ready. */
static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);
2153
/* Module unload hook: unregister the platform driver */
static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}
2158
2159 MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2160 MODULE_LICENSE("GPL");
2161 MODULE_ALIAS("platform:" DRIVER_NAME);
2162 MODULE_AUTHOR("Texas Instruments Inc");
2163
2164 /*
2165 * Reserve the omap SDMA channels using cmdline bootarg
2166 * "omap_dma_reserve_ch=". The valid range is 1 to 32
2167 */
2168 static int __init omap_dma_cmdline_reserve_ch(char *str)
2169 {
2170 if (get_option(&str, &omap_dma_reserve_channels) != 1)
2171 omap_dma_reserve_channels = 0;
2172 return 1;
2173 }
2174
2175 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2176
2177
This page took 0.083004 seconds and 5 git commands to generate.