1 /* linux/arch/arm/mach-s3c2410/dma.c
2 *
3 * (c) 2003-2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C2410 DMA core
7 *
8 * http://www.simtec.co.uk/products/EB2410ITX/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Changelog:
15 * 27-Feb-2005 BJD Added kmem cache for dma descriptors
16 * 18-Nov-2004 BJD Removed error for loading onto stopped channel
17 * 10-Nov-2004 BJD Ensure all external symbols exported for modules
18 * 10-Nov-2004 BJD Use sys_device and sysdev_class for power management
19 * 08-Aug-2004 BJD Apply rmk's suggestions
20 * 21-Jul-2004 BJD Ported to linux 2.6
21 * 12-Jul-2004 BJD Finished re-write and change of API
22 * 06-Jul-2004 BJD Rewrote dma code to try and cope with various problems
23 * 19-Aug-2003 BJD Cleanup, header fix, added URL
24 * 23-May-2003 BJD Created file
25 *
26 * This file is based on the Sangwook Lee/Samsung patches, re-written due
27 * to various omissions from the code (such as flexible dma configuration)
28 * for use with the BAST system board.
29 *
30 * The re-write is pretty much complete, and should be good enough for any
31 * possible DMA function
32 */
33
34
35 #ifdef CONFIG_S3C2410_DMA_DEBUG
36 #define DEBUG
37 #endif
38
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/sched.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/sysdev.h>
45 #include <linux/slab.h>
46 #include <linux/errno.h>
47 #include <linux/delay.h>
48
49 #include <asm/system.h>
50 #include <asm/irq.h>
51 #include <asm/hardware.h>
52 #include <asm/io.h>
53 #include <asm/dma.h>
54
55 #include <asm/mach/dma.h>
56 #include <asm/arch/map.h>
57
58 /* io map for dma */
59 static void __iomem *dma_base;
60 static kmem_cache_t *dma_kmem;
61
62 /* dma channel state information */
63 struct s3c2410_dma_chan s3c2410_chans[S3C2410_DMA_CHANNELS];
64
65 /* debugging functions */
66
67 #define BUF_MAGIC (0xcafebabe)
68
69 #define dmawarn(fmt...) printk(KERN_DEBUG fmt)
70
71 #define dma_regaddr(chan, reg) ((chan)->regs + (reg))
72
73 #if 1
74 #define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
75 #else
76 static inline void
77 dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
78 {
79 pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
80 writel(val, dma_regaddr(chan, reg));
81 }
82
83 #endif
84
85 #define dma_rdreg(chan, reg) readl((chan)->regs + (reg))
86
87 /* captured register state for debug */
88
89 struct s3c2410_dma_regstate {
90 unsigned long dcsrc;
91 unsigned long disrc;
92 unsigned long dstat;
93 unsigned long dcon;
94 unsigned long dmsktrig;
95 };
96
97 #ifdef CONFIG_S3C2410_DMA_DEBUG
98
99 /* dmadbg_showregs
100 *
101 * simple debug routine to print the current state of the dma registers
102 */
103
104 static void
105 dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
106 {
107 regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC);
108 regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC);
109 regs->dstat = dma_rdreg(chan, S3C2410_DMA_DSTAT);
110 regs->dcon = dma_rdreg(chan, S3C2410_DMA_DCON);
111 regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
112 }
113
114 static void
115 dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
116 struct s3c2410_dma_regstate *regs)
117 {
118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
119 chan->number, fname, line,
120 regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig,
121 regs->dcon);
122 }
123
124 static void
125 dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
126 {
127 struct s3c2410_dma_regstate state;
128
129 dmadbg_capture(chan, &state);
130
131 printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n",
132 chan->number, fname, line, chan->load_state,
133 chan->curr, chan->next, chan->end);
134
135 dmadbg_dumpregs(fname, line, chan, &state);
136 }
137
138 static void
139 dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
140 {
141 struct s3c2410_dma_regstate state;
142
143 dmadbg_capture(chan, &state);
144 dmadbg_dumpregs(fname, line, chan, &state);
145 }
146
147 #define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
148 #define dbg_showchan(chan) dmadbg_showchan(__FUNCTION__, __LINE__, (chan))
149 #else
150 #define dbg_showregs(chan) do { } while(0)
151 #define dbg_showchan(chan) do { } while(0)
152 #endif /* CONFIG_S3C2410_DMA_DEBUG */
153
154 #define check_channel(chan) \
155 do { if ((chan) >= S3C2410_DMA_CHANNELS) { \
156 printk(KERN_ERR "%s: invalid channel %d\n", __FUNCTION__, (chan)); \
157 return -EINVAL; \
158 } } while(0)
159
160
161 /* s3c2410_dma_stats_timeout
162 *
163 * Update DMA stats from timeout info
164 */
165
166 static void
167 s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
168 {
169 if (stats == NULL)
170 return;
171
172 if (val > stats->timeout_longest)
173 stats->timeout_longest = val;
174 if (val < stats->timeout_shortest)
175 stats->timeout_shortest = val;
176
177 stats->timeout_avg += val;
178 }
179
180 /* s3c2410_dma_waitforload
181 *
182 * wait for the DMA engine to load a buffer, and update the state accordingly
183 */
184
185 static int
186 s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
187 {
188 int timeout = chan->load_timeout;
189 int took;
190
191 if (chan->load_state != S3C2410_DMALOAD_1LOADED) {
192 printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line);
193 return 0;
194 }
195
196 if (chan->stats != NULL)
197 chan->stats->loads++;
198
199 while (--timeout > 0) {
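/* note (added comment): on the S3C2410 the low 20 bits of DSTAT hold
 * CURR_TC, the current transfer count; shifting left by (32-20) discards
 * the status bits, so a non-zero result means the engine has latched the
 * newly loaded transfer count */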
200 if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) {
201 took = chan->load_timeout - timeout;
202
203 s3c2410_dma_stats_timeout(chan->stats, took);
204
205 switch (chan->load_state) {
206 case S3C2410_DMALOAD_1LOADED:
207 chan->load_state = S3C2410_DMALOAD_1RUNNING;
208 break;
209
210 default:
211 printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state);
212 }
213
214 return 1;
215 }
216 }
217
218 if (chan->stats != NULL) {
219 chan->stats->timeout_failed++;
220 }
221
222 return 0;
223 }
224
225
226
227 /* s3c2410_dma_loadbuffer
228 *
229 * load a buffer, and update the channel state
230 */
231
232 static inline int
233 s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
234 struct s3c2410_dma_buf *buf)
235 {
236 unsigned long reload;
237
238 if (buf == NULL) {
239 dmawarn("buffer is NULL\n");
240 return -EINVAL;
241 }
242 
243 pr_debug("s3c2410_dma_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n",
244 buf, (unsigned long)buf->data, buf->size);
245
246 /* check the state of the channel before we do anything */
247
248 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
249 dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n");
250 }
251
252 if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) {
253 dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n");
254 }
255
256 /* if this is the last buffer, it seems sensible not to set the
257 * auto-reload bit, so that the DMA engine will not try to load
258 * another transfer after this one has finished...
259 */
260 if (chan->load_state == S3C2410_DMALOAD_NONE) {
261 pr_debug("load_state is none, checking for noreload (next=%p)\n",
262 buf->next);
263 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
264 } else {
265 //pr_debug("load_state is %d => autoreload\n", chan->load_state);
266 reload = S3C2410_DCON_AUTORELOAD;
267 }
268
269 if ((buf->data & 0xf0000000) != 0x30000000) {
270 dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
271 }
272
273 writel(buf->data, chan->addr_reg);
274
275 dma_wrreg(chan, S3C2410_DMA_DCON,
276 chan->dcon | reload | (buf->size/chan->xfer_unit));
277
278 chan->next = buf->next;
279
280 /* update the state of the channel */
281
282 switch (chan->load_state) {
283 case S3C2410_DMALOAD_NONE:
284 chan->load_state = S3C2410_DMALOAD_1LOADED;
285 break;
286
287 case S3C2410_DMALOAD_1RUNNING:
288 chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING;
289 break;
290
291 default:
292 dmawarn("dmaload: unknown state %d in loadbuffer\n",
293 chan->load_state);
294 break;
295 }
296
297 return 0;
298 }
299
300 /* s3c2410_dma_call_op
301 *
302 * small routine to call the op routine with the given op if it has been
303 * registered
304 */
305
306 static void
307 s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
308 {
309 if (chan->op_fn != NULL) {
310 (chan->op_fn)(chan, op);
311 }
312 }
313
314 /* s3c2410_dma_buffdone
315 *
316 * small wrapper to check if callback routine needs to be called, and
317 * if so, call it
318 */
319
320 static inline void
321 s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
322 enum s3c2410_dma_buffresult result)
323 {
324 pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
325 chan->callback_fn, buf, buf->id, buf->size, result);
326
327 if (chan->callback_fn != NULL) {
328 (chan->callback_fn)(chan, buf->id, buf->size, result);
329 }
330 }
331
332 /* s3c2410_dma_start
333 *
334 * start a dma channel going
335 */
336
337 static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
338 {
339 unsigned long tmp;
340 unsigned long flags;
341
342 pr_debug("s3c2410_start_dma: channel=%d\n", chan->number);
343
344 local_irq_save(flags);
345
346 if (chan->state == S3C2410_DMA_RUNNING) {
347 pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state);
348 local_irq_restore(flags);
349 return 0;
350 }
351
352 chan->state = S3C2410_DMA_RUNNING;
353
354 /* check whether there is anything loaded, and if not, see
355 * if we can find anything to load
356 */
357
358 if (chan->load_state == S3C2410_DMALOAD_NONE) {
359 if (chan->next == NULL) {
360 printk(KERN_ERR "dma%d: channel has nothing loaded\n",
361 chan->number);
362 chan->state = S3C2410_DMA_IDLE;
363 local_irq_restore(flags);
364 return -EINVAL;
365 }
366
367 s3c2410_dma_loadbuffer(chan, chan->next);
368 }
369
370 dbg_showchan(chan);
371
372 /* enable the channel */
373
374 if (!chan->irq_enabled) {
375 enable_irq(chan->irq);
376 chan->irq_enabled = 1;
377 }
378
379 /* start the channel going */
380
381 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
382 tmp &= ~S3C2410_DMASKTRIG_STOP;
383 tmp |= S3C2410_DMASKTRIG_ON;
384 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
385
386 pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
387
388 #if 0
389 /* the dma buffer loads should take care of clearing the AUTO
390 * reloading feature */
391 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
392 tmp &= ~S3C2410_DCON_NORELOAD;
393 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
394 #endif
395
396 s3c2410_dma_call_op(chan, S3C2410_DMAOP_START);
397
398 dbg_showchan(chan);
399
400 /* if we've only loaded one buffer onto the channel, then check
401 * to see if we have another, and if so, try to load it so when
402 * the first buffer is finished, the new one will be loaded onto
403 * the channel */
404
405 if (chan->next != NULL) {
406 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
407
408 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
409 pr_debug("%s: buff not yet loaded, no more todo\n",
410 __FUNCTION__);
411 } else {
412 chan->load_state = S3C2410_DMALOAD_1RUNNING;
413 s3c2410_dma_loadbuffer(chan, chan->next);
414 }
415
416 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
417 s3c2410_dma_loadbuffer(chan, chan->next);
418 }
419 }
420
421
422 local_irq_restore(flags);
423
424 return 0;
425 }
426
427 /* s3c2410_dma_canload
428 *
429 * work out if we can queue another buffer into the DMA engine
430 */
431
432 static int
433 s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
434 {
435 if (chan->load_state == S3C2410_DMALOAD_NONE ||
436 chan->load_state == S3C2410_DMALOAD_1RUNNING)
437 return 1;
438
439 return 0;
440 }
441
442
443 /* s3c2410_dma_enqueue
444 *
445 * queue a given buffer for dma transfer.
446 *
447 * id the device driver's id information for this buffer
448 * data the physical address of the buffer data
449 * size the size of the buffer in bytes
450 *
451 * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
452 * is checked, and if set, the channel is started. If this flag isn't
453 * set, the buffer stays queued until the channel is started explicitly.
454 *
455 * It is possible to queue more than one DMA buffer onto a channel at
456 * once, and the code will deal with the re-loading of the next buffer
457 * when necessary.
458 */
459
460 int s3c2410_dma_enqueue(unsigned int channel, void *id,
461 dma_addr_t data, int size)
462 {
463 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
464 struct s3c2410_dma_buf *buf;
465 unsigned long flags;
466
467 check_channel(channel);
468
469 pr_debug("%s: id=%p, data=%08x, size=%d\n",
470 __FUNCTION__, id, (unsigned int)data, size);
471
472 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
473 if (buf == NULL) {
474 pr_debug("%s: out of memory (%ld alloc)\n",
475 __FUNCTION__, (long)sizeof(*buf));
476 return -ENOMEM;
477 }
478
479 //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
480 //dbg_showchan(chan);
481
482 buf->next = NULL;
483 buf->data = buf->ptr = data;
484 buf->size = size;
485 buf->id = id;
486 buf->magic = BUF_MAGIC;
487
488 local_irq_save(flags);
489
490 if (chan->curr == NULL) {
491 /* we've got nothing loaded... */
492 pr_debug("%s: buffer %p queued onto empty channel\n",
493 __FUNCTION__, buf);
494
495 chan->curr = buf;
496 chan->end = buf;
497 chan->next = NULL;
498 } else {
499 pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
500 chan->number, __FUNCTION__, buf);
501
502 if (chan->end == NULL)
503 pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
504 chan->number, __FUNCTION__, chan);
505
506 chan->end->next = buf;
507 chan->end = buf;
508 }
509
510 /* if necessary, update the next buffer field */
511 if (chan->next == NULL)
512 chan->next = buf;
513
514 /* check to see if we can load a buffer */
515 if (chan->state == S3C2410_DMA_RUNNING) {
516 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
517 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
518 printk(KERN_ERR "dma%d: loadbuffer: "
519 "timeout loading buffer\n",
520 chan->number);
521 dbg_showchan(chan);
522 local_irq_restore(flags);
523 return -EINVAL;
524 }
525 }
526
527 while (s3c2410_dma_canload(chan) && chan->next != NULL) {
528 s3c2410_dma_loadbuffer(chan, chan->next);
529 }
530 } else if (chan->state == S3C2410_DMA_IDLE) {
531 if (chan->flags & S3C2410_DMAF_AUTOSTART) {
532 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_START);
533 }
534 }
535
536 local_irq_restore(flags);
537 return 0;
538 }
539
540 EXPORT_SYMBOL(s3c2410_dma_enqueue);
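/* Illustrative usage sketch (editorial example, not part of this driver):
 * a client might register a buffer-done callback and queue a DMA-coherent
 * buffer as below. The channel number, function names and callback
 * signature are assumptions based on how callback_fn is invoked above.
 */
#if 0
static void example_buffdone(struct s3c2410_dma_chan *chan, void *id,
			     int size, enum s3c2410_dma_buffresult result)
{
	/* called from the dma irq path when the buffer completes */
	pr_debug("buffer %p done, size %d, result %d\n", id, size, result);
}

static int example_queue(unsigned int channel, dma_addr_t phys, int len)
{
	s3c2410_dma_set_buffdone_fn(channel, example_buffdone);
	s3c2410_dma_setflags(channel, S3C2410_DMAF_AUTOSTART);

	/* queue the buffer; with AUTOSTART set an idle channel is started */
	return s3c2410_dma_enqueue(channel, NULL, phys, len);
}
#endif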
541
542 static inline void
543 s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
544 {
545 int magicok = (buf->magic == BUF_MAGIC);
546
547 buf->magic = -1;
548
549 if (magicok) {
550 kmem_cache_free(dma_kmem, buf);
551 } else {
552 printk(KERN_ERR "s3c2410_dma_freebuf: buff %p with bad magic\n", buf);
553 }
554 }
555
556 /* s3c2410_dma_lastxfer
557 *
558 * called when the system is out of buffers, to ensure that the channel
559 * is prepared for shutdown.
560 */
561
562 static inline void
563 s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
564 {
565 pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
566 chan->number, chan->load_state);
567
568 switch (chan->load_state) {
569 case S3C2410_DMALOAD_NONE:
570 break;
571
572 case S3C2410_DMALOAD_1LOADED:
573 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
574 /* flag error? */
575 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
576 chan->number, __FUNCTION__);
577 return;
578 }
579 break;
580
581 case S3C2410_DMALOAD_1LOADED_1RUNNING:
582 /* I believe in this case we do not have anything to do
583 * until the next buffer comes along, and we turn off the
584 * reload */
585 return;
586
587 default:
588 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
589 chan->number, chan->load_state);
590 return;
591
592 }
593
594 /* hopefully this'll shut the damned thing up after the transfer... */
595 dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD);
596 }
597
598
599 #define dmadbg2(x...)
600
601 static irqreturn_t
602 s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
603 {
604 struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
605 struct s3c2410_dma_buf *buf;
606
607 buf = chan->curr;
608
609 dbg_showchan(chan);
610
611 /* modify the channel state */
612
613 switch (chan->load_state) {
614 case S3C2410_DMALOAD_1RUNNING:
615 /* TODO - if we are running only one buffer, we probably
616 * want to reload here, and then worry about the buffer
617 * callback */
618
619 chan->load_state = S3C2410_DMALOAD_NONE;
620 break;
621
622 case S3C2410_DMALOAD_1LOADED:
623 /* iirc, we should go back to NONE loaded here, we
624 * had a buffer, and it was never verified as being
625 * loaded.
626 */
627
628 chan->load_state = S3C2410_DMALOAD_NONE;
629 break;
630
631 case S3C2410_DMALOAD_1LOADED_1RUNNING:
632 /* we'll worry about checking to see if another buffer is
633 * ready after we've called back the owner. This should
634 * ensure we do not wait around too long for the DMA
635 * engine to start the next transfer
636 */
637
638 chan->load_state = S3C2410_DMALOAD_1LOADED;
639 break;
640
641 case S3C2410_DMALOAD_NONE:
642 printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
643 chan->number);
644 break;
645
646 default:
647 printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
648 chan->number, chan->load_state);
649 break;
650 }
651
652 if (buf != NULL) {
653 /* update the chain to make sure that if we load any more
654 * buffers when we call the callback function, things should
655 * work properly */
656
657 chan->curr = buf->next;
658 buf->next = NULL;
659
660 if (buf->magic != BUF_MAGIC) {
661 printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
662 chan->number, __FUNCTION__, buf);
663 return IRQ_HANDLED;
664 }
665
666 s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);
667
668 /* free resources */
669 s3c2410_dma_freebuf(buf);
670 }
671 
672
673 /* only reload if the channel is still running... our buffer done
674 * routine may have altered the state by requesting the dma channel
675 * to stop or shutdown... */
676
677 /* todo: check that when the channel is shut-down from inside this
678 * function, we cope with unsetting reload, etc */
679
680 if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
681 unsigned long flags;
682
683 switch (chan->load_state) {
684 case S3C2410_DMALOAD_1RUNNING:
685 /* don't need to do anything for this state */
686 break;
687
688 case S3C2410_DMALOAD_NONE:
689 /* can load buffer immediately */
690 break;
691
692 case S3C2410_DMALOAD_1LOADED:
693 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
694 /* flag error? */
695 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
696 chan->number, __FUNCTION__);
697 return IRQ_HANDLED;
698 }
699
700 break;
701
702 case S3C2410_DMALOAD_1LOADED_1RUNNING:
703 goto no_load;
704
705 default:
706 printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
707 chan->number, chan->load_state);
708 return IRQ_HANDLED;
709 }
710
711 local_irq_save(flags);
712 s3c2410_dma_loadbuffer(chan, chan->next);
713 local_irq_restore(flags);
714 } else {
715 s3c2410_dma_lastxfer(chan);
716
717 /* see if we can stop this channel.. */
718 if (chan->load_state == S3C2410_DMALOAD_NONE) {
719 pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
720 chan->number, jiffies);
721 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
722 }
723 }
724
725 no_load:
726 return IRQ_HANDLED;
727 }
728
729 /* s3c2410_dma_request
730 *
731 * get control of a dma channel
732 */
733
734 int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *client,
735 void *dev)
736 {
737 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
738 unsigned long flags;
739 int err;
740
741 pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
742 channel, client->name, dev);
743
744 check_channel(channel);
745
746 local_irq_save(flags);
747
748 dbg_showchan(chan);
749
750 if (chan->in_use) {
751 if (client != chan->client) {
752 printk(KERN_ERR "dma%d: already in use\n", channel);
753 local_irq_restore(flags);
754 return -EBUSY;
755 } else {
756 printk(KERN_ERR "dma%d: client already has channel\n", channel);
757 }
758 }
759
760 chan->client = client;
761 chan->in_use = 1;
762
763 if (!chan->irq_claimed) {
764 pr_debug("dma%d: %s : requesting irq %d\n",
765 channel, __FUNCTION__, chan->irq);
766
767 chan->irq_claimed = 1;
768 local_irq_restore(flags);
769
770 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
771 client->name, (void *)chan);
772
773 local_irq_save(flags);
774
775 if (err) {
776 chan->in_use = 0;
777 chan->irq_claimed = 0;
778 local_irq_restore(flags);
779
780 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
781 client->name, chan->irq, chan->number);
782 return err;
783 }
784
785 chan->irq_enabled = 1;
786 }
787
788 local_irq_restore(flags);
789
790 /* need to setup */
791
792 pr_debug("%s: channel initialised, %p\n", __FUNCTION__, chan);
793
794 return 0;
795 }
796
797 EXPORT_SYMBOL(s3c2410_dma_request);
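/* Illustrative sketch only (editorial example): claiming and later
 * releasing a channel with a client structure. The client name and the
 * channel number are placeholder assumptions; a real driver would use
 * its own identifiers and keep the channel claimed while it is in use.
 */
#if 0
static struct s3c2410_dma_client example_client = {
	.name = "example-driver",
};

static int example_claim_channel(unsigned int channel)
{
	int ret;

	ret = s3c2410_dma_request(channel, &example_client, NULL);
	if (ret)
		return ret;

	/* ... configure, enqueue and run transfers here ... */

	return s3c2410_dma_free(channel, &example_client);
}
#endif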
798
799 /* s3c2410_dma_free
800 *
801 * release the given channel back to the system, will stop and flush
802 * any outstanding transfers, and ensure the channel is ready for the
803 * next claimant.
804 *
805 * Note, although a warning is currently printed if the freeing client
806 * info is not the same as the registrant's client info, the free is still
807 * allowed to go through.
808 */
809
810 int s3c2410_dma_free(dmach_t channel, struct s3c2410_dma_client *client)
811 {
812 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
813 unsigned long flags;
814
815 check_channel(channel);
816
817 local_irq_save(flags);
818
819
820 if (chan->client != client) {
821 printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
822 channel, chan->client, client);
823 }
824
825 /* sort out stopping and freeing the channel */
826
827 if (chan->state != S3C2410_DMA_IDLE) {
828 pr_debug("%s: need to stop dma channel %p\n",
829 __FUNCTION__, chan);
830
831 /* possibly flush the channel */
832 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
833 }
834
835 chan->client = NULL;
836 chan->in_use = 0;
837
838 if (chan->irq_claimed)
839 free_irq(chan->irq, (void *)chan);
840 chan->irq_claimed = 0;
841
842 local_irq_restore(flags);
843
844 return 0;
845 }
846
847 EXPORT_SYMBOL(s3c2410_dma_free);
848
849 static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
850 {
851 unsigned long tmp;
852 unsigned long flags;
853
854 pr_debug("%s:\n", __FUNCTION__);
855
856 dbg_showchan(chan);
857
858 local_irq_save(flags);
859
860 s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP);
861
862 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
863 tmp |= S3C2410_DMASKTRIG_STOP;
864 //tmp &= ~S3C2410_DMASKTRIG_ON;
865 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
866
867 #if 0
868 /* should also clear interrupts, according to WinCE BSP */
869 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
870 tmp |= S3C2410_DCON_NORELOAD;
871 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
872 #endif
873
874 /* should the stop operation do this, or should we wait for the flush? */
875 chan->state = S3C2410_DMA_IDLE;
876 chan->load_state = S3C2410_DMALOAD_NONE;
877
878 local_irq_restore(flags);
879
880 return 0;
881 }
882
883 void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
884 {
885 unsigned long tmp;
886 unsigned int timeout = 0x10000;
887
888 while (timeout-- > 0) {
889 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
890
891 if (!(tmp & S3C2410_DMASKTRIG_ON))
892 return;
893 }
894
895 pr_debug("dma%d: failed to stop?\n", chan->number);
896 }
897
898
899 /* s3c2410_dma_flush
900 *
901 * stop the channel, and remove all current and pending transfers
902 */
903
904 static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
905 {
906 struct s3c2410_dma_buf *buf, *next;
907 unsigned long flags;
908
909 pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
910
911 dbg_showchan(chan);
912
913 local_irq_save(flags);
914
915 if (chan->state != S3C2410_DMA_IDLE) {
916 pr_debug("%s: stopping channel...\n", __FUNCTION__ );
917 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
918 }
919
920 buf = chan->curr;
921 if (buf == NULL)
922 buf = chan->next;
923
924 chan->curr = chan->next = chan->end = NULL;
925
926 if (buf != NULL) {
927 for ( ; buf != NULL; buf = next) {
928 next = buf->next;
929
930 pr_debug("%s: free buffer %p, next %p\n",
931 __FUNCTION__, buf, buf->next);
932
933 s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
934 s3c2410_dma_freebuf(buf);
935 }
936 }
937
938 dbg_showregs(chan);
939
940 s3c2410_dma_waitforstop(chan);
941
942 #if 0
943 /* should also clear interrupts, according to WinCE BSP */
944 {
945 unsigned long tmp;
946
947 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
948 tmp |= S3C2410_DCON_NORELOAD;
949 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
950 }
951 #endif
952
953 dbg_showregs(chan);
954
955 local_irq_restore(flags);
956
957 return 0;
958 }
959
960 int
961 s3c2410_dma_started(struct s3c2410_dma_chan *chan)
962 {
963 unsigned long flags;
964
965 local_irq_save(flags);
966
967 dbg_showchan(chan);
968
969 /* if we've only loaded one buffer onto the channel, then check
970 * to see if we have another, and if so, try to load it so when
971 * the first buffer is finished, the new one will be loaded onto
972 * the channel */
973
974 if (chan->next != NULL) {
975 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
976
977 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
978 pr_debug("%s: buff not yet loaded, no more todo\n",
979 __FUNCTION__);
980 } else {
981 chan->load_state = S3C2410_DMALOAD_1RUNNING;
982 s3c2410_dma_loadbuffer(chan, chan->next);
983 }
984
985 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
986 s3c2410_dma_loadbuffer(chan, chan->next);
987 }
988 }
989
990
991 local_irq_restore(flags);
992
993 return 0;
994
995 }
996
997 int
998 s3c2410_dma_ctrl(dmach_t channel, enum s3c2410_chan_op op)
999 {
1000 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1001
1002 check_channel(channel);
1003
1004 switch (op) {
1005 case S3C2410_DMAOP_START:
1006 return s3c2410_dma_start(chan);
1007
1008 case S3C2410_DMAOP_STOP:
1009 return s3c2410_dma_dostop(chan);
1010
1011 case S3C2410_DMAOP_PAUSE:
1012 case S3C2410_DMAOP_RESUME:
1013 return -ENOENT;
1014
1015 case S3C2410_DMAOP_FLUSH:
1016 return s3c2410_dma_flush(chan);
1017
1018 case S3C2410_DMAOP_STARTED:
1019 return s3c2410_dma_started(chan);
1020
1021 case S3C2410_DMAOP_TIMEOUT:
1022 return 0;
1023
1024 }
1025
1026 return -ENOENT; /* unknown, don't bother */
1027 }
1028
1029 EXPORT_SYMBOL(s3c2410_dma_ctrl);
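/* Illustrative sketch (editorial example): channel operations are driven
 * through s3c2410_dma_ctrl(). Starting is normally triggered either by
 * S3C2410_DMAOP_START or by enqueueing with S3C2410_DMAF_AUTOSTART set;
 * the teardown below is an assumption about a typical shutdown path.
 */
#if 0
static void example_teardown(unsigned int channel)
{
	/* flush stops the channel if needed and discards queued buffers */
	s3c2410_dma_ctrl(channel, S3C2410_DMAOP_FLUSH);
}
#endif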
1030
1031 /* DMA configuration for each channel
1032 *
1033 * DISRCC -> source of the DMA (AHB,APB)
1034 * DISRC -> source address of the DMA
1035 * DIDSTC -> destination of the DMA (AHB,APB)
1036 * DIDST -> destination address of the DMA
1037 */
1038
1039 /* s3c2410_dma_config
1040 *
1041 * xferunit: size of transfer unit in bytes (1,2,4)
1042 * dcon: base value of the DCONx register
1043 */
1044
1045 int s3c2410_dma_config(dmach_t channel,
1046 int xferunit,
1047 int dcon)
1048 {
1049 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1050
1051 pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
1052 __FUNCTION__, channel, xferunit, dcon);
1053
1054 check_channel(channel);
1055
1056 switch (xferunit) {
1057 case 1:
1058 dcon |= S3C2410_DCON_BYTE;
1059 break;
1060
1061 case 2:
1062 dcon |= S3C2410_DCON_HALFWORD;
1063 break;
1064
1065 case 4:
1066 dcon |= S3C2410_DCON_WORD;
1067 break;
1068
1069 default:
1070 pr_debug("%s: bad transfer size %d\n", __FUNCTION__, xferunit);
1071 return -EINVAL;
1072 }
1073
1074 dcon |= S3C2410_DCON_HWTRIG;
1075 dcon |= S3C2410_DCON_INTREQ;
1076
1077 pr_debug("%s: dcon now %08x\n", __FUNCTION__, dcon);
1078
1079 chan->dcon = dcon;
1080 chan->xfer_unit = xferunit;
1081
1082 return 0;
1083 }
1084
1085 EXPORT_SYMBOL(s3c2410_dma_config);
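/* Illustrative sketch (editorial example): configuring a channel for
 * 4-byte transfer units. s3c2410_dma_config() ORs in the WORD, HWTRIG
 * and INTREQ bits itself; a real driver would usually OR its own DCON
 * request-source/handshake bits into the base value instead of the 0
 * assumed here. The channel number is also an assumption.
 */
#if 0
static int example_config(unsigned int channel)
{
	/* 4-byte transfer unit, minimal base dcon value */
	return s3c2410_dma_config(channel, 4, 0);
}
#endif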
1086
1087 int s3c2410_dma_setflags(dmach_t channel, unsigned int flags)
1088 {
1089 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1090
1091 check_channel(channel);
1092
1093 pr_debug("%s: chan=%p, flags=%08x\n", __FUNCTION__, chan, flags);
1094
1095 chan->flags = flags;
1096
1097 return 0;
1098 }
1099
1100 EXPORT_SYMBOL(s3c2410_dma_setflags);
1101
1102
1103 /* do we need to protect the setting of these fields from
1104 * irq?
1105 */
1106
1107 int s3c2410_dma_set_opfn(dmach_t channel, s3c2410_dma_opfn_t rtn)
1108 {
1109 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1110
1111 check_channel(channel);
1112
1113 pr_debug("%s: chan=%p, op rtn=%p\n", __FUNCTION__, chan, rtn);
1114
1115 chan->op_fn = rtn;
1116
1117 return 0;
1118 }
1119
1120 EXPORT_SYMBOL(s3c2410_dma_set_opfn);
1121
1122 int s3c2410_dma_set_buffdone_fn(dmach_t channel, s3c2410_dma_cbfn_t rtn)
1123 {
1124 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1125
1126 check_channel(channel);
1127
1128 pr_debug("%s: chan=%p, callback rtn=%p\n", __FUNCTION__, chan, rtn);
1129
1130 chan->callback_fn = rtn;
1131
1132 return 0;
1133 }
1134
1135 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
1136
1137 /* s3c2410_dma_devconfig
1138 *
1139 * configure the dma source/destination hardware type and address
1140 *
1141 * source: S3C2410_DMASRC_HW: source is hardware
1142 * S3C2410_DMASRC_MEM: source is memory
1143 *
1144 * hwcfg: the value for xxxSTCn register,
1145 * bit 0: 0=increment pointer, 1=leave pointer
1146 * bit 1: 0=source is AHB, 1=source is APB
1147 *
1148 * devaddr: physical address of the device (source or destination)
1149 */
1150
1151 int s3c2410_dma_devconfig(int channel,
1152 enum s3c2410_dmasrc source,
1153 int hwcfg,
1154 unsigned long devaddr)
1155 {
1156 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1157
1158 check_channel(channel);
1159
1160 pr_debug("%s: source=%d, hwcfg=%08x, devaddr=%08lx\n",
1161 __FUNCTION__, (int)source, hwcfg, devaddr);
1162
1163 chan->source = source;
1164 chan->dev_addr = devaddr;
1165
1166 switch (source) {
1167 case S3C2410_DMASRC_HW:
1168 /* source is hardware */
1169 pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
1170 __FUNCTION__, devaddr, hwcfg);
1171 dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3);
1172 dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr);
1173 dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0));
1174
1175 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
1176 return 0;
1177
1178 case S3C2410_DMASRC_MEM:
1179 /* source is memory */
1180 pr_debug( "%s: mem source, devaddr=%08lx, hwcfg=%d\n",
1181 __FUNCTION__, devaddr, hwcfg);
1182 dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0));
1183 dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr);
1184 dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3);
1185
1186 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC);
1187 return 0;
1188 }
1189
1190 printk(KERN_ERR "dma%d: invalid source type (%d)\n", channel, source);
1191 return -EINVAL;
1192 }
1193
1194 EXPORT_SYMBOL(s3c2410_dma_devconfig);
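/* Illustrative sketch (editorial example): pointing a channel at a
 * peripheral FIFO for a memory-to-device transfer. The channel number
 * and FIFO address are placeholder assumptions; hwcfg of 3 sets bit 0
 * (fixed pointer) and bit 1 (APB) as described in the comment above.
 */
#if 0
static int example_devconfig(unsigned int channel, unsigned long fifo_phys)
{
	/* memory is the source, so the device address is written to DIDST */
	return s3c2410_dma_devconfig(channel, S3C2410_DMASRC_MEM, 3, fifo_phys);
}
#endif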
1195
1196 /* s3c2410_dma_getposition
1197 *
1198 * returns the current transfer points for the dma source and destination
1199 */
1200
1201 int s3c2410_dma_getposition(dmach_t channel, dma_addr_t *src, dma_addr_t *dst)
1202 {
1203 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1204
1205 check_channel(channel);
1206
1207 if (src != NULL)
1208 *src = dma_rdreg(chan, S3C2410_DMA_DCSRC);
1209
1210 if (dst != NULL)
1211 *dst = dma_rdreg(chan, S3C2410_DMA_DCDST);
1212
1213 return 0;
1214 }
1215
1216 EXPORT_SYMBOL(s3c2410_dma_getposition);
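/* Illustrative sketch (editorial example): polling the current transfer
 * position, e.g. for an audio pointer query. The channel number and the
 * memory-to-device assumption are placeholders for the example.
 */
#if 0
static dma_addr_t example_position(unsigned int channel)
{
	dma_addr_t src, dst;

	s3c2410_dma_getposition(channel, &src, &dst);

	/* for a memory-to-device transfer the source pointer shows how
	 * far the engine has progressed through the memory buffer */
	return src;
}
#endif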
1217
1218
1219 /* system device class */
1220
1221 #ifdef CONFIG_PM
1222
1223 static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state)
1224 {
1225 struct s3c2410_dma_chan *cp = container_of(dev, struct s3c2410_dma_chan, dev);
1226
1227 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1228
1229 if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
1230 /* the dma channel is still working, which is probably
1231 * a bad thing to do over suspend/resume. We stop the
1232 * channel and assume that the client is either going to
1233 * retry after resume, or that it is broken.
1234 */
1235
1236 printk(KERN_INFO "dma: stopping channel %d due to suspend\n",
1237 cp->number);
1238
1239 s3c2410_dma_dostop(cp);
1240 }
1241
1242 return 0;
1243 }
1244
1245 static int s3c2410_dma_resume(struct sys_device *dev)
1246 {
1247 return 0;
1248 }
1249
1250 #else
1251 #define s3c2410_dma_suspend NULL
1252 #define s3c2410_dma_resume NULL
1253 #endif /* CONFIG_PM */
1254
1255 static struct sysdev_class dma_sysclass = {
1256 set_kset_name("s3c24xx-dma"),
1257 .suspend = s3c2410_dma_suspend,
1258 .resume = s3c2410_dma_resume,
1259 };
1260
1261 /* kmem cache implementation */
1262
1263 static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
1264 {
1265 memset(p, 0, sizeof(struct s3c2410_dma_buf));
1266 }
1267
1268
1269 /* initialisation code */
1270
1271 static int __init s3c2410_init_dma(void)
1272 {
1273 struct s3c2410_dma_chan *cp;
1274 int channel;
1275 int ret;
1276
1277 printk("S3C2410 DMA Driver, (c) 2003-2004 Simtec Electronics\n");
1278
1279 dma_base = ioremap(S3C24XX_PA_DMA, 0x200);
1280 if (dma_base == NULL) {
1281 printk(KERN_ERR "dma failed to remap register block\n");
1282 return -ENOMEM;
1283 }
1284
1285 ret = sysdev_class_register(&dma_sysclass);
1286 if (ret != 0) {
1287 printk(KERN_ERR "dma sysclass registration failed\n");
1288 goto err;
1289 }
1290
1291 dma_kmem = kmem_cache_create("dma_desc", sizeof(struct s3c2410_dma_buf), 0,
1292 SLAB_HWCACHE_ALIGN,
1293 s3c2410_dma_cache_ctor, NULL);
1294
1295 if (dma_kmem == NULL) {
1296 printk(KERN_ERR "dma failed to make kmem cache\n");
1297 ret = -ENOMEM;
1298 goto err;
1299 }
1300
1301 for (channel = 0; channel < S3C2410_DMA_CHANNELS; channel++) {
1302 cp = &s3c2410_chans[channel];
1303
1304 memset(cp, 0, sizeof(struct s3c2410_dma_chan));
1305
1306 /* dma channel irqs are in order.. */
1307 cp->number = channel;
1308 cp->irq = channel + IRQ_DMA0;
1309 cp->regs = dma_base + (channel*0x40);
1310
1311 /* point current stats somewhere */
1312 cp->stats = &cp->stats_store;
1313 cp->stats_store.timeout_shortest = LONG_MAX;
1314
1315 /* basic channel configuration */
1316
1317 cp->load_timeout = 1<<18;
1318
1319 /* register system device */
1320
1321 cp->dev.cls = &dma_sysclass;
1322 cp->dev.id = channel;
1323 ret = sysdev_register(&cp->dev);
1324
1325 printk("DMA channel %d at %p, irq %d\n",
1326 cp->number, cp->regs, cp->irq);
1327 }
1328
1329 return 0;
1330
1331 err:
1332 kmem_cache_destroy(dma_kmem);
1333 iounmap(dma_base);
1334 dma_base = NULL;
1335 return ret;
1336 }
1337
1338 __initcall(s3c2410_init_dma);