dmaengine: omap-dma: consolidate setup of CSDP
drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

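/*
 * Start the hardware on a channel for the given descriptor: reset the
 * progress counter (CPC on OMAP15xx, CDAC otherwise), set up channel
 * self-linking for cyclic transfers, clear any stale status in CSR,
 * program the interrupt mask from d->cicr, and finally set the enable
 * bit in CCR.
 */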
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | 1 << 15;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		val |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	val |= OMAP_DMA_CCR_EN;
	mb();
	c->plat->dma_write(val, CCR, c->dma_ch);
}

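/*
 * Stop the hardware on a channel: mask interrupts, clear status and
 * clear the CCR enable bit.  On SoCs with errata i541, channels using
 * the SEL_SRC_DST_SYNC mode must first have the OCP_SYSCONFIG MIDLE
 * mode forced to no-idle and the read/write activity bits polled until
 * the sDMA FIFO has drained.
 */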
static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	/* Clear CSR */
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 &&
	    val & OMAP_DMA_CCR_SEL_SRC_DST_SYNC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~OMAP_DMA_CCR_EN;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~(1 << 15); /* Clear the ENABLE_LNK bit */

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}

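/*
 * Program one scatterlist entry: the memory-side address goes to CDSA
 * (dev-to-mem) or CSSA (mem-to-dev) with zero element/frame indices,
 * followed by the element count (CEN) and frame count (CFN), and then
 * the channel is started.
 */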
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}

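/*
 * Take the next descriptor off the virtual channel and program its
 * per-descriptor state: the CCR addressing modes, the device-side
 * address and frame index, the consolidated CSDP value, and the
 * synchronisation mode/trigger (via CCR/CCR2 on OMAP1, via CCR only on
 * OMAP2+), before starting the first scatterlist entry.
 */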
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	uint32_t val;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 14 | 0x03 << 12);
		val |= OMAP_DMA_AMODE_POST_INC << 14;
		val |= OMAP_DMA_AMODE_CONSTANT << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(0x03 << 12 | 0x03 << 14);
		val |= OMAP_DMA_AMODE_CONSTANT << 14;
		val |= OMAP_DMA_AMODE_POST_INC << 12;
		c->plat->dma_write(val, CCR, c->dma_ch);

		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	c->plat->dma_write(d->csdp, CSDP, c->dma_ch);

	if (dma_omap1()) {
		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~(1 << 5);
		if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		c->plat->dma_write(val, CCR, c->dma_ch);

		val = c->plat->dma_read(CCR2, c->dma_ch);
		val &= ~(1 << 2);
		if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 2;
		c->plat->dma_write(val, CCR2, c->dma_ch);
	} else if (c->dma_sig) {
		val = c->plat->dma_read(CCR, c->dma_ch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~(1 << 24 | 1 << 23 | 3 << 19 | 1 << 18 | 1 << 5 | 0x1f);
		val |= (c->dma_sig & ~0x1f) << 14;
		val |= c->dma_sig & 0x1f;

		if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;

		if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;

		switch (d->sync_type) {
		case OMAP_DMA_DST_SYNC_PREFETCH:	/* dest synch */
			val |= 1 << 23;			/* Prefetch */
			break;
		case 0:
			break;
		default:
			val |= 1 << 24;			/* source synch */
			break;
		}
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	omap_dma_start_sg(c, d, 0);
}

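/*
 * Per-channel completion callback invoked by the legacy
 * omap_request_dma() interrupt handler.  For slave_sg transfers it
 * advances to the next scatterlist entry or completes the descriptor
 * and starts the next one; for cyclic transfers it only signals the
 * period callback.
 */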
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

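/*
 * Like omap_dma_desc_size(), but only count the bytes from the current
 * hardware position (addr) to the end of the descriptor, i.e. the
 * residue of an in-flight transfer.
 */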
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

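/*
 * Read back the channel's current source address from the hardware
 * progress counters (CPC on OMAP15xx, CSAC elsewhere), working around
 * the erratum where 0 can be returned while the channel is being
 * disabled, and falling back to the programmed start address if the
 * transfer has not begun.
 */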
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}

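/*
 * Read back the channel's current destination address from CPC
 * (OMAP15xx) or CDAC, falling back to the programmed destination start
 * address if the transfer has not begun.
 */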
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}

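/*
 * Report transfer status and residue.  A descriptor still sitting on
 * the virtual channel reports its full size as residue; the descriptor
 * currently in flight has its residue computed from the hardware
 * position.
 */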
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

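/*
 * Queue the channel for the scheduling tasklet, which starts the next
 * descriptor outside of hard IRQ context.  Cyclic (audio) channels
 * bypass the tasklet and are started immediately.
 */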
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio and in this case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

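/*
 * Prepare a slave scatter-gather descriptor.  The slave bus width
 * selects the sDMA element size, the maxburst value becomes the number
 * of elements per frame, and each scatterlist entry is described by an
 * address plus EN/FN counts.  CICR and CSDP are computed here once and
 * only written to the hardware when the descriptor is started.
 */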
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->cicr = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_TIPB << 2;
		else
			d->csdp |= OMAP_DMA_PORT_TIPB << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;
	}

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

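/*
 * Prepare a cyclic (e.g. audio) descriptor covering a single buffer:
 * one sg entry with period_len/es_bytes elements per frame and
 * buf_len/period_len frames.  Packet sync is used when a burst size is
 * given, element sync otherwise, and on OMAP2+ the CSDP burst fields
 * are set for 16x bursts on both ports.
 */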
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;
	d->cicr = OMAP_DMA_DROP_IRQ;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= OMAP_DMA_FRAME_IRQ;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= OMAP1_DMA_TOUT_IRQ;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= OMAP_DMA_PORT_EMIFF << 9 |
				   OMAP_DMA_PORT_MPUI << 2;
		else
			d->csdp |= OMAP_DMA_PORT_MPUI << 9 |
				   OMAP_DMA_PORT_EMIFF << 2;
	} else {
		d->cicr |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ;

		/* src and dst burst mode 16 */
		d->csdp |= 3 << 14 | 3 << 7;
	}

	if (!c->cyclic) {
		c->cyclic = true;

		if (__dma_omap15xx(od->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val |= 3 << 8;
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

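/*
 * Abort all activity on the channel: remove it from the scheduling
 * list, stop the hardware if a transfer is in flight (unless already
 * paused), clear the OMAP15xx CCR bits set up for cyclic mode, and
 * free every queued descriptor.
 */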
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;

		if (__dma_omap15xx(d->plat->dma_attr)) {
			uint32_t val;

			val = c->plat->dma_read(CCR, c->dma_ch);
			val &= ~(3 << 8);
			c->plat->dma_write(val, CCR, c->dma_ch);
		}
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

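/*
 * Probe: pick up the platform sDMA ops (deferring until they are
 * available), register the dmaengine device with 127 virtual channels,
 * and, when booted from device tree, register as a DT DMA controller.
 */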
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

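/*
 * Legacy dma_request_channel() filter: the parameter is the sDMA
 * request line number, which is matched against the channel's dma_sig.
 */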
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");